Merge 'imr: switch back to open-coded description of structures' from Michał Chojnowski

Commit aab6b0ee27 introduced the
controversial new IMR format, which relied on a very template-heavy
infrastructure to generate serialization and deserialization code via
template meta-programming. The promise was that this new format, beyond
solving the problems the previous open-coded representation had (working
on linearized buffers), would speed up migrating other components to this
IMR format, as the IMR infrastructure reduces code bloat, makes the code
more readable via declarative type descriptions as well as safer.
However, the results were almost the opposite. The template
meta-programming used by the IMR infrastructure proved very hard to
understand. Developers don't want to read or modify it. Maintainers
don't want to see it being used anywhere else. In short, nobody wants to
touch it.

This commit does a conceptual revert of
aab6b0ee27. A verbatim revert is not
possible because related code evolved a lot since the merge. Also, going
back to the previous code would mean we regress as we'd revert the move
to fragmented buffers. So this revert is only conceptual, it changes the
underlying infrastructure back to the previous open-coded one, but keeps
the fragmented buffers, as well as the interface of the related
components (to the extent possible).

Fixes: #5578

Closes #8106

* github.com:scylladb/scylla:
  imr: switch back to open-coded description of structures
  utils: managed_bytes: add a few trivial helper methods
  utils: fragment_range: move FragmentedView helpers to fragment_range.hh
  utils: fragment_range: add single_fragmented_mutable_view
  utils: fragment_range: implement FragmentRange for fragment_range
  utils: mutable_view: add front()
  types: remove an unused helper function
  test: mutation_test: fix memory calculations in make_fragments_with_non_monotonic_positions
  test: mutation_test: remove an obsolete assertion
  test: mutation_test: initialize an uninitialized variable
  test: sstable_datafile_test: fix tracking of closed sstables in sstable_run_based_compaction_test
This commit is contained in:
Avi Kivity
2021-02-17 10:21:47 +02:00
committed by Nadav Har'El
56 changed files with 647 additions and 5111 deletions

View File

@@ -24,142 +24,82 @@
#include "counters.hh"
#include "types.hh"
/// LSA migrator for cells with irrelevant type
///
///
const data::type_imr_descriptor& no_type_imr_descriptor() {
static thread_local data::type_imr_descriptor state(data::type_info::make_variable_size());
return state;
}
atomic_cell atomic_cell::make_dead(api::timestamp_type timestamp, gc_clock::time_point deletion_time) {
auto& imr_data = no_type_imr_descriptor();
return atomic_cell(
imr_data.type_info(),
imr_object_type::make(data::cell::make_dead(timestamp, deletion_time), &imr_data.lsa_migrator())
);
return atomic_cell_type::make_dead(timestamp, deletion_time);
}
atomic_cell atomic_cell::make_live(const abstract_type& type, api::timestamp_type timestamp, bytes_view value, atomic_cell::collection_member cm) {
auto& imr_data = type.imr_state();
return atomic_cell(
imr_data.type_info(),
imr_object_type::make(data::cell::make_live(imr_data.type_info(), timestamp, value, bool(cm)), &imr_data.lsa_migrator())
);
return atomic_cell_type::make_live(timestamp, single_fragment_range(value));
}
atomic_cell atomic_cell::make_live(const abstract_type& type, api::timestamp_type timestamp, managed_bytes_view value, atomic_cell::collection_member cm) {
return atomic_cell_type::make_live(timestamp, fragment_range(value));
}
atomic_cell atomic_cell::make_live(const abstract_type& type, api::timestamp_type timestamp, ser::buffer_view<bytes_ostream::fragment_iterator> value, atomic_cell::collection_member cm) {
auto& imr_data = type.imr_state();
return atomic_cell(
imr_data.type_info(),
imr_object_type::make(data::cell::make_live(imr_data.type_info(), timestamp, value, bool(cm)), &imr_data.lsa_migrator())
);
return atomic_cell_type::make_live(timestamp, value);
}
atomic_cell atomic_cell::make_live(const abstract_type& type, api::timestamp_type timestamp, const fragmented_temporary_buffer::view& value, collection_member cm)
{
auto& imr_data = type.imr_state();
return atomic_cell(
imr_data.type_info(),
imr_object_type::make(data::cell::make_live(imr_data.type_info(), timestamp, value, bool(cm)), &imr_data.lsa_migrator())
);
return atomic_cell_type::make_live(timestamp, value);
}
atomic_cell atomic_cell::make_live(const abstract_type& type, api::timestamp_type timestamp, bytes_view value,
gc_clock::time_point expiry, gc_clock::duration ttl, atomic_cell::collection_member cm) {
auto& imr_data = type.imr_state();
return atomic_cell(
imr_data.type_info(),
imr_object_type::make(data::cell::make_live(imr_data.type_info(), timestamp, value, expiry, ttl, bool(cm)), &imr_data.lsa_migrator())
);
return atomic_cell_type::make_live(timestamp, single_fragment_range(value), expiry, ttl);
}
atomic_cell atomic_cell::make_live(const abstract_type& type, api::timestamp_type timestamp, managed_bytes_view value,
gc_clock::time_point expiry, gc_clock::duration ttl, atomic_cell::collection_member cm) {
return atomic_cell_type::make_live(timestamp, fragment_range(value), expiry, ttl);
}
atomic_cell atomic_cell::make_live(const abstract_type& type, api::timestamp_type timestamp, ser::buffer_view<bytes_ostream::fragment_iterator> value,
gc_clock::time_point expiry, gc_clock::duration ttl, atomic_cell::collection_member cm) {
auto& imr_data = type.imr_state();
return atomic_cell(
imr_data.type_info(),
imr_object_type::make(data::cell::make_live(imr_data.type_info(), timestamp, value, expiry, ttl, bool(cm)), &imr_data.lsa_migrator())
);
return atomic_cell_type::make_live(timestamp, value, expiry, ttl);
}
atomic_cell atomic_cell::make_live(const abstract_type& type, api::timestamp_type timestamp, const fragmented_temporary_buffer::view& value,
gc_clock::time_point expiry, gc_clock::duration ttl, collection_member cm)
{
auto& imr_data = type.imr_state();
return atomic_cell(
imr_data.type_info(),
imr_object_type::make(data::cell::make_live(imr_data.type_info(), timestamp, value, expiry, ttl, bool(cm)), &imr_data.lsa_migrator())
);
return atomic_cell_type::make_live(timestamp, value, expiry, ttl);
}
atomic_cell atomic_cell::make_live_counter_update(api::timestamp_type timestamp, int64_t value) {
auto& imr_data = no_type_imr_descriptor();
return atomic_cell(
imr_data.type_info(),
imr_object_type::make(data::cell::make_live_counter_update(timestamp, value), &imr_data.lsa_migrator())
);
return atomic_cell_type::make_live_counter_update(timestamp, value);
}
atomic_cell atomic_cell::make_live_uninitialized(const abstract_type& type, api::timestamp_type timestamp, size_t size) {
auto& imr_data = no_type_imr_descriptor();
return atomic_cell(
imr_data.type_info(),
imr_object_type::make(data::cell::make_live_uninitialized(imr_data.type_info(), timestamp, size), &imr_data.lsa_migrator())
);
}
static imr::utils::object<data::cell::structure> copy_cell(const data::type_imr_descriptor& imr_data, const uint8_t* ptr)
{
using imr_object_type = imr::utils::object<data::cell::structure>;
// If the cell doesn't own any memory it is trivial and can be copied with
// memcpy.
auto f = data::cell::structure::get_member<data::cell::tags::flags>(ptr);
if (!f.template get<data::cell::tags::external_data>()) {
data::cell::context ctx(f, imr_data.type_info());
// XXX: We may be better off storing the total cell size in memory. Measure!
auto size = data::cell::structure::serialized_object_size(ptr, ctx);
return imr_object_type::make_raw(size, [&] (uint8_t* dst) noexcept {
std::copy_n(ptr, size, dst);
}, &imr_data.lsa_migrator());
}
return imr_object_type::make(data::cell::copy_fn(imr_data.type_info(), ptr), &imr_data.lsa_migrator());
return atomic_cell_type::make_live_uninitialized(timestamp, size);
}
atomic_cell::atomic_cell(const abstract_type& type, atomic_cell_view other)
: atomic_cell(type.imr_state().type_info(),
copy_cell(type.imr_state(), other._view.raw_pointer()))
{ }
: _data(other._view) {
set_view(_data);
}
atomic_cell_or_collection atomic_cell_or_collection::copy(const abstract_type& type) const {
if (!_data.get()) {
if (_data.empty()) {
return atomic_cell_or_collection();
}
auto& imr_data = type.imr_state();
return atomic_cell_or_collection(
copy_cell(imr_data, _data.get())
);
return atomic_cell_or_collection(managed_bytes(_data));
}
atomic_cell_or_collection::atomic_cell_or_collection(const abstract_type& type, atomic_cell_view acv)
: _data(copy_cell(type.imr_state(), acv._view.raw_pointer()))
: _data(acv._view)
{
}
bool atomic_cell_or_collection::equals(const abstract_type& type, const atomic_cell_or_collection& other) const
{
auto ptr_a = _data.get();
auto ptr_b = other._data.get();
if (!ptr_a || !ptr_b) {
return !ptr_a && !ptr_b;
if (_data.empty() || other._data.empty()) {
return _data.empty() && other._data.empty();
}
if (type.is_atomic()) {
auto a = atomic_cell_view::from_bytes(type.imr_state().type_info(), _data);
auto b = atomic_cell_view::from_bytes(type.imr_state().type_info(), other._data);
auto a = atomic_cell_view::from_bytes(type, _data);
auto b = atomic_cell_view::from_bytes(type, other._data);
if (a.timestamp() != b.timestamp()) {
return false;
}
@@ -191,28 +131,7 @@ bool atomic_cell_or_collection::equals(const abstract_type& type, const atomic_c
size_t atomic_cell_or_collection::external_memory_usage(const abstract_type& t) const
{
if (!_data.get()) {
return 0;
}
auto ctx = data::cell::context(_data.get(), t.imr_state().type_info());
auto view = data::cell::structure::make_view(_data.get(), ctx);
auto flags = view.get<data::cell::tags::flags>();
size_t external_value_size = 0;
if (flags.get<data::cell::tags::external_data>()) {
if (flags.get<data::cell::tags::collection>()) {
external_value_size = as_collection_mutation().data.size_bytes();
} else {
auto cell_view = data::cell::atomic_cell_view(t.imr_state().type_info(), view);
external_value_size = cell_view.value_size();
}
// Add overhead of chunk headers. The last one is a special case.
external_value_size += (external_value_size - 1) / data::cell::effective_external_chunk_length * data::cell::external_chunk_overhead;
external_value_size += data::cell::external_last_chunk_overhead;
}
return data::cell::structure::serialized_object_size(_data.get(), ctx)
+ imr_object_type::size_overhead + external_value_size;
return _data.external_memory_usage();
}
std::ostream&
@@ -221,7 +140,7 @@ operator<<(std::ostream& os, const atomic_cell_view& acv) {
return fmt_print(os, "atomic_cell{{{},ts={:d},expiry={:d},ttl={:d}}}",
acv.is_counter_update()
? "counter_update_value=" + to_sstring(acv.counter_update_value())
: to_hex(acv.value().linearize()),
: to_hex(to_bytes(acv.value())),
acv.timestamp(),
acv.is_live_and_has_ttl() ? acv.expiry().time_since_epoch().count() : -1,
acv.is_live_and_has_ttl() ? acv.ttl().count() : 0);
@@ -247,12 +166,11 @@ operator<<(std::ostream& os, const atomic_cell_view::printer& acvp) {
cell_value_string_builder << "counter_update_value=" << acv.counter_update_value();
} else {
cell_value_string_builder << "shards: ";
counter_cell_view::with_linearized(acv, [&cell_value_string_builder] (counter_cell_view& ccv) {
cell_value_string_builder << ::join(", ", ccv.shards());
});
auto ccv = counter_cell_view(acv);
cell_value_string_builder << ::join(", ", ccv.shards());
}
} else {
cell_value_string_builder << type.to_string(acv.value().linearize());
cell_value_string_builder << type.to_string(to_bytes(acv.value()));
}
return fmt_print(os, "atomic_cell{{{},ts={:d},expiry={:d},ttl={:d}}}",
cell_value_string_builder.str(),
@@ -271,12 +189,11 @@ operator<<(std::ostream& os, const atomic_cell::printer& acp) {
}
std::ostream& operator<<(std::ostream& os, const atomic_cell_or_collection::printer& p) {
if (!p._cell._data.get()) {
if (p._cell._data.empty()) {
return os << "{ null atomic_cell_or_collection }";
}
using dc = data::cell;
os << "{ ";
if (dc::structure::get_member<dc::tags::flags>(p._cell._data.get()).get<dc::tags::collection>()) {
if (p._cdef.type->is_collection()) {
os << "collection ";
auto cmv = p._cell.as_collection_mutation();
os << collection_mutation_view::printer(*p._cdef.type, cmv);

View File

@@ -26,12 +26,12 @@
#include "tombstone.hh"
#include "gc_clock.hh"
#include "utils/managed_bytes.hh"
#include "utils/fragment_range.hh"
#include <seastar/net//byteorder.hh>
#include <seastar/util/bool_class.hh>
#include <cstdint>
#include <iosfwd>
#include "data/cell.hh"
#include "data/schema_info.hh"
#include "imr/utils.hh"
#include <concepts>
#include "utils/fragmented_temporary_buffer.hh"
#include "serializer.hh"
@@ -40,41 +40,191 @@ class abstract_type;
class collection_type_impl;
class atomic_cell_or_collection;
using atomic_cell_value_view = data::value_view;
using atomic_cell_value_mutable_view = data::value_mutable_view;
using atomic_cell_value = managed_bytes;
template <mutable_view is_mutable>
using atomic_cell_value_basic_view = managed_bytes_basic_view<is_mutable>;
using atomic_cell_value_view = atomic_cell_value_basic_view<mutable_view::no>;
using atomic_cell_value_mutable_view = atomic_cell_value_basic_view<mutable_view::yes>;
// Overwrites the trivial value `val` at byte `offset` within the cell viewed
// by `out`. Writing goes through a local copy of the view, so `out` itself is
// not advanced by the call.
template <typename T>
requires std::is_trivial_v<T>
static void set_field(atomic_cell_value_mutable_view& out, unsigned offset, T val) {
    managed_bytes_mutable_view dst(out);
    dst.remove_prefix(offset);
    write<T>(dst, val);
}
// Convenience overload: overwrites the trivial value `val` at byte `offset`
// directly inside the owning buffer `out` by materializing a mutable view
// over it and delegating to the view-based overload.
template <typename T>
requires std::is_trivial_v<T>
static void set_field(atomic_cell_value& out, unsigned offset, T val) {
    atomic_cell_value_mutable_view whole_cell(out);
    set_field(whole_cell, offset, val);
}
// Copies the (possibly fragmented) payload `value` into `b`, starting at
// `value_offset`. `b` must already be large enough to hold
// value_offset + value.size_bytes() bytes.
template <FragmentRange Buffer>
static void set_value(managed_bytes& b, unsigned value_offset, const Buffer& value) {
    // Window over exactly the value portion of the destination buffer.
    auto dst = managed_bytes_mutable_view(b).substr(value_offset, value.size_bytes());
    for (const auto& fragment : value) {
        write_fragmented(dst, single_fragmented_view(fragment));
    }
}
// Reads a trivially-copyable value of type T located `offset` bytes into the
// fragmented input `in`. `in` is taken by value, so the caller's view is not
// consumed by the read.
template <typename T, FragmentedView Input>
requires std::is_trivial_v<T>
static T get_field(Input in, unsigned offset = 0) {
in.remove_prefix(offset);
return read_simple<T>(in);
}
/*
* Represents atomic cell layout. Works on serialized form.
*
* Layout:
*
* <live> := <int8_t:flags><int64_t:timestamp>(<int64_t:expiry><int32_t:ttl>)?<value>
* <dead> := <int8_t: 0><int64_t:timestamp><int64_t:deletion_time>
*/
// Static accessor/factory functions for the serialized atomic cell layout
// described above. The class is never instantiated; it only namespaces the
// layout constants and the functions that read and build serialized cells.
class atomic_cell_type final {
private:
// Bits of the leading flags byte. A dead cell has flags == 0.
static constexpr int8_t LIVE_FLAG = 0x01;
static constexpr int8_t EXPIRY_FLAG = 0x02; // When present, expiry field is present. Set only for live cells
static constexpr int8_t COUNTER_UPDATE_FLAG = 0x08; // Cell is a counter update.
// Byte offsets and sizes of the fixed fields within the serialized cell.
static constexpr unsigned flags_size = 1;
static constexpr unsigned timestamp_offset = flags_size;
static constexpr unsigned timestamp_size = 8;
static constexpr unsigned expiry_offset = timestamp_offset + timestamp_size;
static constexpr unsigned expiry_size = 8;
// deletion_time (dead cells) deliberately occupies the same bytes as
// expiry (live expiring cells); the flags byte selects the interpretation.
static constexpr unsigned deletion_time_offset = timestamp_offset + timestamp_size;
static constexpr unsigned deletion_time_size = 8;
static constexpr unsigned ttl_offset = expiry_offset + expiry_size;
static constexpr unsigned ttl_size = 4;
friend class counter_cell_builder;
private:
// All predicates below only inspect the first (flags) byte of the cell.
static bool is_counter_update(atomic_cell_value_view cell) {
return cell.front() & COUNTER_UPDATE_FLAG;
}
static bool is_live(atomic_cell_value_view cell) {
return cell.front() & LIVE_FLAG;
}
// True iff the cell is a live, expiring cell (EXPIRY_FLAG is set only for
// live cells, so no separate liveness check is needed here).
static bool is_live_and_has_ttl(atomic_cell_value_view cell) {
return cell.front() & EXPIRY_FLAG;
}
static bool is_dead(atomic_cell_value_view cell) {
return !is_live(cell);
}
// Can be called on live and dead cells
static api::timestamp_type timestamp(atomic_cell_value_view cell) {
return get_field<api::timestamp_type>(cell, timestamp_offset);
}
// Overwrites the timestamp field in place; valid for live and dead cells.
static void set_timestamp(atomic_cell_value_mutable_view& cell, api::timestamp_type ts) {
set_field(cell, timestamp_offset, ts);
}
// Can be called on live cells only
private:
// Returns a view of the value payload: everything past the fixed header,
// whose size depends on whether the optional expiry+ttl fields are present.
template <mutable_view is_mutable>
static managed_bytes_basic_view<is_mutable> do_get_value(managed_bytes_basic_view<is_mutable> cell) {
auto expiry_field_size = bool(cell.front() & EXPIRY_FLAG) * (expiry_size + ttl_size);
auto value_offset = flags_size + timestamp_size + expiry_field_size;
cell.remove_prefix(value_offset);
return cell;
}
public:
static atomic_cell_value_view value(managed_bytes_view cell) {
return do_get_value(cell);
}
static atomic_cell_value_mutable_view value(managed_bytes_mutable_view cell) {
return do_get_value(cell);
}
// Can be called on live counter update cells only
static int64_t counter_update_value(atomic_cell_value_view cell) {
return get_field<int64_t>(cell, flags_size + timestamp_size);
}
// Can be called only when is_dead() is true.
static gc_clock::time_point deletion_time(atomic_cell_value_view cell) {
assert(is_dead(cell));
return gc_clock::time_point(gc_clock::duration(get_field<int64_t>(cell, deletion_time_offset)));
}
// Can be called only when is_live_and_has_ttl() is true.
static gc_clock::time_point expiry(atomic_cell_value_view cell) {
assert(is_live_and_has_ttl(cell));
auto expiry = get_field<int64_t>(cell, expiry_offset);
return gc_clock::time_point(gc_clock::duration(expiry));
}
// Can be called only when is_live_and_has_ttl() is true.
static gc_clock::duration ttl(atomic_cell_value_view cell) {
assert(is_live_and_has_ttl(cell));
return gc_clock::duration(get_field<int32_t>(cell, ttl_offset));
}
// Builds a dead cell: flags byte is 0, followed by timestamp and
// deletion_time (no value payload).
static managed_bytes make_dead(api::timestamp_type timestamp, gc_clock::time_point deletion_time) {
managed_bytes b(managed_bytes::initialized_later(), flags_size + timestamp_size + deletion_time_size);
b[0] = 0;
set_field(b, timestamp_offset, timestamp);
set_field(b, deletion_time_offset, static_cast<int64_t>(deletion_time.time_since_epoch().count()));
return b;
}
// Builds a live, non-expiring cell with the given (possibly fragmented) value.
template <FragmentRange Buffer>
static managed_bytes make_live(api::timestamp_type timestamp, const Buffer& value) {
auto value_offset = flags_size + timestamp_size;
managed_bytes b(managed_bytes::initialized_later(), value_offset + value.size_bytes());
b[0] = LIVE_FLAG;
set_field(b, timestamp_offset, timestamp);
set_value(b, value_offset, value);
return b;
}
// Builds a live counter-update cell; the payload is the 8-byte delta.
static managed_bytes make_live_counter_update(api::timestamp_type timestamp, int64_t value) {
auto value_offset = flags_size + timestamp_size;
managed_bytes b(managed_bytes::initialized_later(), value_offset + sizeof(value));
b[0] = LIVE_FLAG | COUNTER_UPDATE_FLAG;
set_field(b, timestamp_offset, timestamp);
set_field(b, value_offset, value);
return b;
}
// Builds a live, expiring cell: the optional expiry+ttl fields precede the value.
template <FragmentRange Buffer>
static managed_bytes make_live(api::timestamp_type timestamp, const Buffer& value, gc_clock::time_point expiry, gc_clock::duration ttl) {
auto value_offset = flags_size + timestamp_size + expiry_size + ttl_size;
managed_bytes b(managed_bytes::initialized_later(), value_offset + value.size_bytes());
b[0] = EXPIRY_FLAG | LIVE_FLAG;
set_field(b, timestamp_offset, timestamp);
set_field(b, expiry_offset, static_cast<int64_t>(expiry.time_since_epoch().count()));
set_field(b, ttl_offset, static_cast<int32_t>(ttl.count()));
set_value(b, value_offset, value);
return b;
}
// Builds a live cell with room for `size` value bytes; the value bytes are
// left uninitialized for the caller to fill in (e.g. via the mutable value() view).
static managed_bytes make_live_uninitialized(api::timestamp_type timestamp, size_t size) {
auto value_offset = flags_size + timestamp_size;
managed_bytes b(managed_bytes::initialized_later(), value_offset + size);
b[0] = LIVE_FLAG;
set_field(b, timestamp_offset, timestamp);
return b;
}
template <mutable_view is_mutable>
friend class basic_atomic_cell_view;
friend class atomic_cell;
};
/// View of an atomic cell
template<mutable_view is_mutable>
class basic_atomic_cell_view {
protected:
data::cell::basic_atomic_cell_view<is_mutable> _view;
friend class atomic_cell;
public:
using pointer_type = std::conditional_t<is_mutable == mutable_view::no, const uint8_t*, uint8_t*>;
managed_bytes_basic_view<is_mutable> _view;
friend class atomic_cell;
protected:
explicit basic_atomic_cell_view(data::cell::basic_atomic_cell_view<is_mutable> v)
: _view(std::move(v)) { }
basic_atomic_cell_view(const data::type_info& ti, pointer_type ptr)
: _view(data::cell::make_atomic_cell_view(ti, ptr))
{ }
void set_view(managed_bytes_basic_view<is_mutable> v) {
_view = v;
}
basic_atomic_cell_view() = default;
explicit basic_atomic_cell_view(managed_bytes_basic_view<is_mutable> v) : _view(std::move(v)) { }
friend class atomic_cell_or_collection;
public:
operator basic_atomic_cell_view<mutable_view::no>() const noexcept {
return basic_atomic_cell_view<mutable_view::no>(_view);
}
void swap(basic_atomic_cell_view& other) noexcept {
using std::swap;
swap(_view, other._view);
}
bool is_counter_update() const {
return _view.is_counter_update();
return atomic_cell_type::is_counter_update(_view);
}
bool is_live() const {
return _view.is_live();
return atomic_cell_type::is_live(_view);
}
bool is_live(tombstone t, bool is_counter) const {
return is_live() && !is_covered_by(t, is_counter);
@@ -83,73 +233,72 @@ public:
return is_live() && !is_covered_by(t, is_counter) && !has_expired(now);
}
bool is_live_and_has_ttl() const {
return _view.is_expiring();
return atomic_cell_type::is_live_and_has_ttl(_view);
}
bool is_dead(gc_clock::time_point now) const {
return !is_live() || has_expired(now);
return atomic_cell_type::is_dead(_view) || has_expired(now);
}
bool is_covered_by(tombstone t, bool is_counter) const {
return timestamp() <= t.timestamp || (is_counter && t.timestamp != api::missing_timestamp);
}
// Can be called on live and dead cells
api::timestamp_type timestamp() const {
return _view.timestamp();
return atomic_cell_type::timestamp(_view);
}
void set_timestamp(api::timestamp_type ts) {
_view.set_timestamp(ts);
atomic_cell_type::set_timestamp(_view, ts);
}
// Can be called on live cells only
data::basic_value_view<is_mutable> value() const {
return _view.value();
atomic_cell_value_basic_view<is_mutable> value() const {
return atomic_cell_type::value(_view);
}
// Can be called on live cells only
size_t value_size() const {
return _view.value_size();
return atomic_cell_type::value(_view).size();
}
bool is_value_fragmented() const {
return _view.is_value_fragmented();
return _view.is_fragmented();
}
// Can be called on live counter update cells only
int64_t counter_update_value() const {
return _view.counter_update_value();
return atomic_cell_type::counter_update_value(_view);
}
// Can be called only when is_dead(gc_clock::time_point)
gc_clock::time_point deletion_time() const {
return !is_live() ? _view.deletion_time() : expiry() - ttl();
return !is_live() ? atomic_cell_type::deletion_time(_view) : expiry() - ttl();
}
// Can be called only when is_live_and_has_ttl()
gc_clock::time_point expiry() const {
return _view.expiry();
return atomic_cell_type::expiry(_view);
}
// Can be called only when is_live_and_has_ttl()
gc_clock::duration ttl() const {
return _view.ttl();
return atomic_cell_type::ttl(_view);
}
// Can be called on live and dead cells
bool has_expired(gc_clock::time_point now) const {
return is_live_and_has_ttl() && expiry() <= now;
}
bytes_view serialize() const {
return _view.serialize();
managed_bytes_view serialize() const {
return _view;
}
};
class atomic_cell_view final : public basic_atomic_cell_view<mutable_view::no> {
atomic_cell_view(const data::type_info& ti, const uint8_t* data)
: basic_atomic_cell_view<mutable_view::no>(ti, data) {}
atomic_cell_view(managed_bytes_view v)
: basic_atomic_cell_view(v) {}
template<mutable_view is_mutable>
atomic_cell_view(data::cell::basic_atomic_cell_view<is_mutable> view)
: basic_atomic_cell_view<mutable_view::no>(view) { }
atomic_cell_view(basic_atomic_cell_view<is_mutable> view)
: basic_atomic_cell_view<mutable_view::no>(view) {}
friend class atomic_cell;
public:
static atomic_cell_view from_bytes(const data::type_info& ti, const imr::utils::object<data::cell::structure>& data) {
return atomic_cell_view(ti, data.get());
static atomic_cell_view from_bytes(const abstract_type& t, managed_bytes_view v) {
return atomic_cell_view(v);
}
static atomic_cell_view from_bytes(const data::type_info& ti, bytes_view bv) {
return atomic_cell_view(ti, reinterpret_cast<const uint8_t*>(bv.begin()));
static atomic_cell_view from_bytes(const abstract_type& t, bytes_view v) {
return atomic_cell_view(managed_bytes_view(v));
}
friend std::ostream& operator<<(std::ostream& os, const atomic_cell_view& acv);
@@ -164,11 +313,11 @@ public:
};
class atomic_cell_mutable_view final : public basic_atomic_cell_view<mutable_view::yes> {
atomic_cell_mutable_view(const data::type_info& ti, uint8_t* data)
: basic_atomic_cell_view<mutable_view::yes>(ti, data) {}
atomic_cell_mutable_view(managed_bytes_mutable_view data)
: basic_atomic_cell_view(data) {}
public:
static atomic_cell_mutable_view from_bytes(const data::type_info& ti, imr::utils::object<data::cell::structure>& data) {
return atomic_cell_mutable_view(ti, data.get());
static atomic_cell_mutable_view from_bytes(const abstract_type& t, managed_bytes_mutable_view v) {
return atomic_cell_mutable_view(v);
}
friend class atomic_cell;
@@ -177,26 +326,31 @@ public:
using atomic_cell_ref = atomic_cell_mutable_view;
class atomic_cell final : public basic_atomic_cell_view<mutable_view::yes> {
using imr_object_type = imr::utils::object<data::cell::structure>;
imr_object_type _data;
atomic_cell(const data::type_info& ti, imr::utils::object<data::cell::structure>&& data)
: basic_atomic_cell_view<mutable_view::yes>(ti, data.get()), _data(std::move(data)) {}
managed_bytes _data;
atomic_cell(managed_bytes b) : _data(std::move(b)) {
set_view(_data);
}
public:
class collection_member_tag;
using collection_member = bool_class<collection_member_tag>;
atomic_cell(atomic_cell&&) = default;
atomic_cell& operator=(const atomic_cell&) = delete;
atomic_cell& operator=(atomic_cell&&) = default;
void swap(atomic_cell& other) noexcept {
basic_atomic_cell_view<mutable_view::yes>::swap(other);
_data.swap(other._data);
atomic_cell(atomic_cell&& o) noexcept : _data(std::move(o._data)) {
set_view(_data);
}
operator atomic_cell_view() const { return atomic_cell_view(_view); }
atomic_cell& operator=(const atomic_cell&) = delete;
atomic_cell& operator=(atomic_cell&& o) {
_data = std::move(o._data);
set_view(_data);
return *this;
}
operator atomic_cell_view() const { return atomic_cell_view(managed_bytes_view(_data)); }
atomic_cell(const abstract_type& t, atomic_cell_view other);
static atomic_cell make_dead(api::timestamp_type timestamp, gc_clock::time_point deletion_time);
static atomic_cell make_live(const abstract_type& type, api::timestamp_type timestamp, bytes_view value,
collection_member = collection_member::no);
static atomic_cell make_live(const abstract_type& type, api::timestamp_type timestamp, managed_bytes_view value,
collection_member = collection_member::no);
static atomic_cell make_live(const abstract_type& type, api::timestamp_type timestamp, ser::buffer_view<bytes_ostream::fragment_iterator> value,
collection_member = collection_member::no);
static atomic_cell make_live(const abstract_type& type, api::timestamp_type timestamp, const fragmented_temporary_buffer::view& value,
@@ -208,6 +362,8 @@ public:
static atomic_cell make_live_counter_update(api::timestamp_type timestamp, int64_t value);
static atomic_cell make_live(const abstract_type&, api::timestamp_type timestamp, bytes_view value,
gc_clock::time_point expiry, gc_clock::duration ttl, collection_member = collection_member::no);
static atomic_cell make_live(const abstract_type&, api::timestamp_type timestamp, managed_bytes_view value,
gc_clock::time_point expiry, gc_clock::duration ttl, collection_member = collection_member::no);
static atomic_cell make_live(const abstract_type&, api::timestamp_type timestamp, ser::buffer_view<bytes_ostream::fragment_iterator> value,
gc_clock::time_point expiry, gc_clock::duration ttl, collection_member = collection_member::no);
static atomic_cell make_live(const abstract_type&, api::timestamp_type timestamp, const fragmented_temporary_buffer::view& value,

View File

@@ -52,9 +52,7 @@ struct appending_hash<atomic_cell_view> {
feed_hash(h, cell.timestamp());
if (cell.is_live()) {
if (cdef.is_counter()) {
counter_cell_view::with_linearized(cell, [&] (counter_cell_view ccv) {
::feed_hash(h, ccv);
});
::feed_hash(h, counter_cell_view(cell));
return;
}
if (cell.is_live_and_has_ttl()) {

View File

@@ -26,20 +26,14 @@
#include "schema.hh"
#include "hashing.hh"
#include "imr/utils.hh"
// A variant type that can hold either an atomic_cell, or a serialized collection.
// Which type is stored is determined by the schema.
// Has an "empty" state.
// Objects moved-from are left in an empty state.
class atomic_cell_or_collection final {
// FIXME: This has made us lose small-buffer optimisation. Unfortunately,
// due to the changed cell format it would be less effective now, anyway.
// Measure the actual impact because any attempts to fix this will become
// irrelevant once rows are converted to the IMR as well, so maybe we can
// live with this like that.
using imr_object_type = imr::utils::object<data::cell::structure>;
imr_object_type _data;
managed_bytes _data;
private:
atomic_cell_or_collection(imr::utils::object<data::cell::structure>&& data) : _data(std::move(data)) {}
atomic_cell_or_collection(managed_bytes&& data) : _data(std::move(data)) {}
public:
atomic_cell_or_collection() = default;
atomic_cell_or_collection(atomic_cell_or_collection&&) = default;
@@ -49,20 +43,16 @@ public:
atomic_cell_or_collection(atomic_cell ac) : _data(std::move(ac._data)) {}
atomic_cell_or_collection(const abstract_type& at, atomic_cell_view acv);
static atomic_cell_or_collection from_atomic_cell(atomic_cell data) { return { std::move(data._data) }; }
atomic_cell_view as_atomic_cell(const column_definition& cdef) const { return atomic_cell_view::from_bytes(cdef.type->imr_state().type_info(), _data); }
atomic_cell_ref as_atomic_cell_ref(const column_definition& cdef) { return atomic_cell_mutable_view::from_bytes(cdef.type->imr_state().type_info(), _data); }
atomic_cell_mutable_view as_mutable_atomic_cell(const column_definition& cdef) { return atomic_cell_mutable_view::from_bytes(cdef.type->imr_state().type_info(), _data); }
atomic_cell_view as_atomic_cell(const column_definition& cdef) const { return atomic_cell_view::from_bytes(*cdef.type, _data); }
atomic_cell_mutable_view as_mutable_atomic_cell(const column_definition& cdef) { return atomic_cell_mutable_view::from_bytes(*cdef.type, _data); }
atomic_cell_or_collection(collection_mutation cm) : _data(std::move(cm._data)) { }
atomic_cell_or_collection copy(const abstract_type&) const;
explicit operator bool() const {
return bool(_data);
return !_data.empty();
}
static constexpr bool can_use_mutable_view() {
return true;
}
void swap(atomic_cell_or_collection& other) noexcept {
_data.swap(other._data);
}
static atomic_cell_or_collection from_collection_mutation(collection_mutation data) { return std::move(data._data); }
collection_mutation_view as_collection_mutation() const;
bytes_view serialize() const;
@@ -82,12 +72,3 @@ public:
};
friend std::ostream& operator<<(std::ostream&, const printer&);
};
namespace std {
inline void swap(atomic_cell_or_collection& a, atomic_cell_or_collection& b) noexcept
{
a.swap(b);
}
}

View File

@@ -979,13 +979,13 @@ private:
};
static bytes get_bytes(const atomic_cell_view& acv) {
return acv.value().linearize();
return to_bytes(acv.value());
}
static bytes_view get_bytes_view(const atomic_cell_view& acv, std::vector<bytes>& buf) {
return acv.value().is_fragmented()
? bytes_view{buf.emplace_back(acv.value().linearize())}
: acv.value().first_fragment();
? bytes_view{buf.emplace_back(to_bytes(acv.value()))}
: acv.value().current_fragment();
}
static ttl_opt get_ttl(const atomic_cell_view& acv) {

View File

@@ -22,7 +22,6 @@
#include "types/collection.hh"
#include "types/user.hh"
#include "concrete_types.hh"
#include "atomic_cell_or_collection.hh"
#include "mutation_partition.hh"
#include "compaction_garbage_collector.hh"
#include "combine.hh"
@@ -30,40 +29,28 @@
#include "collection_mutation.hh"
collection_mutation::collection_mutation(const abstract_type& type, collection_mutation_view v)
: _data(imr_object_type::make(data::cell::make_collection(v.data), &type.imr_state().lsa_migrator())) {}
: _data(v.data) {}
collection_mutation::collection_mutation(const abstract_type& type, const bytes_ostream& data)
: _data(imr_object_type::make(data::cell::make_collection(fragment_range_view(data)), &type.imr_state().lsa_migrator())) {}
static collection_mutation_view get_collection_mutation_view(const uint8_t* ptr)
{
auto f = data::cell::structure::get_member<data::cell::tags::flags>(ptr);
auto ti = data::type_info::make_collection();
data::cell::context ctx(f, ti);
auto view = data::cell::structure::get_member<data::cell::tags::cell>(ptr).as<data::cell::tags::collection>(ctx);
auto dv = data::cell::variable_value::make_view(view, f.get<data::cell::tags::external_data>());
return collection_mutation_view { dv };
}
collection_mutation::collection_mutation(const abstract_type& type, managed_bytes data)
: _data(std::move(data)) {}
collection_mutation::operator collection_mutation_view() const
{
return get_collection_mutation_view(_data.get());
return collection_mutation_view{managed_bytes_view(_data)};
}
collection_mutation_view atomic_cell_or_collection::as_collection_mutation() const {
return get_collection_mutation_view(_data.get());
return collection_mutation_view{managed_bytes_view(_data)};
}
bool collection_mutation_view::is_empty() const {
auto in = collection_mutation_input_stream(data);
auto in = collection_mutation_input_stream(fragment_range(data));
auto has_tomb = in.read_trivial<bool>();
return !has_tomb && in.read_trivial<uint32_t>() == 0;
}
template <typename F>
requires std::is_invocable_r_v<const data::type_info&, F, collection_mutation_input_stream&>
static bool is_any_live(const atomic_cell_value_view& data, tombstone tomb, gc_clock::time_point now, F&& read_cell_type_info) {
auto in = collection_mutation_input_stream(data);
bool collection_mutation_view::is_any_live(const abstract_type& type, tombstone tomb, gc_clock::time_point now) const {
auto in = collection_mutation_input_stream(fragment_range(data));
auto has_tomb = in.read_trivial<bool>();
if (has_tomb) {
auto ts = in.read_trivial<api::timestamp_type>();
@@ -73,9 +60,10 @@ static bool is_any_live(const atomic_cell_value_view& data, tombstone tomb, gc_c
auto nr = in.read_trivial<uint32_t>();
for (uint32_t i = 0; i != nr; ++i) {
auto& type_info = read_cell_type_info(in);
auto key_size = in.read_trivial<uint32_t>();
in.skip(key_size);
auto vsize = in.read_trivial<uint32_t>();
auto value = atomic_cell_view::from_bytes(type_info, in.read(vsize));
auto value = atomic_cell_view::from_bytes(type, in.read(vsize));
if (value.is_live(tomb, now, false)) {
return true;
}
@@ -84,33 +72,8 @@ static bool is_any_live(const atomic_cell_value_view& data, tombstone tomb, gc_c
return false;
}
bool collection_mutation_view::is_any_live(const abstract_type& type, tombstone tomb, gc_clock::time_point now) const {
return visit(type, make_visitor(
[&] (const collection_type_impl& ctype) {
auto& type_info = ctype.value_comparator()->imr_state().type_info();
return ::is_any_live(data, tomb, now, [&type_info] (collection_mutation_input_stream& in) -> const data::type_info& {
auto key_size = in.read_trivial<uint32_t>();
in.skip(key_size);
return type_info;
});
},
[&] (const user_type_impl& utype) {
return ::is_any_live(data, tomb, now, [&utype] (collection_mutation_input_stream& in) -> const data::type_info& {
auto key_size = in.read_trivial<uint32_t>();
auto key = in.read(key_size);
return utype.type(deserialize_field_index(key))->imr_state().type_info();
});
},
[&] (const abstract_type& o) -> bool {
throw std::runtime_error(format("collection_mutation_view::is_any_live: unknown type {}", o.name()));
}
));
}
template <typename F>
requires std::is_invocable_r_v<const data::type_info&, F, collection_mutation_input_stream&>
static api::timestamp_type last_update(const atomic_cell_value_view& data, F&& read_cell_type_info) {
auto in = collection_mutation_input_stream(data);
api::timestamp_type collection_mutation_view::last_update(const abstract_type& type) const {
auto in = collection_mutation_input_stream(fragment_range(data));
api::timestamp_type max = api::missing_timestamp;
auto has_tomb = in.read_trivial<bool>();
if (has_tomb) {
@@ -120,39 +83,16 @@ static api::timestamp_type last_update(const atomic_cell_value_view& data, F&& r
auto nr = in.read_trivial<uint32_t>();
for (uint32_t i = 0; i != nr; ++i) {
auto& type_info = read_cell_type_info(in);
const auto key_size = in.read_trivial<uint32_t>();
in.skip(key_size);
auto vsize = in.read_trivial<uint32_t>();
auto value = atomic_cell_view::from_bytes(type_info, in.read(vsize));
auto value = atomic_cell_view::from_bytes(type, in.read(vsize));
max = std::max(value.timestamp(), max);
}
return max;
}
api::timestamp_type collection_mutation_view::last_update(const abstract_type& type) const {
return visit(type, make_visitor(
[&] (const collection_type_impl& ctype) {
auto& type_info = ctype.value_comparator()->imr_state().type_info();
return ::last_update(data, [&type_info] (collection_mutation_input_stream& in) -> const data::type_info& {
auto key_size = in.read_trivial<uint32_t>();
in.skip(key_size);
return type_info;
});
},
[&] (const user_type_impl& utype) {
return ::last_update(data, [&utype] (collection_mutation_input_stream& in) -> const data::type_info& {
auto key_size = in.read_trivial<uint32_t>();
auto key = in.read(key_size);
return utype.type(deserialize_field_index(key))->imr_state().type_info();
});
},
[&] (const abstract_type& o) -> api::timestamp_type {
throw std::runtime_error(format("collection_mutation_view::last_update: unknown type {}", o.name()));
}
));
}
std::ostream& operator<<(std::ostream& os, const collection_mutation_view::printer& cmvp) {
fmt::print(os, "{{collection_mutation_view ");
cmvp._cmv.with_deserialized(cmvp._type, [&os, &type = cmvp._type] (const collection_mutation_view_description& cmvd) {
@@ -278,28 +218,31 @@ static collection_mutation serialize_collection_mutation(
auto size = accumulate(cells, (size_t)4, element_size);
size += 1;
if (tomb) {
size += sizeof(tomb.timestamp) + sizeof(tomb.deletion_time);
size += sizeof(int64_t) + sizeof(int64_t);
}
bytes_ostream ret;
ret.reserve(size);
auto out = ret.write_begin();
*out++ = bool(tomb);
managed_bytes ret(managed_bytes::initialized_later(), size);
managed_bytes_mutable_view out(ret);
write<uint8_t>(out, uint8_t(bool(tomb)));
if (tomb) {
write(out, tomb.timestamp);
write(out, tomb.deletion_time.time_since_epoch().count());
write<int64_t>(out, tomb.timestamp);
write<int64_t>(out, tomb.deletion_time.time_since_epoch().count());
}
auto writeb = [&out] (bytes_view v) {
serialize_int32(out, v.size());
out = std::copy_n(v.begin(), v.size(), out);
auto writek = [&out] (bytes_view v) {
write<int32_t>(out, v.size());
write_fragmented(out, single_fragmented_view(v));
};
auto writev = [&out] (managed_bytes_view v) {
write<int32_t>(out, v.size());
write_fragmented(out, v);
};
// FIXME: overflow?
serialize_int32(out, boost::distance(cells));
write<int32_t>(out, boost::distance(cells));
for (auto&& kv : cells) {
auto&& k = kv.first;
auto&& v = kv.second;
writeb(k);
writek(k);
writeb(v.serialize());
writev(v.serialize());
}
return collection_mutation(type, ret);
}
@@ -448,13 +391,12 @@ deserialize_collection_mutation(const abstract_type& type, collection_mutation_i
return visit(type, make_visitor(
[&] (const collection_type_impl& ctype) {
// value_comparator(), ugh
auto& type_info = ctype.value_comparator()->imr_state().type_info();
return deserialize_collection_mutation(in, [&type_info] (collection_mutation_input_stream& in) {
return deserialize_collection_mutation(in, [&ctype] (collection_mutation_input_stream& in) {
// FIXME: we could probably avoid the need for size
auto ksize = in.read_trivial<uint32_t>();
auto key = in.read(ksize);
auto vsize = in.read_trivial<uint32_t>();
auto value = atomic_cell_view::from_bytes(type_info, in.read(vsize));
auto value = atomic_cell_view::from_bytes(*ctype.value_comparator(), in.read(vsize));
return std::make_pair(key, value);
});
},
@@ -464,8 +406,7 @@ deserialize_collection_mutation(const abstract_type& type, collection_mutation_i
auto ksize = in.read_trivial<uint32_t>();
auto key = in.read(ksize);
auto vsize = in.read_trivial<uint32_t>();
auto value = atomic_cell_view::from_bytes(
utype.type(deserialize_field_index(key))->imr_state().type_info(), in.read(vsize));
auto value = atomic_cell_view::from_bytes(*utype.type(deserialize_field_index(key)), in.read(vsize));
return std::make_pair(key, value);
});
},

View File

@@ -31,7 +31,6 @@
#include <iosfwd>
class abstract_type;
class bytes_ostream;
class compaction_garbage_collector;
class row_tombstone;
@@ -70,7 +69,7 @@ struct collection_mutation_view_description {
collection_mutation serialize(const abstract_type&) const;
};
using collection_mutation_input_stream = utils::linearizing_input_stream<atomic_cell_value_view, marshal_exception>;
using collection_mutation_input_stream = utils::linearizing_input_stream<fragment_range<managed_bytes_view>, marshal_exception>;
// Given a linearized collection_mutation_view, returns an auxiliary struct allowing the inspection of each cell.
// The struct is an observer of the data given by the collection_mutation_view and is only valid while the
@@ -80,7 +79,7 @@ collection_mutation_view_description deserialize_collection_mutation(const abstr
class collection_mutation_view {
public:
atomic_cell_value_view data;
managed_bytes_view data;
// Is this a noop mutation?
bool is_empty() const;
@@ -97,7 +96,7 @@ public:
// calls it on the corresponding description of `this`.
template <typename F>
inline decltype(auto) with_deserialized(const abstract_type& type, F f) const {
auto stream = collection_mutation_input_stream(data);
auto stream = collection_mutation_input_stream(fragment_range(data));
return f(deserialize_collection_mutation(type, stream));
}
@@ -122,12 +121,11 @@ public:
// The mutation may also contain a collection-wide tombstone.
class collection_mutation {
public:
using imr_object_type = imr::utils::object<data::cell::structure>;
imr_object_type _data;
managed_bytes _data;
collection_mutation() {}
collection_mutation(const abstract_type&, collection_mutation_view);
collection_mutation(const abstract_type& type, const bytes_ostream& data);
collection_mutation(const abstract_type&, managed_bytes);
operator collection_mutation_view() const;
};

View File

@@ -338,7 +338,6 @@ scylla_tests = set([
'test/boost/hash_test',
'test/boost/hashers_test',
'test/boost/idl_test',
'test/boost/imr_test',
'test/boost/input_stream_test',
'test/boost/json_cql_query_test',
'test/boost/json_test',
@@ -356,7 +355,6 @@ scylla_tests = set([
'test/boost/intrusive_array_test',
'test/boost/map_difference_test',
'test/boost/memtable_test',
'test/boost/meta_test',
'test/boost/multishard_mutation_query_test',
'test/boost/murmur_hash_test',
'test/boost/mutation_fragment_test',
@@ -413,7 +411,6 @@ scylla_tests = set([
'test/boost/btree_test',
'test/boost/double_decker_test',
'test/boost/stall_free_test',
'test/boost/imr_test',
'test/boost/raft_sys_table_storage_test',
'test/manual/ec2_snitch_test',
'test/manual/enormous_table_scan_test',
@@ -861,7 +858,6 @@ scylla_core = (['database.cc',
'vint-serialization.cc',
'utils/arch/powerpc/crc32-vpmsum/crc32_wrapper.cc',
'querier.cc',
'data/cell.cc',
'mutation_writer/multishard_writer.cc',
'multishard_mutation_query.cc',
'reader_concurrency_semaphore.cc',
@@ -1036,7 +1032,6 @@ pure_boost_tests = set([
'test/boost/like_matcher_test',
'test/boost/linearizing_input_stream_test',
'test/boost/map_difference_test',
'test/boost/meta_test',
'test/boost/nonwrapping_range_test',
'test/boost/observable_test',
'test/boost/range_test',
@@ -1112,8 +1107,6 @@ deps['test/boost/estimated_histogram_test'] = ['test/boost/estimated_histogram_t
deps['test/boost/anchorless_list_test'] = ['test/boost/anchorless_list_test.cc']
deps['test/perf/perf_fast_forward'] += ['release.cc']
deps['test/perf/perf_simple_query'] += ['release.cc']
deps['test/boost/meta_test'] = ['test/boost/meta_test.cc']
deps['test/boost/imr_test'] = ['test/boost/imr_test.cc', 'utils/logalloc.cc', 'utils/dynamic_bitset.cc']
deps['test/boost/reusable_buffer_test'] = [
"test/boost/reusable_buffer_test.cc",
"test/lib/log.cc",

View File

@@ -36,9 +36,9 @@ converting_mutation_partition_applier::upgrade_cell(const abstract_type& new_typ
atomic_cell::collection_member cm) {
if (cell.is_live() && !old_type.is_counter()) {
if (cell.is_live_and_has_ttl()) {
return atomic_cell::make_live(new_type, cell.timestamp(), cell.value().linearize(), cell.expiry(), cell.ttl(), cm);
return atomic_cell::make_live(new_type, cell.timestamp(), cell.value(), cell.expiry(), cell.ttl(), cm);
}
return atomic_cell::make_live(new_type, cell.timestamp(), cell.value().linearize(), cm);
return atomic_cell::make_live(new_type, cell.timestamp(), cell.value(), cm);
} else {
return atomic_cell(new_type, cell);
}

View File

@@ -118,16 +118,14 @@ void counter_cell_view::apply(const column_definition& cdef, atomic_cell_or_coll
assert(!dst_ac.is_counter_update());
assert(!src_ac.is_counter_update());
with_linearized(dst_ac, [&] (counter_cell_view dst_ccv) {
with_linearized(src_ac, [&] (counter_cell_view src_ccv) {
auto src_ccv = counter_cell_view(src_ac);
auto dst_ccv = counter_cell_view(dst_ac);
if (dst_ccv.shard_count() >= src_ccv.shard_count()) {
auto dst_amc = dst.as_mutable_atomic_cell(cdef);
auto src_amc = src.as_mutable_atomic_cell(cdef);
if (!dst_amc.is_value_fragmented() && !src_amc.is_value_fragmented()) {
if (apply_in_place(cdef, dst_amc, src_amc)) {
return;
}
if (apply_in_place(cdef, dst_amc, src_amc)) {
return;
}
}
@@ -142,8 +140,6 @@ void counter_cell_view::apply(const column_definition& cdef, atomic_cell_or_coll
auto cell = result.build(std::max(dst_ac.timestamp(), src_ac.timestamp()));
src = std::exchange(dst, atomic_cell_or_collection(std::move(cell)));
});
});
}
std::optional<atomic_cell> counter_cell_view::difference(atomic_cell_view a, atomic_cell_view b)
@@ -158,8 +154,8 @@ std::optional<atomic_cell> counter_cell_view::difference(atomic_cell_view a, ato
return { };
}
return with_linearized(a, [&] (counter_cell_view a_ccv) {
return with_linearized(b, [&] (counter_cell_view b_ccv) {
auto a_ccv = counter_cell_view(a);
auto b_ccv = counter_cell_view(b);
auto a_shards = a_ccv.shards();
auto b_shards = b_ccv.shards();
@@ -186,8 +182,6 @@ std::optional<atomic_cell> counter_cell_view::difference(atomic_cell_view a, ato
diff = atomic_cell::make_live(*counter_type, a.timestamp(), bytes_view());
}
return diff;
});
});
}
@@ -225,14 +219,13 @@ void transform_counter_updates_to_shards(mutation& m, const mutation* current_st
if (!acv.is_live()) {
return; // continue -- we are in lambda
}
counter_cell_view::with_linearized(acv, [&] (counter_cell_view ccv) {
auto ccv = counter_cell_view(acv);
auto cs = ccv.get_shard(counter_id(local_id));
if (!cs) {
return; // continue
}
shards.emplace_back(std::make_pair(id, counter_shard(*cs)));
});
});
transformee.for_each_cell([&] (column_id id, atomic_cell_or_collection& ac_o_c) {
auto& cdef = s.column_at(kind, id);

View File

@@ -81,21 +81,20 @@ class basic_counter_shard_view {
total_size = unsigned(logical_clock) + sizeof(int64_t),
};
private:
using pointer_type = std::conditional_t<is_mutable == mutable_view::no, const signed char*, signed char*>;
pointer_type _base;
managed_bytes_basic_view<is_mutable> _base;
private:
template<typename T>
T read(offset off) const {
T value;
std::copy_n(_base + static_cast<unsigned>(off), sizeof(T), reinterpret_cast<signed char*>(&value));
return value;
auto v = _base;
v.remove_prefix(size_t(off));
return read_simple_native<T>(v);
}
public:
static constexpr auto size = size_t(offset::total_size);
public:
basic_counter_shard_view() = default;
explicit basic_counter_shard_view(pointer_type ptr) noexcept
: _base(ptr) { }
explicit basic_counter_shard_view(managed_bytes_basic_view<is_mutable> v) noexcept
: _base(v) { }
counter_id id() const { return read<counter_id>(offset::id); }
int64_t value() const { return read<int64_t>(offset::value); }
@@ -106,15 +105,24 @@ public:
static constexpr size_t size = size_t(offset::total_size) - off;
signed char tmp[size];
std::copy_n(_base + off, size, tmp);
std::copy_n(other._base + off, size, _base + off);
std::copy_n(tmp, size, other._base + off);
auto tmp_view = single_fragmented_mutable_view(bytes_mutable_view(std::data(tmp), std::size(tmp)));
managed_bytes_mutable_view this_view = _base.substr(off, size);
managed_bytes_mutable_view other_view = other._base.substr(off, size);
copy_fragmented_view(tmp_view, this_view);
copy_fragmented_view(this_view, other_view);
copy_fragmented_view(other_view, tmp_view);
}
void set_value_and_clock(const basic_counter_shard_view& other) noexcept {
static constexpr size_t off = size_t(offset::value);
static constexpr size_t size = size_t(offset::total_size) - off;
std::copy_n(other._base + off, size, _base + off);
managed_bytes_mutable_view this_view = _base.substr(off, size);
managed_bytes_mutable_view other_view = other._base.substr(off, size);
copy_fragmented_view(this_view, other_view);
}
bool operator==(const basic_counter_shard_view& other) const {
@@ -140,11 +148,6 @@ class counter_shard {
counter_id _id;
int64_t _value;
int64_t _logical_clock;
private:
template<typename T>
static void write(const T& value, bytes::iterator& out) {
out = std::copy_n(reinterpret_cast<const signed char*>(&value), sizeof(T), out);
}
private:
// Shared logic for applying counter_shards and counter_shard_views.
// T is either counter_shard or basic_counter_shard_view<U>.
@@ -195,10 +198,10 @@ public:
static constexpr size_t serialized_size() {
return counter_shard_view::size;
}
void serialize(bytes::iterator& out) const {
write(_id, out);
write(_value, out);
write(_logical_clock, out);
void serialize(atomic_cell_value_mutable_view& out) const {
write_native<counter_id>(out, _id);
write_native<int64_t>(out, _value);
write_native<int64_t>(out, _logical_clock);
}
};
@@ -235,7 +238,7 @@ public:
size_t serialized_size() const {
return _shards.size() * counter_shard::serialized_size();
}
void serialize(bytes::iterator& out) const {
void serialize(atomic_cell_value_mutable_view& out) const {
for (auto&& cs : _shards) {
cs.serialize(out);
}
@@ -246,31 +249,18 @@ public:
}
atomic_cell build(api::timestamp_type timestamp) const {
// If we can assume that the counter shards never cross fragment boundaries
// the serialisation code gets much simpler.
static_assert(data::cell::maximum_external_chunk_length % counter_shard::serialized_size() == 0);
auto ac = atomic_cell::make_live_uninitialized(*counter_type, timestamp, serialized_size());
auto dst_it = ac.value().begin();
auto dst_current = *dst_it++;
auto dst = ac.value();
for (auto&& cs : _shards) {
if (dst_current.empty()) {
dst_current = *dst_it++;
}
assert(!dst_current.empty());
auto value_dst = dst_current.data();
cs.serialize(value_dst);
dst_current.remove_prefix(counter_shard::serialized_size());
cs.serialize(dst);
}
return ac;
}
static atomic_cell from_single_shard(api::timestamp_type timestamp, const counter_shard& cs) {
// We don't really need to bother with fragmentation here.
static_assert(data::cell::maximum_external_chunk_length >= counter_shard::serialized_size());
auto ac = atomic_cell::make_live_uninitialized(*counter_type, timestamp, counter_shard::serialized_size());
auto dst = ac.value().first_fragment().begin();
auto dst = ac.value();
cs.serialize(dst);
return ac;
}
@@ -309,12 +299,7 @@ public:
template<mutable_view is_mutable>
class basic_counter_cell_view {
protected:
using linearized_value_view = std::conditional_t<is_mutable == mutable_view::no,
bytes_view, bytes_mutable_view>;
using pointer_type = std::conditional_t<is_mutable == mutable_view::no,
bytes_view::const_pointer, bytes_mutable_view::pointer>;
basic_atomic_cell_view<is_mutable> _cell;
linearized_value_view _value;
private:
class shard_iterator {
public:
@@ -324,12 +309,12 @@ private:
using pointer = basic_counter_shard_view<is_mutable>*;
using reference = basic_counter_shard_view<is_mutable>&;
private:
pointer_type _current;
managed_bytes_basic_view<is_mutable> _current;
basic_counter_shard_view<is_mutable> _current_view;
size_t _pos = 0;
public:
shard_iterator() = default;
shard_iterator(pointer_type ptr) noexcept
: _current(ptr), _current_view(ptr) { }
shard_iterator(managed_bytes_basic_view<is_mutable> v, size_t offset) noexcept
: _current(v), _current_view(_current), _pos(offset) { }
basic_counter_shard_view<is_mutable>& operator*() noexcept {
return _current_view;
@@ -338,8 +323,8 @@ private:
return &_current_view;
}
shard_iterator& operator++() noexcept {
_current += counter_shard_view::size;
_current_view = basic_counter_shard_view<is_mutable>(_current);
_pos += counter_shard_view::size;
_current_view = basic_counter_shard_view<is_mutable>(_current.substr(_pos, counter_shard_view::size));
return *this;
}
shard_iterator operator++(int) noexcept {
@@ -348,8 +333,8 @@ private:
return it;
}
shard_iterator& operator--() noexcept {
_current -= counter_shard_view::size;
_current_view = basic_counter_shard_view<is_mutable>(_current);
_pos -= counter_shard_view::size;
_current_view = basic_counter_shard_view<is_mutable>(_current.substr(_pos, counter_shard_view::size));
return *this;
}
shard_iterator operator--(int) noexcept {
@@ -358,31 +343,29 @@ private:
return it;
}
bool operator==(const shard_iterator& other) const noexcept {
return _current == other._current;
}
bool operator!=(const shard_iterator& other) const noexcept {
return !(*this == other);
return _pos == other._pos;
}
};
public:
boost::iterator_range<shard_iterator> shards() const {
auto begin = shard_iterator(_value.data());
auto end = shard_iterator(_value.data() + _value.size());
auto value = _cell.value();
auto begin = shard_iterator(value, 0);
auto end = shard_iterator(value, value.size());
return boost::make_iterator_range(begin, end);
}
size_t shard_count() const {
return _cell.value().size_bytes() / counter_shard_view::size;
return _cell.value().size() / counter_shard_view::size;
}
protected:
public:
// ac must be a live counter cell
explicit basic_counter_cell_view(basic_atomic_cell_view<is_mutable> ac, linearized_value_view vv) noexcept
: _cell(ac), _value(vv)
explicit basic_counter_cell_view(basic_atomic_cell_view<is_mutable> ac) noexcept
: _cell(ac)
{
assert(_cell.is_live());
assert(!_cell.is_counter_update());
}
public:
api::timestamp_type timestamp() const { return _cell.timestamp(); }
static data_type total_value_type() { return long_type; }
@@ -411,14 +394,6 @@ public:
struct counter_cell_view : basic_counter_cell_view<mutable_view::no> {
using basic_counter_cell_view::basic_counter_cell_view;
template<typename Function>
static decltype(auto) with_linearized(basic_atomic_cell_view<mutable_view::no> ac, Function&& fn) {
return ac.value().with_linearized([&] (bytes_view value_view) {
counter_cell_view ccv(ac, value_view);
return fn(ccv);
});
}
// Reversibly applies two counter cells, at least one of them must be live.
static void apply(const column_definition& cdef, atomic_cell_or_collection& dst, atomic_cell_or_collection& src);
@@ -433,9 +408,8 @@ struct counter_cell_mutable_view : basic_counter_cell_view<mutable_view::yes> {
using basic_counter_cell_view::basic_counter_cell_view;
explicit counter_cell_mutable_view(atomic_cell_mutable_view ac) noexcept
: basic_counter_cell_view<mutable_view::yes>(ac, ac.value().first_fragment())
: basic_counter_cell_view<mutable_view::yes>(ac)
{
assert(!ac.value().is_fragmented());
}
void set_timestamp(api::timestamp_type ts) { _cell.set_timestamp(ts); }

View File

@@ -63,7 +63,7 @@ bytes_opt do_get_value(const schema& schema,
}
assert(cdef.is_atomic());
auto c = cell->as_atomic_cell(cdef);
return c.is_dead(now) ? std::nullopt : bytes_opt(c.value().linearize());
return c.is_dead(now) ? std::nullopt : bytes_opt(to_bytes(c.value()));
}
}

View File

@@ -24,6 +24,7 @@
#include "error_injection_fcts.hh"
#include "utils/error_injection.hh"
#include "types/list.hh"
#include <seastar/core/map_reduce.hh>
namespace cql3
{

View File

@@ -1,52 +0,0 @@
/*
* Copyright (C) 2018 ScyllaDB
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#include "data/cell.hh"
#include "types.hh"
thread_local imr::alloc::context_factory<data::cell::last_chunk_context> lcc;
thread_local imr::alloc::lsa_migrate_fn<data::cell::external_last_chunk,
imr::alloc::context_factory<data::cell::last_chunk_context>> data::cell::lsa_last_chunk_migrate_fn(lcc);
thread_local imr::alloc::context_factory<data::cell::chunk_context> ecc;
thread_local imr::alloc::lsa_migrate_fn<data::cell::external_chunk,
imr::alloc::context_factory<data::cell::chunk_context>> data::cell::lsa_chunk_migrate_fn(ecc);
int compare_unsigned(data::value_view lhs, data::value_view rhs) noexcept
{
auto it1 = lhs.begin();
auto it2 = rhs.begin();
while (it1 != lhs.end() && it2 != rhs.end()) {
auto r = ::compare_unsigned(*it1, *it2);
if (r) {
return r;
}
++it1;
++it2;
}
if (it1 != lhs.end()) {
return 1;
} else if (it2 != rhs.end()) {
return -1;
}
return 0;
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,75 +0,0 @@
/*
* Copyright (C) 2018 ScyllaDB
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "data/cell.hh"
namespace data {
template <typename T>
using value_writer = cell::value_writer<T>;
inline value_writer<empty_fragment_range> cell::variable_value::write(size_t value_size, bool force_internal) noexcept
{
static_assert(imr::WriterAllocator<value_writer<empty_fragment_range>, structure>);
return value_writer<empty_fragment_range>(empty_fragment_range(), value_size, force_internal);
}
template<typename FragmentRange>
inline value_writer<std::decay_t<FragmentRange>> cell::variable_value::write(FragmentRange&& value, bool force_internal) noexcept
{
static_assert(imr::WriterAllocator<value_writer<std::decay_t<FragmentRange>>, structure>);
return value_writer<std::decay_t<FragmentRange>>(std::forward<FragmentRange>(value), value.size_bytes(), force_internal);
}
inline auto cell::variable_value::write(bytes_view value, bool force_internal) noexcept
{
return write(single_fragment_range(value), force_internal);
}
template<mutable_view is_mutable>
inline basic_value_view<is_mutable> cell::variable_value::do_make_view(structure::basic_view<is_mutable> view, bool external_storage)
{
auto size = view.template get<tags::value_size>().load();
context ctx(external_storage, size);
return view.template get<tags::value_data>().visit(make_visitor(
[&] (imr::pod<uint8_t*>::view ptr) {
auto ex_ptr = static_cast<uint8_t*>(ptr.load());
if (size > cell::effective_external_chunk_length) {
auto ex_ctx = chunk_context(ex_ptr);
auto ex_view = external_chunk::make_view(ex_ptr, ex_ctx);
auto next = static_cast<uint8_t*>(ex_view.get<tags::chunk_next>().load());
return basic_value_view<is_mutable>(ex_view.get<tags::chunk_data>(ex_ctx), size - cell::effective_external_chunk_length, next);
} else {
auto ex_ctx = last_chunk_context(ex_ptr);
auto ex_view = external_last_chunk::make_view(ex_ptr, ex_ctx);
assert(ex_view.get<tags::chunk_data>(ex_ctx).size() == size);
return basic_value_view<is_mutable>(ex_view.get<tags::chunk_data>(ex_ctx), 0, nullptr);
}
},
[] (imr::buffer<tags::data>::basic_view<is_mutable> data) {
return basic_value_view<is_mutable>(data, 0, nullptr);
}
), ctx);
}
}

View File

@@ -1,65 +0,0 @@
/*
* Copyright (C) 2018 ScyllaDB
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <cstddef>
namespace data {
/// Type information
///
/// `type_info` keeps the type information relevant for the serialisation code.
/// In particular we need to distinguish between fixed-size and variable-sized
/// types. Collections and counters are considered to be variable-sized types.
///
/// \note Even if the type is fixed-size (e.g. `int32_type`) the value can be
/// empty and its length will be 0. This is a special (and rare) case handled
/// by the cell implementation and ignored by `type_info`.
class type_info {
size_t _fixed_size;
private:
explicit type_info(size_t size) noexcept : _fixed_size(size) { }
public:
static type_info make_fixed_size(size_t size) noexcept {
return type_info { size_t(size) };
}
static type_info make_variable_size() noexcept {
return type_info { 0 };
}
static type_info make_collection() noexcept {
return type_info { 0 };
}
/// Check whether the type is fixed-size.
bool is_fixed_size() const noexcept {
return _fixed_size > 0;
}
/// Get the size of the value of a fixed-size type.
///
/// Valid only if `is_fixed_size()` returns `true`.
size_t value_size() const noexcept {
return _fixed_size;
}
};
}

View File

@@ -1,137 +0,0 @@
/*
* Copyright (C) 2018 ScyllaDB
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "utils/fragment_range.hh"
namespace data {
/// View of a cell value
///
/// `basic_value_view` is a non-owning reference to a, possibly fragmented,
/// opaque value of a cell. It behaves like an immutable range of fragments.
///
/// Moreover, there are functions that linearise the value in order to ease the
/// integration with the pre-existing code. Nevertheless, using them should be
/// avoided.
///
/// \note For now `basic_value_view` is used by regular atomic cells, counters
/// and collections. This is due to the fact that counters and collections
/// haven't been fully transitioned to the IMR yet and still use custom
/// serialisation formats. Once this is resolved `value_view` can be used
/// exclusively by regular atomic cells.
// The value is stored as a chain of fragments: the first fragment is held
// directly, the following ones are reached through `_next`, an opaque raw
// pointer into IMR chunk structures (decoded by iterator::operator++ in
// value_view_impl.hh).
template<mutable_view is_mutable>
class basic_value_view {
public:
    // bytes_view for immutable views, bytes_mutable_view for mutable ones.
    using fragment_type = std::conditional_t<is_mutable == mutable_view::no,
                                             bytes_view, bytes_mutable_view>;
    using raw_pointer_type = std::conditional_t<is_mutable == mutable_view::no,
                                                const uint8_t*, uint8_t*>;
private:
    size_t _remaining_size;          // total bytes in all fragments after the first
    fragment_type _first_fragment;
    raw_pointer_type _next;          // raw pointer to the next IMR chunk; null if none
public:
    basic_value_view(fragment_type first, size_t remaining_size, raw_pointer_type next)
        : _remaining_size(remaining_size), _first_fragment(first), _next(next)
    { }
    /// View of a value consisting of a single fragment.
    explicit basic_value_view(fragment_type first)
        : basic_value_view(first, 0, nullptr)
    { }
    /// Iterator over fragments
    class iterator {
        fragment_type _view;         // the current fragment
        raw_pointer_type _next;      // chunk holding the fragments after this one
        size_t _left;                // bytes remaining after the current fragment
    public:
        using iterator_category = std::forward_iterator_tag;
        using value_type = fragment_type;
        using pointer = const fragment_type*;
        using reference = const fragment_type&;
        using difference_type = std::ptrdiff_t;
        iterator(fragment_type bv, size_t total, raw_pointer_type next) noexcept
            : _view(bv), _next(next), _left(total) { }
        const fragment_type& operator*() const {
            return _view;
        }
        const fragment_type* operator->() const {
            return &_view;
        }
        // Defined in value_view_impl.hh; decodes the IMR chunk chain.
        iterator& operator++();
        iterator operator++(int) {
            auto it = *this;
            operator++();
            return it;
        }
        // Iterators compare equal when they reference the same fragment
        // data; end() carries a default-constructed (null-data) fragment.
        bool operator==(const iterator& other) const {
            return _view.data() == other._view.data();
        }
        bool operator!=(const iterator& other) const {
            return !(*this == other);
        }
    };
    using const_iterator = iterator;
    auto begin() const {
        return iterator(_first_fragment, _remaining_size, _next);
    }
    auto end() const {
        return iterator(fragment_type(), 0, nullptr);
    }
    // Equality operators are defined in value_view_impl.hh.
    bool operator==(const basic_value_view& other) const noexcept;
    bool operator==(bytes_view bv) const noexcept;
    /// Total size of the value
    size_t size_bytes() const noexcept {
        return _first_fragment.size() + _remaining_size;
    }
    bool empty() const noexcept {
        // An empty first fragment implies the whole value is empty.
        return _first_fragment.empty();
    }
    bool is_fragmented() const noexcept {
        return bool(_next);
    }
    fragment_type first_fragment() const noexcept {
        return _first_fragment;
    }
    // Linearisation helpers (value_view_impl.hh); they copy the whole value,
    // so avoid them where possible.
    bytes linearize() const;
    template<typename Function>
    decltype(auto) with_linearized(Function&& fn) const;
};
using value_view = basic_value_view<mutable_view::no>;
using value_mutable_view = basic_value_view<mutable_view::yes>;
}

View File

@@ -1,116 +0,0 @@
/*
* Copyright (C) 2018 ScyllaDB
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "data/cell.hh"
namespace data {
// Advance to the next fragment by decoding the IMR chunk chain.
template<mutable_view is_mutable>
inline typename basic_value_view<is_mutable>::iterator& basic_value_view<is_mutable>::iterator::operator++()
{
    if (!_next) {
        // No more chunks: become equal to end() (default-constructed view
        // with a null data pointer).
        _view = fragment_type();
    } else if (_left > cell::effective_external_chunk_length) {
        // Interior chunk: expose its data and follow its next-chunk pointer.
        cell::chunk_context ctx(_next);
        auto v = cell::external_chunk::make_view(_next, ctx);
        _next = static_cast<uint8_t*>(v.template get<cell::tags::chunk_next>(ctx).load());
        _view = v.template get<cell::tags::chunk_data>(ctx);
        _left -= cell::effective_external_chunk_length;
    } else {
        // Last chunk: a different IMR structure with no next-chunk pointer.
        cell::last_chunk_context ctx(_next);
        auto v = cell::external_last_chunk::make_view(_next, ctx);
        _view = v.template get<cell::tags::chunk_data>(ctx);
        _next = nullptr;
    }
    return *this;
}
// Fragment-wise equality of two values.
template<mutable_view is_mutable>
inline bool basic_value_view<is_mutable>::operator==(const basic_value_view& other) const noexcept
{
    // We can assume that all values are fragmented exactly in the same way.
    auto lhs = begin();
    auto rhs = other.begin();
    for (; lhs != end() && rhs != other.end(); ++lhs, ++rhs) {
        if (!(*lhs == *rhs)) {
            return false;
        }
    }
    // Equal only if both sequences were exhausted together.
    return lhs == end() && rhs == other.end();
}
// Compare a (possibly fragmented) value with a contiguous buffer.
template<mutable_view is_mutable>
inline bool basic_value_view<is_mutable>::operator==(bytes_view bv) const noexcept
{
    for (bytes_view fragment : *this) {
        // A fragment longer than what is left of bv cannot match.
        if (fragment.size() > bv.size()) {
            return false;
        }
        if (fragment != bv.substr(0, fragment.size())) {
            return false;
        }
        bv.remove_prefix(fragment.size());
    }
    // Equal only if bv was consumed exactly.
    return bv.empty();
}
// Materialise the whole (possibly fragmented) value into one contiguous
// buffer. Allocates and copies size_bytes() bytes — avoid on hot paths.
template<mutable_view is_mutable>
inline bytes basic_value_view<is_mutable>::linearize() const
{
    bytes b(bytes::initialized_later(), size_bytes());
    auto it = b.begin();
    for (auto fragment : *this) {
        it = boost::copy(fragment, it);
    }
    return b;
}
// Invoke `fn` with a contiguous bytes_view of the value, linearizing only
// when the value is actually fragmented.
template<mutable_view is_mutable>
template<typename Function>
inline decltype(auto) basic_value_view<is_mutable>::with_linearized(Function&& fn) const
{
    // Fast path: a single fragment needs no copy.
    if (!is_fragmented()) {
        bytes_view bv = _first_fragment;
        return fn(bv);
    }
    // Slow path: copy the fragments into one buffer that lives across the call.
    bytes b = linearize();
    bytes_view bv = b;
    return fn(bv);
}
inline std::ostream& operator<<(std::ostream& os, value_view vv)
{
using boost::range::for_each;
for_each(vv, [&os] (bytes_view fragment) {
os << fragment;
});
return os;
}
}

View File

@@ -79,6 +79,7 @@
#include "commitlog_entry.hh"
#include "commitlog_extensions.hh"
#include "service/priority_manager.hh"
#include "serializer.hh"
#include <boost/range/numeric.hpp>
#include <boost/range/adaptor/transformed.hpp>

View File

@@ -32,6 +32,7 @@
#include "gms/gossiper.hh"
#include "utils/small_vector.hh"
#include "lister.hh"
#include "enum_set.hh"
namespace service {
class storage_proxy;

View File

@@ -22,6 +22,8 @@
#include "row_locking.hh"
#include "log.hh"
#include <seastar/core/when_all.hh>
static logging::logger mylog("row_locking");
row_locker::row_locker(schema_ptr s)

View File

@@ -433,15 +433,7 @@ deletable_row& view_updates::get_view_row(const partition_key& base_key, const c
default:
auto& c = update.cells().cell_at(base_col->id);
auto value_view = base_col->is_atomic() ? c.as_atomic_cell(cdef).value() : c.as_collection_mutation().data;
// FIXME: don't linearize.
// This is hard right now, because we are dealing with two different types:
// managed_bytes_view and data::basic_value_view, and we can't put both types in one
// container.
// If IMR transitions to managed_bytes_view, this should be revisited.
if (value_view.is_fragmented()) {
return managed_bytes_view(linearized_values.emplace_back(value_view.linearize()));
}
return value_view.first_fragment();
return value_view;
}
});
auto& partition = partition_for(partition_key::from_range(_view->partition_key_columns() | get_value));

View File

@@ -1,321 +0,0 @@
/*
* Copyright (C) 2018 ScyllaDB
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "utils/chunked_vector.hh"
#include "utils/logalloc.hh"
#include "imr/core.hh"
#include "imr/methods.hh"
namespace imr {
namespace alloc {
// Factory producing the trivial `no_context` for IMR types whose
// deserialisation needs no external state. The pointer argument is ignored.
static const struct no_context_factory_t {
    static no_context_t create(const void*) noexcept { return no_context; }
} no_context_factory;
/// Deserialisation context factory
///
/// Deserialisation contexts provide the IMR code with additional information
/// needed to deserialise an IMR object. Often the sources of that information
/// are both the object itself as well as some external state shared by multiple
/// IMR objects of the same type.
/// `context_factory` is a helper class for creating contexts it keeps the
/// shared state (e.g. per-schema information) and when given a pointer to a
/// IMR object creates a deserialisation context for it.
template<typename Context, typename... State>
class context_factory {
    std::tuple<State...> _state;   // shared state passed to every created context
private:
    // Expands the stored state tuple into the Context constructor.
    template<size_t... Index>
    Context create(const uint8_t* ptr, std::index_sequence<Index...>) const noexcept {
        return Context(ptr, std::get<Index>(_state)...);
    }
public:
    template<typename... Args>
    context_factory(Args&&... args) : _state(std::forward<Args>(args)...) { }
    // The non-const copy constructor is spelled out so that copying from a
    // non-const lvalue selects it rather than the greedy variadic
    // constructor above.
    context_factory(context_factory&) = default;
    context_factory(const context_factory&) = default;
    context_factory(context_factory&&) = default;
    /// Create a deserialisation context for the IMR object at `ptr`.
    Context create(const uint8_t* ptr) const noexcept {
        return create(ptr, std::index_sequence_for<State...>());
    }
};
// A ContextFactory produces a deserialisation context for an IMR object
// given a pointer to it; creation must be noexcept.
template<typename T>
concept ContextFactory = requires(const T factory, const uint8_t* ptr) {
    { factory.create(ptr) } noexcept;
};
static_assert(ContextFactory<no_context_factory_t>,
              "no_context_factory_t has to meet ContextFactory constraints");
/// LSA migrator for IMR objects
///
/// IMR objects may own memory and therefore moving and destroying them may
/// be non-trivial. This class implements an LSA migrator for an IMR objects
/// of type `Structure`. The deserialisation context needed to invoke the mover
/// is going to be created by the provided context factory `CtxFactory`.
template<typename Structure, typename CtxFactory>
requires ContextFactory<CtxFactory>
class lsa_migrate_fn final : public migrate_fn_type, CtxFactory {
public:
    using structure = Structure;
    explicit lsa_migrate_fn(CtxFactory context_factory)
        // NOTE(review): the `1` forwarded to migrate_fn_type is presumably
        // the required alignment — confirm against logalloc.
        : migrate_fn_type(1)
        , CtxFactory(std::move(context_factory))
    { }
    // Non-copyable, non-movable: migrators are long-lived objects referenced
    // by the allocations they manage.
    lsa_migrate_fn(lsa_migrate_fn&&) = delete;
    lsa_migrate_fn(const lsa_migrate_fn&) = delete;
    lsa_migrate_fn& operator=(lsa_migrate_fn&&) = delete;
    lsa_migrate_fn& operator=(const lsa_migrate_fn&) = delete;
    virtual void migrate(void* src_ptr, void* dst_ptr, size_t size) const noexcept override {
        // Copy the raw bytes first, then let the IMR mover fix up whatever
        // depends on the object's address.
        std::memcpy(dst_ptr, src_ptr, size);
        auto dst = static_cast<uint8_t*>(dst_ptr);
        methods::move<Structure>(dst, CtxFactory::create(dst));
    }
    virtual size_t size(const void* obj_ptr) const noexcept override {
        auto ptr = static_cast<const uint8_t*>(obj_ptr);
        return Structure::serialized_object_size(ptr, CtxFactory::create(ptr));
    }
};
// LSA migrator for objects which mover doesn't require a deserialisation context
template<typename Structure>
struct default_lsa_migrate_fn {
    static lsa_migrate_fn<Structure, no_context_factory_t> migrate_fn;
};
// Out-of-class definition of the static migrator (one instance per Structure).
template<typename Structure>
lsa_migrate_fn<Structure, no_context_factory_t> default_lsa_migrate_fn<Structure>::migrate_fn(no_context_factory);
/// IMR object allocator
///
/// This is a helper class that helps creating IMR objects that may own memory.
/// The serialisation of IMR objects is done in two phases:
/// 1. IMR figures out the size of the object. `sizer` provided by `get_sizer()`
/// records the size of all necessary memory allocations.
/// `allocate_all()` is called and allocates memory for all owned objects.
/// 2. Data is written to the allocated memory. `serializer` returned by
/// `get_serializer()` provides pointers to the allocated buffers and handles
/// their serialisation.
class object_allocator {
    // Each `allocation` starts life as an allocation request (phase 1) and,
    // once allocate() runs, becomes the allocated object (phase 2). The two
    // states never coexist, hence the union.
    union allocation {
        static_assert(std::is_trivially_destructible_v<std::pair<size_t, void*>>);
        static_assert(std::is_trivially_destructible_v<std::pair<size_t, allocation_strategy::migrate_fn>>);
    private:
        std::pair<size_t, allocation_strategy::migrate_fn> _allocation_request;  // (size, migrator)
        std::pair<size_t, void*> _allocated_object;                              // (size, buffer)
    public:
        explicit allocation(size_t n, allocation_strategy::migrate_fn fn) noexcept
            : _allocation_request(std::make_pair(n, fn)) { }
        // Switches the union from the request state to the allocated state.
        void allocate(allocation_strategy& allocator) {
            auto ptr = allocator.alloc(_allocation_request.second, _allocation_request.first, 1);
            _allocated_object = std::make_pair(_allocation_request.first, ptr);
        }
        void free(allocation_strategy& allocator) noexcept {
            allocator.free(_allocated_object.second, _allocated_object.first);
        }
        void set_request_size(size_t n) noexcept {
            _allocation_request.first = n;
        }
        void* pointer() const noexcept { return _allocated_object.second; }
        size_t size() const noexcept { return _allocated_object.first; }
    };
    allocation_strategy& _allocator;
    std::vector<allocation> _allocations;
    size_t _position = 0;   // index of the next buffer handed out by the serializer
    bool _failed = false;   // a request failed to be recorded; reported by allocate_all()
private:
    // Record a request for an n-byte allocation; returns its index.
    // noexcept: failure to grow _allocations is remembered in _failed and
    // surfaces later as bad_alloc in allocate_all().
    size_t request(size_t n, allocation_strategy::migrate_fn migrate) noexcept {
        auto id = _allocations.size();
        try {
            _allocations.emplace_back(n, migrate);
        } catch (...) {
            _failed = true;
        }
        return id;
    }
    // Patch a previously recorded request with its real size. Guarded by
    // _failed: if recording failed, `id` does not refer to a valid entry.
    void set_request_size(size_t id, size_t n) noexcept {
        if (__builtin_expect(!_failed, true)) {
            _allocations[id].set_request_size(n);
        }
    }
    // Hand out allocated buffers in the same order the requests were made.
    uint8_t* next_object() noexcept {
        return static_cast<uint8_t*>(_allocations[_position++].pointer());
    }
public:
    // Phase-1 helper: records the sizes of the buffers the IMR objects will
    // need, without writing anything.
    class sizer {
        object_allocator& _parent;
    public:
        // Continuation run when the size of a nested object becomes known;
        // patches the recorded request with the real size.
        class continuation {
            object_allocator& _parent;
            size_t _idx;   // index of the allocation request to patch
        public:
            continuation(object_allocator& parent, size_t idx) noexcept
                : _parent(parent), _idx(idx) { }
            uint8_t* run(size_t size) noexcept {
                _parent.set_request_size(_idx, size);
                return nullptr;
            }
        };
    public:
        explicit sizer(object_allocator& parent) noexcept
            : _parent(parent) { }
        /// Request allocation of an IMR object
        ///
        /// This method request an allocation of an IMR object of type T. The
        /// arguments are passed to `T::size_when_serialized`.
        ///
        /// \return null pointer of type `uint8_t*`.
        template<typename T, typename MigrateFn, typename... Args>
        uint8_t* allocate(MigrateFn* migrate_fn, Args&&... args) noexcept {
            static_assert(std::is_same_v<typename MigrateFn::structure, T>);
            return do_allocate<T>(migrate_fn, std::forward<Args>(args)...);
        }
        template<typename T, typename MigrateFn, typename... Args>
        auto allocate_nested(MigrateFn* migrate_fn, Args&&... args) noexcept {
            static_assert(std::is_same_v<typename MigrateFn::structure, T>);
            return do_allocate_nested<T>(migrate_fn, std::forward<Args>(args)...);
        }
    private:
        template<typename T, typename... Args>
        uint8_t* do_allocate(migrate_fn_type* migrate_fn, Args&&... args) noexcept {
            auto size = T::size_when_serialized(std::forward<Args>(args)...);
            _parent.request(size, migrate_fn);
            // We are in the sizing phase and only collect information about
            // the size of the required objects. The serializer will return
            // the real pointer to the memory buffer requested here, but since
            // both sizer and serializer need to expose the same interface we
            // need to return something from sizer as well even though the
            // value will be ignored.
            return nullptr;
        }
        template<typename T, typename... Args>
        auto do_allocate_nested(migrate_fn_type* migrate_fn, Args&& ... args) noexcept {
            // The real size is not known yet; record a zero-byte request and
            // let the continuation patch it once sizing completes.
            auto n = _parent.request(0, migrate_fn);
            return T::get_sizer(continuation(_parent, n),
                                std::forward<Args>(args)...);
        }
    };
    // Phase-2 helper: writes objects into the buffers allocated from the
    // requests recorded by `sizer`, in the same order.
    class serializer {
        object_allocator& _parent;
    public:
        // Continuation run when a nested object is fully written; yields the
        // object's own buffer rather than the nested write position.
        class continuation {
            uint8_t* _ptr;
        public:
            explicit continuation(uint8_t* ptr) noexcept : _ptr(ptr) { }
            uint8_t* run(uint8_t*) noexcept {
                return _ptr;
            }
        };
    public:
        explicit serializer(object_allocator& parent) noexcept
            : _parent(parent) { }
        /// Writes an IMR object to the preallocated buffer
        ///
        /// In the second serialisation phase this method writes an IMR object
        /// to the buffer requested in the sizing phase. Arguments are passed
        /// to `T::serialize`.
        /// \return pointer to the IMR object
        template<typename T, typename MigrateFn, typename... Args>
        uint8_t* allocate(MigrateFn* migrate_fn, Args&&... args) noexcept {
            static_assert(std::is_same_v<typename MigrateFn::structure, T>);
            return do_allocate<T>(migrate_fn, std::forward<Args>(args)...);
        }
        template<typename T, typename MigrateFn, typename... Args>
        auto allocate_nested(MigrateFn* migrate_fn, Args&&... args) noexcept {
            static_assert(std::is_same_v<typename MigrateFn::structure, T>);
            return do_allocate_nested<T>(migrate_fn, std::forward<Args>(args)...);
        }
    private:
        template<typename T, typename... Args>
        uint8_t* do_allocate(migrate_fn_type* migrate_fn, Args&&... args) noexcept {
            auto ptr = _parent.next_object();
            T::serialize(ptr, std::forward<Args>(args)...);
            return ptr;
        }
        template<typename T, typename... Args>
        auto do_allocate_nested(migrate_fn_type*, Args&& ... args) noexcept {
            auto ptr = _parent.next_object();
            return T::get_serializer(ptr,
                                     continuation(ptr),
                                     std::forward<Args>(args)...);
        }
    };
public:
    explicit object_allocator(allocation_strategy& allocator = current_allocator())
        : _allocator(allocator) { }
    size_t requested_allocations_count() const noexcept { return _allocations.size(); }
    /// Allocates all buffers requested in the sizing phase.
    void allocate_all() {
        if (__builtin_expect(_failed, false)) {
            throw std::bad_alloc();
        }
        auto it = _allocations.begin();
        try {
            // TODO: Send a batch of allocations to the allocation strategy.
            while (it != _allocations.end()) {
                it->allocate(_allocator);
                ++it;
            }
        } catch (...) {
            // Roll back: free everything allocated so far, in reverse order.
            while (it != _allocations.begin()) {
                --it;
                it->free(_allocator);
            }
            throw;
        }
    }
    sizer get_sizer() noexcept { return sizer(*this); }
    serializer get_serializer() noexcept { return serializer(*this); }
};
}
}

View File

@@ -1,591 +0,0 @@
/*
* Copyright (C) 2018 ScyllaDB
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <array>
#include <type_traits>
#include <concepts>
#include "utils/meta.hh"
#include "imr/core.hh"
namespace imr {
/// Optionally present object
///
/// Represents a value that may be not present. Information whether or not
/// the optional is engaged is not stored and must be provided by external
/// context.
template<typename Tag, typename Type>
struct optional {
    using underlying = Type;
public:
    template<::mutable_view is_mutable>
    class basic_view {
        using pointer_type = std::conditional_t<is_mutable == ::mutable_view::no,
                                                const uint8_t*, uint8_t*>;
        pointer_type _ptr;
    public:
        explicit basic_view(pointer_type ptr) noexcept : _ptr(ptr) { }
        // A mutable view implicitly converts to an immutable one.
        operator basic_view<::mutable_view::no>() const noexcept {
            return basic_view<::mutable_view::no>(_ptr);
        }
        // View the contained object; valid only when the optional is engaged
        // (which the caller must know from external context).
        // NOTE(review): the requires-expression checks `context_for<Tag>()`
        // with no arguments while the call below passes `_ptr` — confirm the
        // context types provide the overload actually used.
        template<typename Context = no_context_t>
        requires requires(const Context& ctx) {
            { ctx.template context_for<Tag>() } noexcept;
        }
        auto get(const Context& ctx = no_context) noexcept {
            return Type::make_view(_ptr, ctx.template context_for<Tag>(_ptr));
        }
    };
    using view = basic_view<::mutable_view::no>;
    using mutable_view = basic_view<::mutable_view::yes>;
public:
    template<typename Context = no_context_t>
    static auto make_view(const uint8_t* in, const Context& ctx = no_context) noexcept {
        return view(in);
    }
    template<typename Context = no_context_t>
    static auto make_view(uint8_t* in, const Context& ctx = no_context) noexcept {
        return mutable_view(in);
    }
public:
    // Serialised size: zero when disengaged, the underlying type's size
    // otherwise; the context decides which.
    template<typename Context>
    requires requires(const Context& ctx) {
        { ctx.template is_present<Tag>() } noexcept -> std::same_as<bool>;
    }
    static size_t serialized_object_size(const uint8_t* in, const Context& context) noexcept {
        return context.template is_present<Tag>()
               ? Type::serialized_object_size(in, context)
               : 0;
    }
    // The serialisation interface forwards to the underlying type; writing
    // nothing for a disengaged optional is handled by the structure
    // sizer/serializer (skip()), not here.
    template<typename... Args>
    static size_t size_when_serialized(Args&&... args) noexcept {
        return Type::size_when_serialized(std::forward<Args>(args)...);
    }
    template<typename... Args>
    static size_t serialize(uint8_t* out, Args&&... args) noexcept {
        return Type::serialize(out, std::forward<Args>(args)...);
    }
    template<typename Continuation = no_op_continuation>
    static auto get_sizer(Continuation cont = no_op_continuation()) {
        return Type::get_sizer(std::move(cont));
    }
    template<typename Continuation = no_op_continuation>
    static auto get_serializer(uint8_t* out, Continuation cont = no_op_continuation()) {
        return Type::get_serializer(out, std::move(cont));
    }
};
// Associates a tag type (used for member lookup) with an IMR type.
template<typename Tag, typename Type>
struct member {
    using tag = Tag;
    using type = Type;
};
namespace internal {
// Metafunction predicate matching a `member` whose tag is `Tag`.
template<typename Tag>
struct do_find_member {
    template<typename Member>
    using type = std::is_same<Tag, typename Member::tag>;
};
// Index of the member tagged `Tag` within `Members...`.
template<typename Tag, typename... Members>
static constexpr auto get_member_index = meta::find_if<do_find_member<Tag>::template type, Members...>;
// The member type tagged `Tag` within `Members...`.
template<typename Tag, typename... Members>
using get_member = meta::get<get_member_index<Tag, Members...>, Members...>;
// Compile-time-generated binary search over [Offset, Offset + N): dispatches
// the runtime index `n` to Function<I>::run for the matching I.
//
// Fix: the second branch is now `else if constexpr` instead of a runtime
// `else if`. With a plain runtime `if` on the compile-time constant
// `N >= 2`, a value-returning Function left the (never-taken) fall-through
// path flowing off the end of a non-void function — a -Wreturn-type warning
// and a latent UB path. `if constexpr` discards the untaken branch entirely.
template<size_t Offset, size_t N, template<size_t> typename Function>
struct do_generate_branch_tree {
    template<typename... Args>
    static decltype(auto) run(size_t n, Args&&... args) {
        if constexpr (N == 1) {
            // Leaf: exactly one candidate left.
            return Function<Offset>::run(std::forward<Args>(args)...);
        } else if constexpr (N >= 2) {
            // Recurse into the half of the range containing `n`.
            if (n < Offset + N / 2) {
                return do_generate_branch_tree<Offset, N / 2, Function>::run(n, std::forward<Args>(args)...);
            } else {
                return do_generate_branch_tree<Offset + N / 2, N - N / 2, Function>::run(n, std::forward<Args>(args)...);
            }
        }
    }
};
// Branch tree over indices [0, N).
template<size_t N, template<size_t> typename Function>
using generate_branch_tree = do_generate_branch_tree<0, N, Function>;
}
// Tagged union of IMR types. Which alternative is active is not stored in
// the object itself; it must be supplied by the deserialisation context
// (active_alternative_of<Tag>()).
template<typename Tag, typename... Alternatives>
struct variant {
    // Strongly-typed index of an alternative; constructible only through
    // index_for(), so arbitrary integers cannot be smuggled in.
    class alternative_index {
        size_t _index;
    private:
        constexpr explicit alternative_index(size_t idx) noexcept
            : _index(idx) { }
        friend class variant;
    public:
        constexpr size_t index() const noexcept { return _index; }
    };
    template<typename AlternativeTag>
    constexpr static alternative_index index_for() noexcept {
        return alternative_index(internal::get_member_index<AlternativeTag, Alternatives...>);
    }
private:
    // Adapter calling the visitor with a null pointer whose static type
    // identifies the N-th alternative.
    template<size_t N>
    struct alternative_visitor {
        template<typename Visitor>
        static decltype(auto) run(Visitor&& visitor) {
            using member = typename meta::get<N, Alternatives...>;
            return visitor(static_cast<member*>(nullptr));
        }
    };
    // Dispatch on a runtime alternative index via a compile-time branch tree.
    template<typename Visitor>
    static decltype(auto) choose_alternative(alternative_index index, Visitor&& visitor) {
        // For large sizeof...(Alternatives) a jump table may be the better option.
        return internal::generate_branch_tree<sizeof...(Alternatives), alternative_visitor>::run(index.index(), std::forward<Visitor>(visitor));
    }
public:
    template<::mutable_view is_mutable>
    class basic_view {
        using pointer_type = std::conditional_t<is_mutable == ::mutable_view::no,
                                                const uint8_t*, uint8_t*>;
        pointer_type _ptr;
    public:
        explicit basic_view(pointer_type ptr) noexcept
            : _ptr(ptr)
        { }
        pointer_type raw_pointer() const noexcept { return _ptr; }
        // A mutable view implicitly converts to an immutable one.
        operator basic_view<::mutable_view::no>() const noexcept {
            return basic_view<::mutable_view::no>(_ptr);
        }
        // View the object as the alternative tagged `AlternativeTag`; the
        // caller is responsible for picking the active one.
        template<typename AlternativeTag, typename Context = no_context_t>
        auto as(const Context& context = no_context) noexcept {
            using member = internal::get_member<AlternativeTag, Alternatives...>;
            return member::type::make_view(_ptr, context.template context_for<AlternativeTag>(_ptr));
        }
        // Call `visitor` with a view of the alternative the context reports
        // as active.
        template<typename Visitor, typename Context>
        decltype(auto) visit(Visitor&& visitor, const Context& context) {
            auto alt_idx = context.template active_alternative_of<Tag>();
            return choose_alternative(alt_idx, [&] (auto object) {
                using type = std::remove_pointer_t<decltype(object)>;
                return visitor(type::type::make_view(_ptr, context.template context_for<typename type::tag>(_ptr)));
            });
        }
        // Call `visitor` with a null member* identifying the active
        // alternative's type, without materialising a view.
        template<typename Visitor, typename Context>
        decltype(auto) visit_type(Visitor&& visitor, const Context& context) {
            auto alt_idx = context.template active_alternative_of<Tag>();
            return choose_alternative(alt_idx, [&] (auto object) {
                using type = std::remove_pointer_t<decltype(object)>;
                return visitor(static_cast<type*>(nullptr));
            });
        }
    };
    using view = basic_view<::mutable_view::no>;
    using mutable_view = basic_view<::mutable_view::yes>;
public:
    template<typename Context>
    static view make_view(const uint8_t* in, const Context& context) noexcept {
        return view(in);
    }
    template<typename Context>
    static mutable_view make_view(uint8_t* in, const Context& context) noexcept {
        return mutable_view(in);
    }
public:
    // Serialised size: delegates to whichever alternative the context
    // reports as active.
    template<typename Context>
    requires requires(const Context& ctx) {
        { ctx.template active_alternative_of<Tag>() } noexcept -> std::same_as<alternative_index>;
    }
    static size_t serialized_object_size(const uint8_t* in, const Context& context) noexcept {
        return choose_alternative(context.template active_alternative_of<Tag>(), [&] (auto object) noexcept {
            using alternative = std::remove_pointer_t<decltype(object)>;
            return alternative::type::serialized_object_size(in, context.template context_for<typename alternative::tag>(in));
        });
    }
    // The serialisation entry points require the alternative to be named
    // explicitly via `AlternativeTag`.
    template<typename AlternativeTag, typename... Args>
    static size_t size_when_serialized(Args&&... args) noexcept {
        using member = internal::get_member<AlternativeTag, Alternatives...>;
        return member::type::size_when_serialized(std::forward<Args>(args)...);
    }
    template<typename AlternativeTag, typename... Args>
    static size_t serialize(uint8_t* out, Args&&... args) noexcept {
        using member = internal::get_member<AlternativeTag, Alternatives...>;
        return member::type::serialize(out, std::forward<Args>(args)...);
    }
    template<typename AlternativeTag, typename Continuation = no_op_continuation>
    static auto get_sizer(Continuation cont = no_op_continuation()) {
        using member = internal::get_member<AlternativeTag, Alternatives...>;
        return member::type::get_sizer(std::move(cont));
    }
    template<typename AlternativeTag, typename Continuation = no_op_continuation>
    static auto get_serializer(uint8_t* out, Continuation cont = no_op_continuation()) {
        using member = internal::get_member<AlternativeTag, Alternatives...>;
        return member::type::get_serializer(out, std::move(cont));
    }
};
// Convenience aliases: an optional/variant member tagged with its own tag.
template<typename Tag, typename Type>
using optional_member = member<Tag, optional<Tag, Type>>;
template<typename Tag, typename... Types>
using variant_member = member<Tag, variant<Tag, Types...>>;
namespace internal {
// Sizer for a structure with no members left to size: carries the
// accumulated size and passes it to the continuation in done().
template<typename Continuation, typename... Members>
class structure_sizer : Continuation {
    size_t _size;
public:
    explicit structure_sizer(size_t size, Continuation&& cont) noexcept
        : Continuation(std::move(cont)), _size(size) {}
    uint8_t* position() const noexcept {
        // We are in the sizing phase and there is no object to point to yet.
        // The serializer will return a real position in the destination buffer,
        // but since sizer and serializer need to expose the same interface we
        // need to return something even though the value will be ignored.
        return nullptr;
    }
    auto done() noexcept { return Continuation::run(_size); }
};
// Continuation run when a nested object's size becomes known; adds it to the
// size accumulated so far and resumes sizing the remaining members.
template<typename NestedContinuation, typename... Members>
class structure_sizer_continuation : NestedContinuation {
    size_t _size;
public:
    explicit structure_sizer_continuation(size_t size, NestedContinuation&& cont) noexcept
        : NestedContinuation(std::move(cont)), _size(size) {}
    structure_sizer<NestedContinuation, Members...> run(size_t size) noexcept {
        return structure_sizer<NestedContinuation, Members...>(size + _size,
            std::move(*static_cast<NestedContinuation*>(this)));
    }
};
// Common sizing logic for the next member (`Member`) of a structure.
template<typename Continuation, typename Member, typename... Members>
class basic_structure_sizer : protected Continuation {
protected:
    size_t _size;   // bytes accounted for so far
    using continuation = structure_sizer_continuation<Continuation, Members...>;
public:
    explicit basic_structure_sizer(size_t size, Continuation&& cont) noexcept
        : Continuation(std::move(cont)), _size(size) {}
    uint8_t* position() const noexcept { return nullptr; }
    // Account for the size of `Member` and move on to the remaining members.
    template<typename... Args>
    structure_sizer<Continuation, Members...> serialize(Args&& ... args) noexcept {
        auto size = Member::type::size_when_serialized(std::forward<Args>(args)...);
        return structure_sizer<Continuation, Members...>(size + _size, std::move(*static_cast<Continuation*>(this)));
    }
    // Size a nested structure member; resumes through `continuation`.
    template<typename... Args>
    auto serialize_nested(Args&& ... args) noexcept {
        return Member::type::get_sizer(continuation(_size, std::move(*static_cast<Continuation*>(this))),
                                       std::forward<Args>(args)...);
    }
};
// Default case: plain members use the basic sizing logic unchanged.
template<typename Continuation, typename Member, typename... Members>
struct structure_sizer<Continuation, Member, Members...>
    : basic_structure_sizer<Continuation, Member, Members...> {
    using basic_structure_sizer<Continuation, Member, Members...>::basic_structure_sizer;
};
// Optional members can additionally be skipped: an absent object occupies
// zero bytes.
template<typename Continuation, typename Tag, typename Type, typename... Members>
struct structure_sizer<Continuation, optional_member<Tag, Type>, Members...>
    : basic_structure_sizer<Continuation, optional_member<Tag, Type>, Members...> {
    using basic_structure_sizer<Continuation, optional_member<Tag, Type>, Members...>::basic_structure_sizer;
    structure_sizer<Continuation, Members...> skip() noexcept {
        return structure_sizer<Continuation, Members...>(this->_size, std::move(*static_cast<Continuation*>(this)));
    }
};
// Variant members must name the serialised alternative, so the plain
// serialize()/serialize_nested() entry points are deleted in favour of
// serialize_as()/serialize_as_nested().
template<typename Continuation, typename Tag, typename... Types, typename... Members>
struct structure_sizer<Continuation, variant_member<Tag, Types...>, Members...>
    : basic_structure_sizer<Continuation, variant_member<Tag, Types...>, Members...> {
    using basic_structure_sizer<Continuation, variant_member<Tag, Types...>, Members...>::basic_structure_sizer;
    template<typename... Args>
    structure_sizer<Continuation, Members...> serialize(Args&& ... args) noexcept = delete;
    template<typename... Args>
    auto serialize_nested(Args&& ... args) noexcept = delete;
    template<typename AlternativeTag, typename... Args>
    structure_sizer<Continuation, Members...> serialize_as(Args&& ... args) noexcept {
        using type = variant<Tag, Types...>;
        auto size = type::template size_when_serialized<AlternativeTag>(std::forward<Args>(args)...);
        return structure_sizer<Continuation, Members...>(size + this->_size, std::move(*static_cast<Continuation*>(this)));
    }
    template<typename AlternativeTag, typename... Args>
    auto serialize_as_nested(Args&& ... args) noexcept {
        using type = variant<Tag, Types...>;
        using cont_type = typename basic_structure_sizer<Continuation, variant_member<Tag, Types...>, Members...>::continuation;
        auto cont = cont_type(this->_size, std::move(*static_cast<Continuation*>(this)));
        return type::template get_sizer<AlternativeTag>(std::move(cont),
                                                        std::forward<Args>(args)...);
    }
};
template<typename Continuation, typename... Members>
class structure_serializer : Continuation {
uint8_t* _out;
public:
explicit structure_serializer(uint8_t* out, Continuation&& cont) noexcept
: Continuation(std::move(cont)), _out(out) {}
uint8_t* position() const noexcept { return _out; }
auto done() noexcept { return Continuation::run(_out); }
};
template<typename NestedContinuation, typename... Members>
struct structure_serializer_continuation : private NestedContinuation {
explicit structure_serializer_continuation(NestedContinuation&& cont) noexcept
: NestedContinuation(std::move(cont)) {}
structure_serializer<NestedContinuation, Members...> run(uint8_t* out) noexcept {
return structure_serializer<NestedContinuation, Members...>(out,
std::move(*static_cast<NestedContinuation*>(this)));
}
};
// Common implementation of the per-member serializer steps.
// serialize() writes the current Member and returns a serializer for the
// rest; serialize_nested() descends into a compound member.
template<typename Continuation, typename Member, typename... Members>
class basic_structure_serializer : protected Continuation {
protected:
    uint8_t* _out;  // current write position
    using continuation = structure_serializer_continuation<Continuation, Members...>;
public:
    explicit basic_structure_serializer(uint8_t* out, Continuation&& cont) noexcept
        : Continuation(std::move(cont)), _out(out) {}
    uint8_t* position() const noexcept { return _out; }
    // Serialize the current Member in place and advance past it.
    template<typename... Args>
    structure_serializer<Continuation, Members...> serialize(Args&& ... args) noexcept {
        auto size = Member::type::serialize(_out, std::forward<Args>(args)...);
        return structure_serializer<Continuation, Members...>(_out + size, std::move(*static_cast<Continuation*>(this)));
    }
    // Start serializing the current Member as a nested compound object;
    // the returned serializer resumes this structure when done.
    template<typename... Args>
    auto serialize_nested(Args&& ... args) noexcept {
        return Member::type::get_serializer(_out,
            continuation(std::move(*static_cast<Continuation*>(this))),
            std::forward<Args>(args)...);
    }
};
// Serializer for a structure whose next member is a plain
// (unconditional) one: just the inherited serialize()/serialize_nested().
template<typename Continuation, typename Member, typename... Members>
struct structure_serializer<Continuation, Member, Members...>
    : basic_structure_serializer<Continuation, Member, Members...> {
    using basic_structure_serializer<Continuation, Member, Members...>::basic_structure_serializer;
};
// Serializer for an optional member: in addition to the inherited
// operations, skip() omits the member entirely (no bytes are written,
// the output position does not move).
template<typename Continuation, typename Tag, typename Type, typename... Members>
struct structure_serializer<Continuation, optional_member<Tag, Type>, Members...>
    : basic_structure_serializer<Continuation, optional_member<Tag, Type>, Members...> {
    using basic_structure_serializer<Continuation, optional_member<Tag, Type>, Members...>::basic_structure_serializer;
    structure_serializer<Continuation, Members...> skip() noexcept {
        return structure_serializer<Continuation, Members...>(this->_out,
            std::move(*static_cast<Continuation*>(this)));
    }
};
// Serializer for a variant member. The generic serialize()/
// serialize_nested() are deleted: the caller must select the active
// alternative explicitly with serialize_as<AlternativeTag>() (flat
// alternative) or serialize_as_nested<AlternativeTag>() (compound one).
template<typename Continuation, typename Tag, typename... Types, typename... Members>
struct structure_serializer<Continuation, variant_member<Tag, Types...>, Members...>
    : basic_structure_serializer<Continuation, variant_member<Tag, Types...>, Members...> {
    using basic_structure_serializer<Continuation, variant_member<Tag, Types...>, Members...>::basic_structure_serializer;
    template<typename... Args>
    structure_serializer<Continuation, Members...> serialize(Args&& ... args) noexcept = delete;
    template<typename... Args>
    auto serialize_nested(Args&& ... args) noexcept = delete;
    // Serialize the alternative selected by AlternativeTag and advance.
    template<typename AlternativeTag, typename... Args>
    structure_serializer<Continuation, Members...> serialize_as(Args&& ... args) noexcept {
        using type = variant<Tag, Types...>;
        auto size = type::template serialize<AlternativeTag>(this->_out, std::forward<Args>(args)...);
        return structure_serializer<Continuation, Members...>(this->_out + size,
            std::move(*static_cast<Continuation*>(this)));
    }
    // Descend into the selected alternative as a nested compound object.
    template<typename AlternativeTag, typename... Args>
    auto serialize_as_nested(Args&& ... args) noexcept {
        using type = variant<Tag, Types...>;
        using cont_type = typename basic_structure_serializer<Continuation, variant_member<Tag, Types...>, Members...>::continuation;
        auto cont = cont_type(std::move(*static_cast<Continuation*>(this)));
        return type::template get_serializer<AlternativeTag>(this->_out,
            std::move(cont),
            std::forward<Args>(args)...);
    }
};
}
// Represents a compound type.
//
// A structure is a sequence of Members laid out back-to-back with no
// stored offsets; a member's offset is recomputed by summing the
// serialized sizes of all preceding members, so offset_of() is linear
// in the member index.
template<typename... Members>
struct structure {
    // Lightweight, non-owning view over a serialized structure.
    // is_mutable selects between const and mutable underlying pointers.
    template<::mutable_view is_mutable>
    class basic_view {
        using pointer_type = std::conditional_t<is_mutable == ::mutable_view::no,
                                                const uint8_t*, uint8_t*>;
        pointer_type _ptr;
    public:
        template<typename Context>
        explicit basic_view(pointer_type ptr, const Context& context) noexcept : _ptr(ptr) { }
        pointer_type raw_pointer() const noexcept { return _ptr; }
        // A mutable view implicitly converts to an immutable one.
        operator basic_view<::mutable_view::no>() const noexcept {
            return basic_view<::mutable_view::no>(_ptr, no_context);
        }
        // Byte offset of the member identified by Tag, computed by
        // walking all preceding members.
        template<typename Tag, typename Context = no_context_t>
        auto offset_of(const Context& context = no_context) const noexcept {
            static constexpr auto idx = internal::get_member_index<Tag, Members...>;
            size_t total_size = 0;
            // noexcept added for consistency with the identical lambdas in
            // serialized_object_size() and the static offset_of() below.
            meta::for_each<meta::take<idx, Members...>>([&] (auto ptr) noexcept {
                using member = std::remove_pointer_t<decltype(ptr)>;
                auto offset = _ptr + total_size;
                auto this_size = member::type::serialized_object_size(offset, context.template context_for<typename member::tag>(offset));
                total_size += this_size;
            });
            return total_size;
        }
        // View of the member identified by Tag.
        template<typename Tag, typename Context = no_context_t>
        auto get(const Context& context = no_context) const noexcept {
            using member = internal::get_member<Tag, Members...>;
            auto offset = _ptr + offset_of<Tag, Context>(context);
            return member::type::make_view(offset, context.template context_for<Tag>(offset));
        }
    };
    using view = basic_view<::mutable_view::no>;
    using mutable_view = basic_view<::mutable_view::yes>;
public:
    template<typename Context = no_context_t>
    static view make_view(const uint8_t* in, const Context& context = no_context) noexcept {
        return view(in, context);
    }
    template<typename Context = no_context_t>
    static mutable_view make_view(uint8_t* in, const Context& context = no_context) noexcept {
        return mutable_view(in, context);
    }
public:
    // Total serialized size: the sum of all member sizes.
    template<typename Context = no_context_t>
    static size_t serialized_object_size(const uint8_t* in, const Context& context = no_context) noexcept {
        size_t total_size = 0;
        meta::for_each<Members...>([&] (auto ptr) noexcept {
            using member = std::remove_pointer_t<decltype(ptr)>;
            auto offset = in + total_size;
            auto this_size = member::type::serialized_object_size(offset, context.template context_for<typename member::tag>(offset));
            total_size += this_size;
        });
        return total_size;
    }
    // Entry points of the two-phase writing protocol: a sizer first
    // computes the number of bytes needed, then a serializer writes them.
    template<typename Continuation = no_op_continuation>
    static internal::structure_sizer<Continuation, Members...> get_sizer(Continuation cont = no_op_continuation()) {
        return internal::structure_sizer<Continuation, Members...>(0, std::move(cont));
    }
    template<typename Continuation = no_op_continuation>
    static internal::structure_serializer<Continuation, Members...> get_serializer(uint8_t* out, Continuation cont = no_op_continuation()) {
        return internal::structure_serializer<Continuation, Members...>(out, std::move(cont));
    }
    // Run `writer` against a sizer to learn the serialized size without
    // writing anything.
    template<typename Writer, typename... Args>
    static size_t size_when_serialized(Writer&& writer, Args&&... args) noexcept {
        return std::forward<Writer>(writer)(get_sizer(), std::forward<Args>(args)...);
    }
    // Run `writer` against a serializer writing at `out`; returns the
    // number of bytes written.
    template<typename Writer, typename... Args>
    static size_t serialize(uint8_t* out, Writer&& writer, Args&&... args) noexcept {
        auto ptr = std::forward<Writer>(writer)(get_serializer(out), std::forward<Args>(args)...);
        return ptr - out;
    }
    // Static counterpart of basic_view::offset_of().
    template<typename Tag, typename Context = no_context_t>
    static size_t offset_of(const uint8_t* in, const Context& context = no_context) noexcept {
        static constexpr auto idx = internal::get_member_index<Tag, Members...>;
        size_t total_size = 0;
        meta::for_each<meta::take<idx, Members...>>([&] (auto ptr) noexcept {
            using member = std::remove_pointer_t<decltype(ptr)>;
            auto offset = in + total_size;
            auto this_size = member::type::serialized_object_size(offset, context.template context_for<typename member::tag>(offset));
            total_size += this_size;
        });
        return total_size;
    }
    // View of the member identified by Tag (const overload).
    template<typename Tag, typename Context = no_context_t>
    static auto get_member(const uint8_t* in, const Context& context = no_context) noexcept {
        auto off = offset_of<Tag>(in, context);
        using member = internal::get_member<Tag, Members...>;
        return member::type::make_view(in + off, context.template context_for<typename member::tag>(in + off));
    }
    // Mutable overload. Context previously defaulted to
    // decltype(no_context) (i.e. const no_context_t) here, unlike every
    // sibling overload; no_context_t is equivalent and consistent.
    template<typename Tag, typename Context = no_context_t>
    static auto get_member(uint8_t* in, const Context& context = no_context) noexcept {
        auto off = offset_of<Tag>(in, context);
        using member = internal::get_member<Tag, Members...>;
        return member::type::make_view(in + off, context.template context_for<typename member::tag>(in + off));
    }
};
// Wraps type T under a Tag, producing a distinct type with T's
// interface, so otherwise identical types can be told apart at the type
// level (e.g. when selecting method specializations).
template<typename Tag, typename T>
struct tagged_type : T { };
}

View File

@@ -1,83 +0,0 @@
/*
* Copyright (C) 2018 ScyllaDB
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <concepts>
#include "imr/alloc.hh"
#include "imr/compound.hh"
#include "imr/fundamental.hh"
namespace imr {
/// Check if a type T is a sizer for Structure.
template<typename Structure, typename T>
struct is_sizer_for : std::false_type { };
// True only for a structure_sizer instantiated from the same member list.
template<typename Continuation, typename... Members>
struct is_sizer_for<structure<Members...>,
                    internal::structure_sizer<Continuation, Members...>>
    : std::true_type { };
template<typename Structure, typename T>
constexpr bool is_sizer_for_v = is_sizer_for<Structure, T>::value;
/// Check if a type T is a serializer for Structure.
template<typename Structure, typename T>
struct is_serializer_for : std::false_type { };
// True only for a structure_serializer instantiated from the same member list.
template<typename Continuation, typename... Members>
struct is_serializer_for<structure<Members...>,
                         internal::structure_serializer<Continuation, Members...>>
    : std::true_type { };
template<typename Structure, typename T>
constexpr bool is_serializer_for_v = is_serializer_for<Structure, T>::value;
/// The default sizer for Structure.
template<typename Structure>
using default_sizer_t = decltype(Structure::get_sizer());
/// The default serializer for Structure.
// nullptr stands in for the output pointer; only the type matters here.
template<typename Structure>
using default_serializer_t = decltype(Structure::get_serializer(nullptr));
/// A simple writer that accepts only sizer or serializer as an argument.
template<typename Writer, typename Structure>
concept WriterSimple = requires(Writer writer, default_sizer_t<Structure> sizer,
                                default_serializer_t<Structure> serializer)
{
    writer(sizer);
    writer(serializer);
};
/// A writer that accepts both sizer or serializer and a memory allocator.
// The allocator argument mirrors the phase: the sizer is paired with the
// allocator's sizer, the serializer with the allocator's serializer.
template<typename Writer, typename Structure>
concept WriterAllocator = requires(Writer writer, default_sizer_t<Structure> sizer,
                                   default_serializer_t<Structure> serializer,
                                   imr::alloc::object_allocator::sizer alloc_sizer,
                                   imr::alloc::object_allocator::serializer alloc_serializer)
{
    writer(sizer, alloc_sizer);
    writer(serializer, alloc_serializer);
};
}

View File

@@ -1,63 +0,0 @@
/*
* Copyright (C) 2018 ScyllaDB
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "utils/fragment_range.hh"
namespace imr {
/// No-op deserialisation context
///
/// This is a dummy deserialisation context to be used when there is no need
/// for one, but the interface expects a context object.
///
/// Declared `inline constexpr` (previously `static const`): a header-scope
/// `static const` object gets a separate internal-linkage copy in every
/// translation unit including the header, while `inline` (C++17) guarantees
/// one shared object program-wide.
struct no_context_t {
    /// Returns itself for any member tag, so the no-op context works for
    /// arbitrarily nested objects.
    template<typename Tag, typename... Args>
    const no_context_t& context_for(Args&&...) const noexcept { return *this; }
};
inline constexpr no_context_t no_context{};
/// Terminal continuation: forwards the value it is given, unchanged.
struct no_op_continuation {
    template<typename T>
    static T run(T result) noexcept {
        return result;
    }
};
/// Deferred writer for an IMR object of type T.
///
/// During the sizing phase the placeholder has no destination yet and
/// serialize() is a no-op; once set_pointer() records the destination
/// (in the serializing phase), serialize() writes through it.
template<typename T>
class placeholder {
    uint8_t* _dst = nullptr;
public:
    placeholder() = default;
    explicit placeholder(uint8_t* ptr) noexcept : _dst(ptr) { }
    void set_pointer(uint8_t* ptr) noexcept { _dst = ptr; }
    template<typename... Args>
    void serialize(Args&&... args) noexcept {
        // A null destination means we are still in the sizing phase.
        // We lose the information which phase we are in, hence this
        // run-time check.
        if (_dst) {
            T::serialize(_dst, std::forward<Args>(args)...);
        }
    }
};
}

View File

@@ -1,308 +0,0 @@
/*
* Copyright (C) 2018 ScyllaDB
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <boost/range/algorithm/for_each.hpp>
#include <seastar/core/align.hh>
#include <seastar/core/bitops.hh>
#include "bytes.hh"
#include "utils/meta.hh"
#include "imr/core.hh"
namespace imr {
namespace internal {
/// Reads a trivial, standard-layout value of type T out of a byte-typed
/// buffer. The byte-wise copy tolerates unaligned storage and does not
/// violate strict aliasing.
template<typename T, typename CharT>
requires std::is_standard_layout_v<T> && std::is_trivial_v<T> && (sizeof(CharT) == 1)
inline T read_pod(const CharT* in) noexcept {
    T result;
    auto* raw = reinterpret_cast<CharT*>(&result);
    std::copy_n(in, sizeof(T), raw);
    return result;
}
/// Writes a trivial, standard-layout value of type T into a byte-typed
/// buffer. The byte-wise copy tolerates unaligned storage and does not
/// violate strict aliasing.
template<typename T, typename CharT>
requires std::is_standard_layout_v<T> && std::is_trivial_v<T> && (sizeof(CharT) == 1)
inline void write_pod(T obj, CharT* out) noexcept {
    auto* raw = reinterpret_cast<const CharT*>(&obj);
    std::copy_n(raw, sizeof(T), out);
}
/// Tagged request to set (or clear) a single flag when serializing a
/// flag set; default-constructed it sets the flag, set_flag(false)
/// clears it.
template<typename Tag>
class set_flag {
    bool _set = true;
public:
    set_flag() = default;
    explicit set_flag(bool v) noexcept : _set(v) { }
    bool value() const noexcept { return _set; }
};
/// Set of flags
///
/// Represents a fixed-size set of tagged flags.
/// Each Tag occupies one bit (bit index = position of the tag in the
/// Tags... pack); the serialized form is the minimal whole number of
/// bytes covering sizeof...(Tags) bits.
template<typename... Tags>
class flags {
    // Bytes needed to hold one bit per tag, rounded up to whole bytes.
    static constexpr auto object_size = seastar::align_up<size_t>(sizeof...(Tags), 8) / 8;
private:
    // Sets or clears the bit assigned to Tag.
    template<typename Tag>
    static void do_set_or_clear(uint8_t* ptr, bool set) noexcept {
        const auto idx = meta::find<Tag, Tags...>;
        const auto byte_idx = idx / 8;
        const auto bit_idx = idx % 8;
        auto value = ptr[byte_idx];
        value &= ~uint8_t(1 << bit_idx);   // clear the bit...
        value |= uint8_t(set) << bit_idx;  // ...then set it if requested
        ptr[byte_idx] = value;
    }
    template<typename Tag>
    static bool do_get(const uint8_t* ptr) noexcept {
        const auto idx = meta::find<Tag, Tags...>;
        const auto byte_idx = idx / 8;
        const auto bit_idx = idx % 8;
        return ptr[byte_idx] & (1 << bit_idx);
    }
public:
    // Non-owning view over a serialized flag set.
    template<::mutable_view is_mutable>
    class basic_view {
        using pointer_type = std::conditional_t<is_mutable == ::mutable_view::no,
                                                const uint8_t*, uint8_t*>;
        pointer_type _ptr;
    public:
        explicit basic_view(pointer_type ptr) noexcept : _ptr(ptr) { }
        // A mutable view implicitly converts to an immutable one.
        operator basic_view<::mutable_view::no>() const noexcept {
            return basic_view<::mutable_view::no>(_ptr);
        }
        template<typename Tag>
        bool get() const noexcept {
            return do_get<Tag>(_ptr);
        }
        template<typename Tag>
        void set(bool value = true) noexcept {
            do_set_or_clear<Tag>(_ptr, value);
        }
    };
    using view = basic_view<::mutable_view::no>;
    using mutable_view = basic_view<::mutable_view::yes>;
public:
    // The context parameter is accepted for interface uniformity only.
    template<typename Context = no_context_t>
    static view make_view(const uint8_t* in, const Context& = no_context) noexcept {
        return view(in);
    }
    template<typename Context = no_context_t>
    static mutable_view make_view(uint8_t* in, const Context& = no_context) noexcept {
        return mutable_view(in);
    }
public:
    // Fixed size: independent of the flag values.
    template<typename Context = no_context_t>
    static size_t serialized_object_size(const uint8_t*, const Context& = no_context) noexcept {
        return object_size;
    }
    template<typename... Tags1>
    static size_t size_when_serialized(set_flag<Tags1>...) noexcept {
        return object_size;
    }
    // Serialize: start from all-clear, then apply each requested flag.
    template<typename... Tags1>
    static size_t serialize(uint8_t* out, set_flag<Tags1>... sfs) noexcept {
        std::fill_n(out, object_size, 0);
        (do_set_or_clear<Tags1>(out, sfs.value()), ...);
        return object_size;
    }
    static size_t size_when_serialized(placeholder<flags<Tags...>>&) noexcept {
        return object_size;
    }
    // Placeholder variant: record where the flags live so they can be
    // written later.
    static size_t serialize(uint8_t* out, placeholder<flags<Tags...>>& phldr) noexcept {
        phldr.set_pointer(out);
        return object_size;
    }
};
/// POD object
///
/// Represents a fixed-size POD value.
template<typename Type>
requires std::is_standard_layout_v<Type> && std::is_trivial_v<Type>
struct pod {
    using underlying = Type;
    enum : size_t {
        size = sizeof(Type),
    };
    // Non-owning view; load()/store() go through byte-wise copies
    // (read_pod/write_pod), so the underlying storage may be unaligned.
    template<::mutable_view is_mutable>
    class basic_view {
        using pointer_type = std::conditional_t<is_mutable == ::mutable_view::no,
                                                const uint8_t*, uint8_t*>;
        pointer_type _ptr;
    public:
        explicit basic_view(pointer_type ptr) noexcept : _ptr(ptr) { }
        // A mutable view implicitly converts to an immutable one.
        operator basic_view<::mutable_view::no>() const noexcept {
            return basic_view<::mutable_view::no>(_ptr);
        }
        Type load() const noexcept {
            return internal::read_pod<Type>(_ptr);
        }
        void store(const Type& object) noexcept {
            internal::write_pod(object, _ptr);
        }
    };
    using view = basic_view<::mutable_view::no>;
    using mutable_view = basic_view<::mutable_view::yes>;
public:
    // The context parameter is accepted for interface uniformity only.
    template<typename Context = no_context_t>
    static view make_view(const uint8_t* in, const Context& = no_context) noexcept {
        return view(in);
    }
    template<typename Context = no_context_t>
    static mutable_view make_view(uint8_t* in, const Context& = no_context) noexcept {
        return mutable_view(in);
    }
public:
    // Fixed size: always sizeof(Type).
    template<typename Context = no_context_t>
    static size_t serialized_object_size(const uint8_t*, const Context& = no_context) noexcept {
        return sizeof(Type);
    }
    static size_t size_when_serialized(const Type&) noexcept {
        return sizeof(Type);
    }
    static size_t serialize(uint8_t* out, const Type& value) noexcept {
        internal::write_pod(value, out);
        return sizeof(Type);
    }
    static size_t size_when_serialized(placeholder<pod<Type>>&) noexcept {
        return sizeof(Type);
    }
    // Placeholder variant: record the destination for a later store.
    static size_t serialize(uint8_t* out, placeholder<pod<Type>>& phldr) noexcept {
        phldr.set_pointer(out);
        return sizeof(Type);
    }
};
/// Buffer
///
/// Represents an opaque buffer. The size of the buffer is not stored and must
/// be provided by external context.
/// A buffer can be created from a bytes_view, a fragments range or a
/// (size, serializer) pair.
template<typename Tag>
struct buffer {
    using view = bytes_view;
    using mutable_view = bytes_mutable_view;
    template<::mutable_view is_mutable>
    using basic_view = std::conditional_t<is_mutable == ::mutable_view::no, view, mutable_view>;
    // The context must be able to report this buffer's size for Tag.
    template<typename Context>
    requires requires(const Context& ctx) {
        { ctx.template size_of<Tag>() } noexcept -> std::same_as<size_t>;
    }
    static view make_view(const uint8_t* in, const Context& context) noexcept {
        auto ptr = reinterpret_cast<bytes_view::const_pointer>(in);
        return bytes_view(ptr, context.template size_of<Tag>());
    }
    template<typename Context>
    requires requires(const Context& ctx) {
        { ctx.template size_of<Tag>() } noexcept -> std::same_as<size_t>;
    }
    static mutable_view make_view(uint8_t* in, const Context& context) noexcept {
        auto ptr = reinterpret_cast<bytes_mutable_view::pointer>(in);
        return bytes_mutable_view(ptr, context.template size_of<Tag>());
    }
public:
    template<typename Context>
    requires requires(const Context& ctx) {
        { ctx.template size_of<Tag>() } noexcept -> std::same_as<size_t>;
    }
    static size_t serialized_object_size(const uint8_t*, const Context& context) noexcept {
        return context.template size_of<Tag>();
    }
    static size_t size_when_serialized(bytes_view src) noexcept {
        return src.size();
    }
    // (size, serializer) form: the caller promises the serializer writes
    // exactly `size` bytes when invoked.
    template<typename Serializer>
    requires requires (Serializer ser, uint8_t* ptr) {
        { ser(ptr) } noexcept;
    }
    static size_t size_when_serialized(size_t size, Serializer&&) noexcept {
        return size;
    }
    template<typename FragmentRange, typename = std::enable_if_t<is_fragment_range_v<std::decay_t<FragmentRange>>>>
    static size_t size_when_serialized(FragmentRange&& fragments) {
        return fragments.size_bytes();
    }
    static size_t serialize(uint8_t* out, bytes_view src) {
        std::copy_n(src.begin(), src.size(),
                    reinterpret_cast<bytes_view::value_type*>(out));
        return src.size();
    }
    // Copies all fragments back-to-back into the destination buffer.
    template<typename FragmentRange, typename = std::enable_if_t<is_fragment_range_v<std::decay_t<FragmentRange>>>>
    static size_t serialize(uint8_t* out, FragmentRange&& fragments) {
        auto dst = reinterpret_cast<bytes_view::value_type*>(out);
        using boost::range::for_each;
        for_each(fragments, [&] (bytes_view fragment) {
            dst = std::copy(fragment.begin(), fragment.end(), dst);
        });
        return fragments.size_bytes();
    }
    template<typename Serializer>
    requires requires (Serializer ser, uint8_t* ptr) {
        { ser(ptr) } noexcept;
    }
    static size_t serialize(uint8_t* out, size_t size, Serializer&& serializer) noexcept {
        std::forward<Serializer>(serializer)(out);
        return size;
    }
};
}

View File

@@ -1,169 +0,0 @@
/*
* Copyright (C) 2018 ScyllaDB
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "compound.hh"
namespace imr {
namespace methods {
// A method implementation that does nothing; serves both as the default
// and as the base-class marker for triviality detection below.
template<template<class> typename Method>
struct trivial_method {
    template<typename... Args>
    static void run(Args&&...) noexcept { }
};
// Method<T> is "trivial" iff it derives from trivial_method<Method>.
template<template<class> typename Method, typename T>
using has_trivial_method = std::is_base_of<trivial_method<Method>, Method<T>>;
namespace internal {
// Fallback: when no specialization below matches, the method is a no-op.
template<template<class> typename Method, typename...>
struct generate_method : trivial_method<Method> { };
// Structure: run the method on every member, at that member's offset.
// NOTE(review): offset_of is called without a context here — presumably
// fine for the member lists this is instantiated with; verify.
template<template<class> typename Method, typename Structure, typename... Tags, typename... Types>
struct generate_method<Method, Structure, member<Tags, Types>...> {
    template<typename Context, typename... Args>
    static void run(uint8_t* ptr, const Context& context, Args&&... args) noexcept {
        auto view = Structure::make_view(ptr, context);
        meta::for_each<member<Tags, Types>...>([&] (auto member_type) {
            using member = std::remove_pointer_t<decltype(member_type)>;
            auto member_ptr = ptr + view.template offset_of<typename member::tag>();
            Method<typename member::type>::run(member_ptr,
                context.template context_for<typename member::tag>(member_ptr),
                std::forward<Args>(args)...);
        });
    }
};
// Optional: run the method only if the context says the value is present.
template<template<class> typename Method, typename Tag, typename Type>
struct generate_method<Method, optional<Tag, Type>> {
    template<typename Context, typename... Args>
    static void run(uint8_t* ptr, const Context& context, Args&&... args) noexcept {
        if (context.template is_present<Tag>()) {
            Method<Type>::run(ptr,
                context.template context_for<Tag>(ptr),
                std::forward<Args>(args)...);
        }
    }
};
// Variant: dispatch the method to the currently active alternative.
template<template<class> typename Method, typename Tag, typename... Members>
struct generate_method<Method, variant<Tag, Members...>> {
    template<typename Context, typename... Args>
    static void run(uint8_t* ptr, const Context& context, Args&&... args) noexcept {
        auto view = variant<Tag, Members...>::make_view(ptr, context);
        view.visit_type([&] (auto alternative_type) {
            using member = std::remove_pointer_t<decltype(alternative_type)>;
            Method<typename member::type>::run(ptr,
                context.template context_for<typename member::tag>(ptr),
                std::forward<Args>(args)...);
        }, context);
    }
};
// Predicate, shaped as a nested template so it can be passed to
// meta::all_of, checking whether a member's type has a trivial Method.
template<template<class> typename Method>
struct member_has_trivial_method {
    template<typename T>
    struct type;
};
template<template<class> typename Method>
template<typename Tag, typename Type>
struct member_has_trivial_method<Method>::type<member<Tag, Type>> : has_trivial_method<Method, Type> { };
// get_method<Method, T> selects the no-op implementation when all the
// involved types have trivial methods, and the generated one otherwise.
template<template<class> typename Method, typename T>
struct get_method;
template<template<class> typename Method, typename... Members>
struct get_method<Method, structure<Members...>>
    : std::conditional_t<meta::all_of<member_has_trivial_method<Method>::template type, Members...>,
                         trivial_method<Method>,
                         generate_method<Method, structure<Members...>, Members...>>
{ };
template<template<class> typename Method, typename Tag, typename Type>
struct get_method<Method, optional<Tag, Type>>
    : std::conditional_t<has_trivial_method<Method, Type>::value,
                         trivial_method<Method>,
                         generate_method<Method, optional<Tag, Type>>>
{ };
template<template<class> typename Method, typename Tag, typename... Members>
struct get_method<Method, variant<Tag, Members...>>
    : std::conditional_t<meta::all_of<member_has_trivial_method<Method>::template type, Members...>,
                         trivial_method<Method>,
                         generate_method<Method, variant<Tag, Members...>>>
{ };
// tagged_type simply delegates to the method of the wrapped type.
template<template<class> typename Method, typename Tag, typename Type>
struct get_method<Method, tagged_type<Tag,Type>>
    : std::conditional_t<has_trivial_method<Method, Type>::value,
                         trivial_method<Method>,
                         Method<Type>>
{ };
}
// destructor<T> destroys an IMR object of type T in place; it is a
// no-op for any type without a specialization below.
template<typename T>
struct destructor : trivial_method<destructor> { };
using trivial_destructor = trivial_method<destructor>;
template<typename T>
using is_trivially_destructible = has_trivial_method<destructor, T>;
// Compound types recurse into their components via get_method.
template<typename... Members>
struct destructor<structure<Members...>> : internal::get_method<destructor, structure<Members...>> { };
template<typename Tag, typename Type>
struct destructor<optional<Tag, Type>> : internal::get_method<destructor, optional<Tag, Type>> { };
template<typename Tag, typename... Members>
struct destructor<variant<Tag, Members...>> : internal::get_method<destructor, variant<Tag, Members...>> { };
// Convenience entry point.
template<typename T, typename Context = decltype(no_context)>
void destroy(uint8_t* ptr, const Context& context = no_context) {
    destructor<T>::run(ptr, context);
}
// mover<T> fixes up an IMR object of type T after its bytes have been
// relocated; it is a no-op for any type without a specialization below.
template<typename T>
struct mover : trivial_method<mover> { };
using trivial_mover = trivial_method<mover>;
template<typename T>
using is_trivially_movable = has_trivial_method<mover, T>;
// Compound types recurse into their components via get_method.
template<typename... Members>
struct mover<structure<Members...>> : internal::get_method<mover, structure<Members...>> { };
template<typename Tag, typename Type>
struct mover<optional<Tag, Type>> : internal::get_method<mover, optional<Tag, Type>> { };
template<typename Tag, typename... Members>
struct mover<variant<Tag, Members...>> : internal::get_method<mover, variant<Tag, Members...>> { };
// Convenience entry point; `ptr` is the object's new location.
template<typename T, typename Context = decltype(no_context)>
void move(uint8_t* ptr, const Context& context = no_context) {
    mover<T>::run(ptr, context);
}
}
}

View File

@@ -1,201 +0,0 @@
/*
* Copyright (C) 2018 ScyllaDB
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <type_traits>
#include "imr/core.hh"
#include "imr/alloc.hh"
#include "imr/concepts.hh"
namespace imr {
namespace utils {
// Non-template base of utils::object. Holds the raw allocation pointer
// and the tags used by the back-pointer fix-up machinery below.
class basic_object {
public:
    struct tags {
        class back_pointer { };  // the owning handle's address, stored in the object
        class object { };        // the user-visible IMR object
    };
protected:
    uint8_t* _data = nullptr;
    // The mover specialization below must update _data when the object's
    // storage is relocated.
    friend struct methods::mover<imr::tagged_type<tags::back_pointer, imr::pod<basic_object*>>>;
protected:
    explicit basic_object(uint8_t* ptr) noexcept : _data(ptr) { }
    void set_data(uint8_t* ptr) noexcept { _data = ptr; }
public:
    basic_object() = default;
    // Move-only: ownership of the allocation transfers to the new handle.
    basic_object(basic_object&& other) noexcept : _data(std::exchange(other._data, nullptr)) { }
    basic_object(const basic_object&) = delete;
};
// Adapts a user Context type (constructed from a pointer plus captured
// State values) to the context_for<Tag>() protocol used by the
// structures in utils::object.
template<typename Context, typename... State>
class object_context {
    std::tuple<State...> _state;
private:
    // Builds a Context from the pointer and the captured state values.
    template<size_t... Index>
    Context create(const uint8_t* ptr, std::index_sequence<Index...>) const noexcept {
        return Context(ptr, std::get<Index>(_state)...);
    }
public:
    object_context(const uint8_t*, State... state) : _state { state... } { }
    // The back-pointer member needs no context; every other member gets
    // a freshly-created Context.
    template<typename Tag, typename... Args>
    auto context_for(const uint8_t* ptr, Args&&... args) const noexcept {
        if constexpr (std::is_same_v<Tag, basic_object::tags::back_pointer>) {
            return no_context_t();
        } else {
            return create(ptr, std::index_sequence_for<State...>());
        }
    }
};
}
namespace methods {
// When an IMR object's storage is relocated, follow the stored
// back-pointer to the owning handle and point the handle at the new
// storage (requires the friend access granted by basic_object).
template<>
struct mover<imr::tagged_type<utils::basic_object::tags::back_pointer, imr::pod<utils::basic_object*>>> {
    static void run(uint8_t* ptr, ...) {
        auto bptr = imr::tagged_type<utils::basic_object::tags::back_pointer, imr::pod<utils::basic_object*>>::make_view(ptr).load();
        bptr->_data = ptr;
    }
};
}
namespace utils {
/// Unique pointer to an IMR object
///
/// This is an LSA-aware unique-owner pointer to an IMR object.
template<typename Structure>
class object : public basic_object {
public:
    // Stored layout: a back-pointer to this handle (so relocation can fix
    // the handle up) followed by the user Structure.
    using structure = imr::structure<
        imr::member<tags::back_pointer, imr::tagged_type<tags::back_pointer, imr::pod<basic_object*>>>,
        imr::member<tags::object, Structure>
    >;
    static constexpr size_t size_overhead = sizeof(basic_object*);
private:
    // Takes ownership of `ptr` and records this handle in the back-pointer.
    explicit object(uint8_t* ptr) noexcept
        : basic_object(ptr)
    {
        structure::template get_member<tags::back_pointer>(_data).store(this);
    }
public:
    object() = default;
    // Moving the handle must refresh the stored back-pointer.
    object(object&& other) noexcept : basic_object(std::move(other)) {
        if (_data) {
            structure::template get_member<tags::back_pointer>(_data).store(this);
        }
    }
    object& operator=(object&& other) noexcept {
        swap(other);
        return *this;
    }
    ~object() {
        if (_data) {
            imr::methods::destroy<structure>(_data);
            current_allocator().free(_data);
        }
    }
    // Swaps ownership, keeping both back-pointers consistent.
    void swap(object& other) noexcept {
        std::swap(_data, other._data);
        if (_data) {
            structure::template get_member<tags::back_pointer>(_data).store(this);
        }
        if (other._data) {
            structure::template get_member<tags::back_pointer>(other._data).store(&other);
        }
    }
    explicit operator bool() const noexcept { return bool(_data); }
    // Pointer to the user Structure (skips past the back-pointer member).
    uint8_t* get() noexcept { return _data ? _data + structure::template offset_of<tags::object>(_data) : nullptr; }
    const uint8_t* get() const noexcept { return _data ? _data + structure::template offset_of<tags::object>(_data) : nullptr; }
    /// Creates an IMR object from a raw writer
    ///
    /// This low-level function creates an IMR object owned by `object` using
    /// a raw writer (i.e. does not necessarily follow the standard IMR
    /// serialisation process). This is useful for fast copying of trivial
    /// IMR objects.
    ///
    /// \note This function could be deprecated once the IMR starts supporting
    /// copying IMR objects.
    template<typename RawWriter>
    requires requires (RawWriter wr, uint8_t* ptr) {
        { wr(ptr) } noexcept;
    }
    static object make_raw(size_t len, RawWriter&& wr, allocation_strategy::migrate_fn migrate = &imr::alloc::default_lsa_migrate_fn<structure>::migrate_fn) {
        object obj;
        auto ptr = static_cast<uint8_t*>(current_allocator().alloc(migrate, sizeof(void*) + len, 1));
        // The writer fills in the payload past the back-pointer slot.
        wr(ptr + sizeof(void*));
        auto view = structure::make_view(ptr);
        view.template get<tags::back_pointer>().store(&obj);
        obj.set_data(ptr);
        return obj;
    }
    /// Create an IMR objects
    template<typename Writer, typename MigrateFn>
    requires WriterAllocator<Writer, Structure>
    static object make(Writer&& object_writer,
                       MigrateFn* migrate = &imr::alloc::default_lsa_migrate_fn<structure>::migrate_fn) {
        static_assert(std::is_same_v<typename MigrateFn::structure, structure>);
        return do_make(std::forward<Writer>(object_writer), migrate);
    }
private:
    template<typename Writer>
    requires WriterAllocator<Writer, Structure>
    static object do_make(Writer&& object_writer, allocation_strategy::migrate_fn migrate) {
        // Frees the allocation if serialization fails before the object
        // handle takes ownership.
        struct alloc_deleter {
            size_t _size;
            void operator()(uint8_t* ptr) {
                current_allocator().free(ptr, _size);
            }
        };
        using alloc_unique_ptr = std::unique_ptr<uint8_t[], alloc_deleter>;
        // Adapt the user writer: the back-pointer member is written as
        // nullptr here and fixed up by the object constructor.
        auto writer = [&object_writer] (auto&& ser, auto&& alloc) {
            return object_writer(ser.serialize(nullptr).serialize_nested(), alloc).done();
        };
        auto& alloc = current_allocator();
        alloc::object_allocator allocator(alloc);
        // Two-phase write: compute the size, allocate, then serialize.
        auto obj_size = structure::size_when_serialized(writer, allocator.get_sizer());
        auto ptr = alloc_unique_ptr(static_cast<uint8_t*>(alloc.alloc(migrate, obj_size, 1)), alloc_deleter { obj_size });
        allocator.allocate_all();
        structure::serialize(ptr.get(), writer, allocator.get_serializer());
        return object(ptr.release());
    }
};
}
}

1
lua.cc
View File

@@ -25,6 +25,7 @@
#include "utils/utf8.hh"
#include "utils/ascii.hh"
#include "utils/date.h"
#include <seastar/core/align.hh>
#include <lua.hpp>
// Lua 5.4 added an extra parameter to lua_resume

View File

@@ -711,7 +711,7 @@ void write_cell(RowWriter& w, const query::partition_slice& slice, ::atomic_cell
} else {
return std::move(wr).skip_expiry();
}
}().write_fragmented_value(c.value());
}().write_fragmented_value(fragment_range(c.value()));
[&, wr = std::move(after_value)] () mutable {
if (slice.options.contains<query::partition_slice::option::send_ttl>() && c.is_live_and_has_ttl()) {
return std::move(wr).write_ttl(c.ttl());
@@ -738,7 +738,7 @@ void write_cell(RowWriter& w, const query::partition_slice& slice, data_type typ
template<typename RowWriter>
void write_counter_cell(RowWriter& w, const query::partition_slice& slice, ::atomic_cell_view c) {
assert(c.is_live());
counter_cell_view::with_linearized(c, [&] (counter_cell_view ccv) {
auto ccv = counter_cell_view(c);
auto wr = w.add().write();
[&, wr = std::move(wr)] () mutable {
if (slice.options.contains<query::partition_slice::option::send_timestamp>()) {
@@ -750,7 +750,6 @@ void write_counter_cell(RowWriter& w, const query::partition_slice& slice, ::ato
.write_value(counter_cell_view::total_value_type()->decompose(ccv.total_value()))
.skip_ttl()
.end_qr_cell();
});
}
template<typename Hasher>

View File

@@ -45,7 +45,7 @@ template<typename Writer>
auto write_live_cell(Writer&& writer, atomic_cell_view c)
{
return std::move(writer).write_created_at(c.timestamp())
.write_fragmented_value(c.value())
.write_fragmented_value(fragment_range(c.value()))
.end_live_cell();
}
@@ -60,14 +60,13 @@ auto write_counter_cell(Writer&& writer, atomic_cell_view c)
.write_delta(delta)
.end_counter_cell_update();
} else {
return counter_cell_view::with_linearized(c, [&] (counter_cell_view ccv) {
auto ccv = counter_cell_view(c);
auto shards = std::move(value).start_value_counter_cell_full()
.start_shards();
for (auto csv : ccv.shards()) {
shards.add_shards(counter_shard(csv));
}
return std::move(shards).end_shards().end_counter_cell_full();
});
}
}().end_counter_cell();
}
@@ -79,7 +78,7 @@ auto write_expiring_cell(Writer&& writer, atomic_cell_view c)
.write_expiry(c.expiry())
.start_c()
.write_created_at(c.timestamp())
.write_fragmented_value(c.value())
.write_fragmented_value(fragment_range(c.value()))
.end_c()
.end_expiring_cell();
}

View File

@@ -28,6 +28,7 @@
#include <seastar/core/sstring.hh>
#include <seastar/core/sharded.hh>
#include <seastar/core/future.hh>
#include <seastar/core/condition-variable.hh>
#include "database_fwd.hh"
#include "frozen_mutation.hh"

View File

@@ -46,6 +46,7 @@
#include "gms/endpoint_state.hh"
#include <seastar/core/distributed.hh>
#include <seastar/core/abort_source.hh>
#include <seastar/core/gate.hh>
#include "gms/inet_address.hh"
#include "gms/feature.hh"
#include "message/msg_addr.hh"

View File

@@ -30,6 +30,7 @@
#include <seastar/core/metrics_registration.hh>
#include <seastar/core/scheduling.hh>
#include <seastar/core/abort_source.hh>
#include <seastar/core/condition-variable.hh>
#include "log.hh"
#include "utils/exponential_backoff_retry.hh"
#include <vector>

View File

@@ -182,13 +182,12 @@ void sstable_writer_k_l::write_cell(file_writer& out, atomic_cell_view cell, con
column_mask mask = column_mask::counter;
write(_version, out, mask, int64_t(0), timestamp);
counter_cell_view::with_linearized(cell, [&] (counter_cell_view ccv) {
auto ccv = counter_cell_view(cell);
write_counter_value(ccv, out, _version, [v = _version] (file_writer& out, uint32_t value) {
return write(v, out, value);
});
_c_stats.update_local_deletion_time(std::numeric_limits<int>::max());
});
} else if (cell.is_live_and_has_ttl()) {
// expiring cell

View File

@@ -1133,10 +1133,9 @@ void writer::write_cell(bytes_ostream& writer, const clustering_key_prefix* clus
if (cdef.is_counter()) {
if (!is_deleted) {
assert(!cell.is_counter_update());
counter_cell_view::with_linearized(cell, [&] (counter_cell_view ccv) {
write_counter_value(ccv, writer, _sst.get_version(), [] (bytes_ostream& out, uint32_t value) {
return write_vint(out, value);
});
auto ccv = counter_cell_view(cell);
write_counter_value(ccv, writer, _sst.get_version(), [] (bytes_ostream& out, uint32_t value) {
return write_vint(out, value);
});
}
} else {

View File

@@ -29,7 +29,6 @@
#include "vint-serialization.hh"
#include <seastar/core/byteorder.hh>
#include "version.hh"
#include "data/value_view.hh"
#include "counters.hh"
#include "service/storage_service.hh"
@@ -375,10 +374,11 @@ inline void write(sstable_version_types v, file_writer& out, const disk_string_v
template<typename SizeType>
inline void write(sstable_version_types ver, file_writer& out, const disk_data_value_view<SizeType>& v) {
SizeType length;
check_truncate_and_assign(length, v.value.size_bytes());
check_truncate_and_assign(length, v.value.size());
write(ver, out, length);
using boost::range::for_each;
for_each(v.value, [&] (bytes_view fragment) { write(ver, out, fragment); });
for (bytes_view frag : fragment_range(v.value)) {
write(ver, out, frag);
}
}
template <typename Members>
@@ -576,18 +576,6 @@ void write_cell_value(sstable_version_types v, W& out, const abstract_type& type
}
}
template <typename W>
requires Writer<W>
void write_cell_value(sstable_version_types v, W& out, const abstract_type& type, atomic_cell_value_view value) {
if (!value.empty()) {
if (!type.value_length_if_fixed()) {
write_vint(out, value.size_bytes());
}
using boost::range::for_each;
for_each(value, [&] (bytes_view fragment) { write(v, out, fragment); });
}
}
template <typename WriteLengthFunc, typename W>
requires Writer<W>
void write_counter_value(counter_cell_view ccv, W& out, sstable_version_types v, WriteLengthFunc&& write_len_func) {

View File

@@ -70,7 +70,8 @@ SEASTAR_TEST_CASE(test_counter_cell) {
auto c1 = atomic_cell_or_collection(b1.build(0));
atomic_cell_or_collection c2;
counter_cell_view::with_linearized(c1.as_atomic_cell(cdef), [&] (counter_cell_view cv) {
{
counter_cell_view cv(c1.as_atomic_cell(cdef));
BOOST_REQUIRE_EQUAL(cv.total_value(), 1);
verify_shard_order(cv);
@@ -78,18 +79,20 @@ SEASTAR_TEST_CASE(test_counter_cell) {
b2.add_shard(counter_shard(*cv.get_shard(id[0])).update(2, 1));
b2.add_shard(counter_shard(id[2], 1, 1));
c2 = atomic_cell_or_collection(b2.build(0));
});
}
counter_cell_view::with_linearized(c2.as_atomic_cell(cdef), [&] (counter_cell_view cv) {
{
counter_cell_view cv(c2.as_atomic_cell(cdef));
BOOST_REQUIRE_EQUAL(cv.total_value(), 8);
verify_shard_order(cv);
});
}
counter_cell_view::apply(cdef, c1, c2);
counter_cell_view::with_linearized(c1.as_atomic_cell(cdef), [&] (counter_cell_view cv) {
{
counter_cell_view cv(c1.as_atomic_cell(cdef));
BOOST_REQUIRE_EQUAL(cv.total_value(), 4);
verify_shard_order(cv);
});
}
});
}
@@ -102,10 +105,11 @@ SEASTAR_TEST_CASE(test_apply) {
auto src = b.copy(*cdef.type);
counter_cell_view::apply(cdef, dst, src);
counter_cell_view::with_linearized(dst.as_atomic_cell(cdef), [&] (counter_cell_view cv) {
{
counter_cell_view cv(dst.as_atomic_cell(cdef));
BOOST_REQUIRE_EQUAL(cv.total_value(), value);
BOOST_REQUIRE_EQUAL(cv.timestamp(), std::max(dst.as_atomic_cell(cdef).timestamp(), src.as_atomic_cell(cdef).timestamp()));
});
}
};
auto id = generate_ids(5);
@@ -241,17 +245,19 @@ SEASTAR_TEST_CASE(test_counter_mutations) {
m.apply(m2);
auto ac = get_counter_cell(m);
BOOST_REQUIRE(ac.is_live());
counter_cell_view::with_linearized(ac, [&] (counter_cell_view ccv) {
{
counter_cell_view ccv(ac);
BOOST_REQUIRE_EQUAL(ccv.total_value(), -102);
verify_shard_order(ccv);
});
}
ac = get_static_counter_cell(m);
BOOST_REQUIRE(ac.is_live());
counter_cell_view::with_linearized(ac, [&] (counter_cell_view ccv) {
{
counter_cell_view ccv(ac);
BOOST_REQUIRE_EQUAL(ccv.total_value(), 20);
verify_shard_order(ccv);
});
}
m.apply(m3);
ac = get_counter_cell(m);
@@ -271,32 +277,36 @@ SEASTAR_TEST_CASE(test_counter_mutations) {
m = mutation(s, m1.decorated_key(), m1.partition().difference(s, m2.partition()));
ac = get_counter_cell(m);
BOOST_REQUIRE(ac.is_live());
counter_cell_view::with_linearized(ac, [&] (counter_cell_view ccv) {
{
counter_cell_view ccv(ac);
BOOST_REQUIRE_EQUAL(ccv.total_value(), 2);
verify_shard_order(ccv);
});
}
ac = get_static_counter_cell(m);
BOOST_REQUIRE(ac.is_live());
counter_cell_view::with_linearized(ac, [&] (counter_cell_view ccv) {
{
counter_cell_view ccv(ac);
BOOST_REQUIRE_EQUAL(ccv.total_value(), 11);
verify_shard_order(ccv);
});
}
m = mutation(s, m1.decorated_key(), m2.partition().difference(s, m1.partition()));
ac = get_counter_cell(m);
BOOST_REQUIRE(ac.is_live());
counter_cell_view::with_linearized(ac, [&] (counter_cell_view ccv) {
{
counter_cell_view ccv(ac);
BOOST_REQUIRE_EQUAL(ccv.total_value(), -105);
verify_shard_order(ccv);
});
}
ac = get_static_counter_cell(m);
BOOST_REQUIRE(ac.is_live());
counter_cell_view::with_linearized(ac, [&] (counter_cell_view ccv) {
{
counter_cell_view ccv(ac);
BOOST_REQUIRE_EQUAL(ccv.total_value(), 9);
verify_shard_order(ccv);
});
}
m = mutation(s, m1.decorated_key(), m1.partition().difference(s, m3.partition()));
BOOST_REQUIRE_EQUAL(m.partition().clustered_rows().calculate_size(), 0);
@@ -434,34 +444,38 @@ SEASTAR_TEST_CASE(test_transfer_updates_to_shards) {
auto ac = get_counter_cell(m);
BOOST_REQUIRE(ac.is_live());
counter_cell_view::with_linearized(ac, [&] (counter_cell_view ccv) {
{
counter_cell_view ccv(ac);
BOOST_REQUIRE_EQUAL(ccv.total_value(), 5);
verify_shard_order(ccv);
});
}
ac = get_static_counter_cell(m);
BOOST_REQUIRE(ac.is_live());
counter_cell_view::with_linearized(ac, [&] (counter_cell_view ccv) {
{
counter_cell_view ccv(ac);
BOOST_REQUIRE_EQUAL(ccv.total_value(), 4);
verify_shard_order(ccv);
});
}
m = m2;
transform_counter_updates_to_shards(m, &m0, 0, utils::UUID{});
ac = get_counter_cell(m);
BOOST_REQUIRE(ac.is_live());
counter_cell_view::with_linearized(ac, [&] (counter_cell_view ccv) {
{
counter_cell_view ccv(ac);
BOOST_REQUIRE_EQUAL(ccv.total_value(), 14);
verify_shard_order(ccv);
});
}
ac = get_static_counter_cell(m);
BOOST_REQUIRE(ac.is_live());
counter_cell_view::with_linearized(ac, [&] (counter_cell_view ccv) {
{
counter_cell_view ccv(ac);
BOOST_REQUIRE_EQUAL(ccv.total_value(), 12);
verify_shard_order(ccv);
});
}
m = m3;
transform_counter_updates_to_shards(m, &m0, 0, utils::UUID{});
@@ -519,14 +533,14 @@ SEASTAR_TEST_CASE(test_sanitize_corrupted_cells) {
auto c2 = atomic_cell_or_collection(b2.build(0));
// Compare
counter_cell_view::with_linearized(c1.as_atomic_cell(cdef), [&] (counter_cell_view cv1) {
counter_cell_view::with_linearized(c2.as_atomic_cell(cdef), [&] (counter_cell_view cv2) {
{
counter_cell_view cv1(c1.as_atomic_cell(cdef));
counter_cell_view cv2(c2.as_atomic_cell(cdef));
BOOST_REQUIRE_EQUAL(cv1, cv2);
BOOST_REQUIRE_EQUAL(cv1.total_value(), cv2.total_value());
verify_shard_order(cv1);
verify_shard_order(cv2);
});
});
}
}
});
}

View File

@@ -1,847 +0,0 @@
/*
* Copyright (C) 2018 ScyllaDB
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#include <seastar/testing/test_case.hh>
#include <seastar/testing/thread_test_case.hh>
#include <algorithm>
#include <random>
#include <boost/range/irange.hpp>
#include <boost/range/algorithm/copy.hpp>
#include <boost/range/algorithm/generate.hpp>
#include <seastar/util/variant_utils.hh>
#include "imr/fundamental.hh"
#include "imr/compound.hh"
#include "imr/methods.hh"
#include "imr/utils.hh"
#include "test/lib/failure_injecting_allocation_strategy.hh"
#include "utils/logalloc.hh"
#include "test/lib/random_utils.hh"
static constexpr auto random_test_iteration_count = 20;
class A;
class B;
class C;
class D;
BOOST_AUTO_TEST_SUITE(fundamental);
template<typename FillAB, typename FillBC>
struct generate_flags_type;
template<size_t... IdxAB, size_t... IdxBC>
struct generate_flags_type<std::index_sequence<IdxAB...>, std::index_sequence<IdxBC...>> {
using type = imr::flags<A, std::integral_constant<size_t, IdxAB>...,
B, std::integral_constant<ssize_t, IdxBC>..., C>;
};
SEASTAR_THREAD_TEST_CASE(test_flags) {
using flags_type = generate_flags_type<std::make_index_sequence<7>, std::make_index_sequence<8>>::type;
static constexpr size_t expected_size = 3;
BOOST_CHECK_EQUAL(flags_type::size_when_serialized(), expected_size);
BOOST_CHECK_EQUAL(flags_type::size_when_serialized(imr::set_flag<A>(),
imr::set_flag<B>(),
imr::set_flag<C>()), expected_size);
uint8_t buffer[expected_size];
std::fill_n(buffer, expected_size, 0xbe);
BOOST_CHECK_EQUAL(flags_type::serialize(buffer, imr::set_flag<B>()), expected_size);
auto mview = flags_type::make_view(buffer);
BOOST_CHECK(!mview.get<A>());
BOOST_CHECK(mview.get<B>());
BOOST_CHECK(!mview.get<C>());
mview.set<A>();
mview.set<B>(false);
BOOST_CHECK(mview.get<A>());
BOOST_CHECK(!mview.get<B>());
BOOST_CHECK(!mview.get<C>());
flags_type::view view = mview;
mview.set<C>();
BOOST_CHECK(view.get<A>());
BOOST_CHECK(!view.get<B>());
BOOST_CHECK(view.get<C>());
BOOST_CHECK_EQUAL(flags_type::serialized_object_size(buffer), expected_size);
int some_context;
BOOST_CHECK_EQUAL(flags_type::serialized_object_size(buffer, some_context), expected_size);
std::fill_n(buffer, expected_size, 0xff);
BOOST_CHECK_EQUAL(flags_type::serialize(buffer), expected_size);
BOOST_CHECK(!mview.get<A>());
BOOST_CHECK(!mview.get<B>());
BOOST_CHECK(!mview.get<C>());
}
struct test_pod_type {
int32_t x;
uint64_t y;
friend bool operator==(const test_pod_type& a, const test_pod_type& b) {
return a.x == b.x && a.y == b.y;
}
friend std::ostream& operator<<(std::ostream& os, const test_pod_type& obj) {
return os << "test_pod_type { x: " << obj.x << ", y: " << obj.y << " }";
}
};
SEASTAR_THREAD_TEST_CASE(test_pod) {
auto generate_object = [] {
std::uniform_int_distribution<decltype(test_pod_type::x)> dist_x;
std::uniform_int_distribution<decltype(test_pod_type::y)> dist_y;
return test_pod_type { dist_x(tests::random::gen()), dist_y(tests::random::gen()) };
};
using pod_type = imr::pod<test_pod_type>;
uint8_t buffer[pod_type::size];
for (auto i = 0; i < random_test_iteration_count; i++) {
auto obj = generate_object();
BOOST_CHECK_EQUAL(pod_type::size_when_serialized(obj), pod_type::size);
BOOST_CHECK_EQUAL(pod_type::serialize(buffer, obj), pod_type::size);
BOOST_CHECK_EQUAL(pod_type::serialized_object_size(buffer), pod_type::size);
int some_context;
BOOST_CHECK_EQUAL(pod_type::serialized_object_size(buffer, some_context), pod_type::size);
auto mview = pod_type::make_view(buffer);
pod_type::view view = mview;
BOOST_CHECK_EQUAL(mview.load(), obj);
BOOST_CHECK_EQUAL(view.load(), obj);
auto obj2 = generate_object();
mview.store(obj2);
BOOST_CHECK_EQUAL(mview.load(), obj2);
BOOST_CHECK_EQUAL(view.load(), obj2);
}
}
class test_buffer_context {
size_t _size;
public:
explicit test_buffer_context(size_t sz) : _size(sz) { }
template<typename Tag>
size_t size_of() const noexcept;
};
template<>
size_t test_buffer_context::size_of<A>() const noexcept {
return _size;
}
SEASTAR_THREAD_TEST_CASE(test_buffer) {
using buffer_type = imr::buffer<A>;
auto test = [] (auto serialize) {
auto data = tests::random::get_bytes();
auto size = data.size();
auto buffer = std::make_unique<uint8_t[]>(size);
serialize(buffer.get(), size, data);
const auto ctx = test_buffer_context(size);
BOOST_CHECK_EQUAL(buffer_type::serialized_object_size(buffer.get(), ctx), size);
BOOST_CHECK(boost::range::equal(buffer_type::make_view(buffer.get(), ctx), data));
BOOST_CHECK(boost::range::equal(buffer_type::make_view(const_cast<const uint8_t*>(buffer.get()), ctx), data));
BOOST_CHECK_EQUAL(buffer_type::make_view(buffer.get(), ctx).size(), size);
};
for (auto i = 0; i < random_test_iteration_count; i++) {
test([] (uint8_t* out, size_t size, const bytes& data) {
BOOST_CHECK_EQUAL(buffer_type::size_when_serialized(data), size);
BOOST_CHECK_EQUAL(buffer_type::serialize(out, data), size);
});
test([] (uint8_t* out, size_t size, const bytes& data) {
auto serializer = [&data] (uint8_t* out) noexcept {
boost::range::copy(data, out);
};
BOOST_CHECK_EQUAL(buffer_type::size_when_serialized(size, serializer), size);
BOOST_CHECK_EQUAL(buffer_type::serialize(out, size, serializer), size);
});
}
}
BOOST_AUTO_TEST_SUITE_END();
BOOST_AUTO_TEST_SUITE(compound);
struct test_optional_context {
template<typename Tag>
bool is_present() const noexcept;
template<typename Tag, typename... Args>
decltype(auto) context_for(Args&&...) const noexcept { return *this; }
};
template<>
bool test_optional_context::is_present<A>() const noexcept {
return true;
}
template<>
bool test_optional_context::is_present<B>() const noexcept {
return false;
}
SEASTAR_THREAD_TEST_CASE(test_optional) {
using optional_type1 = imr::optional<A, imr::pod<uint32_t>>;
using optional_type2 = imr::optional<B, imr::pod<uint32_t>>;
for (auto i = 0; i < random_test_iteration_count; i++) {
auto value = tests::random::get_int<uint32_t>();
auto expected_size = imr::pod<uint32_t>::size_when_serialized(value);
auto buffer = std::make_unique<uint8_t[]>(expected_size);
BOOST_CHECK_EQUAL(optional_type1::size_when_serialized(value), expected_size);
BOOST_CHECK_EQUAL(optional_type1::serialize(buffer.get(), value), expected_size);
BOOST_CHECK_EQUAL(optional_type1::serialized_object_size(buffer.get(), test_optional_context()), expected_size);
BOOST_CHECK_EQUAL(optional_type2::serialized_object_size(buffer.get(), test_optional_context()), 0);
auto view = optional_type1::make_view(buffer.get());
BOOST_CHECK_EQUAL(view.get().load(), value);
}
}
static constexpr auto data_size = 128;
using variant_type = imr::variant<A,
imr::member<B, imr::pod<uint64_t>>,
imr::member<C, imr::buffer<C>>,
imr::member<D, imr::pod<int64_t>>>;
struct test_variant_context {
unsigned _alternative_idx;
public:
template<typename Tag>
size_t size_of() const noexcept;
template<typename Tag>
auto active_alternative_of() const noexcept;
template<typename Tag, typename... Args>
decltype(auto) context_for(Args&&...) const noexcept { return *this; }
};
template<>
size_t test_variant_context::size_of<C>() const noexcept {
return data_size;
}
template<>
auto test_variant_context::active_alternative_of<A>() const noexcept {
switch (_alternative_idx) {
case 0:
return variant_type::index_for<B>();
case 1:
return variant_type::index_for<C>();
case 2:
return variant_type::index_for<D>();
default:
BOOST_FAIL("should not reach");
abort();
}
}
SEASTAR_THREAD_TEST_CASE(test_variant) {
for (auto i = 0; i < random_test_iteration_count; i++) {
unsigned alternative_idx = tests::random::get_int<unsigned>(2);
uint64_t uinteger = tests::random::get_int<uint64_t>();
int64_t integer = tests::random::get_int<int64_t>();
bytes data = tests::random::get_bytes(data_size);
const size_t expected_size = alternative_idx == 0
? imr::pod<uint64_t>::size_when_serialized(uinteger)
: (alternative_idx == 1 ? data_size : sizeof(int64_t));
auto buffer = std::make_unique<uint8_t[]>(expected_size);
if (!alternative_idx) {
BOOST_CHECK_EQUAL(variant_type::size_when_serialized<B>(uinteger), expected_size);
BOOST_CHECK_EQUAL(variant_type::serialize<B>(buffer.get(), uinteger), expected_size);
} else if (alternative_idx == 1) {
BOOST_CHECK_EQUAL(variant_type::size_when_serialized<C>(data), expected_size);
BOOST_CHECK_EQUAL(variant_type::serialize<C>(buffer.get(), data), expected_size);
} else {
BOOST_CHECK_EQUAL(variant_type::size_when_serialized<D>(integer), expected_size);
BOOST_CHECK_EQUAL(variant_type::serialize<D>(buffer.get(), integer), expected_size);
}
auto ctx = test_variant_context { alternative_idx };
BOOST_CHECK_EQUAL(variant_type::serialized_object_size(buffer.get(), ctx), expected_size);
auto view = variant_type::make_view(buffer.get(), ctx);
bool visitor_was_called = false;
view.visit(make_visitor(
[&] (imr::pod<uint64_t>::view val) {
visitor_was_called = true;
if (alternative_idx == 0) {
BOOST_CHECK_EQUAL(val.load(), uinteger);
} else {
BOOST_FAIL("wrong variant alternative (B)");
}
},
[&] (imr::buffer<C>::view buf) {
visitor_was_called = true;
if (alternative_idx == 1) {
BOOST_CHECK(boost::equal(data, buf));
} else {
BOOST_FAIL("wrong variant alternative (C)");
}
},
[&] (imr::pod<int64_t>::view val) {
visitor_was_called = true;
if (alternative_idx == 2) {
BOOST_CHECK_EQUAL(val.load(), integer);
} else {
BOOST_FAIL("wrong variant alternative (D)");
}
}
), ctx);
BOOST_CHECK(visitor_was_called);
}
}
SEASTAR_THREAD_TEST_CASE(test_structure_with_fixed) {
using S = imr::structure<imr::member<A, imr::pod<uint8_t>>,
imr::member<B, imr::pod<int64_t>>,
imr::member<C, imr::pod<uint32_t>>>;
static constexpr auto expected_size = sizeof(uint8_t) + sizeof(uint64_t)
+ sizeof(uint32_t);
for (auto i = 0; i < random_test_iteration_count; i++) {
auto a = tests::random::get_int<uint8_t>();
auto b = tests::random::get_int<uint64_t>();
auto c = tests::random::get_int<uint32_t>();
auto writer = [&] (auto&& serializer) noexcept {
return serializer
.serialize(a)
.serialize(b)
.serialize(c)
.done();
};
uint8_t buffer[expected_size];
BOOST_CHECK_EQUAL(S::size_when_serialized(writer), expected_size);
BOOST_CHECK_EQUAL(S::serialize(buffer, writer), expected_size);
BOOST_CHECK_EQUAL(S::serialized_object_size(buffer), expected_size);
auto mview = S::make_view(buffer);
BOOST_CHECK_EQUAL(mview.get<A>().load(), a);
BOOST_CHECK_EQUAL(mview.get<B>().load(), b);
BOOST_CHECK_EQUAL(mview.get<C>().load(), c);
auto view = S::make_view(const_cast<const uint8_t*>(buffer));
BOOST_CHECK_EQUAL(view.get<A>().load(), a);
BOOST_CHECK_EQUAL(view.get<B>().load(), b);
BOOST_CHECK_EQUAL(view.get<C>().load(), c);
a = tests::random::get_int<uint8_t>();
b = tests::random::get_int<uint64_t>();
c = tests::random::get_int<uint32_t>();
mview.get<A>().store(a);
mview.get<B>().store(b);
mview.get<C>().store(c);
BOOST_CHECK_EQUAL(view.get<A>().load(), a);
BOOST_CHECK_EQUAL(view.get<B>().load(), b);
BOOST_CHECK_EQUAL(view.get<C>().load(), c);
}
}
class test_structure_context {
bool _b_is_present;
size_t _c_size_of;
public:
test_structure_context(bool b_is_present, size_t c_size_of) noexcept
: _b_is_present(b_is_present), _c_size_of(c_size_of) { }
template<typename Tag>
bool is_present() const noexcept;
template<typename Tag>
size_t size_of() const noexcept;
template<typename Tag, typename... Args>
decltype(auto) context_for(Args&&...) const noexcept { return *this; }
};
template<>
bool test_structure_context::is_present<B>() const noexcept {
return _b_is_present;
}
template<>
size_t test_structure_context::size_of<C>() const noexcept {
return _c_size_of;
}
SEASTAR_THREAD_TEST_CASE(test_structure_with_context) {
using S = imr::structure<imr::member<A, imr::flags<B, C>>,
imr::optional_member<B, imr::pod<uint16_t>>,
imr::member<C, imr::buffer<C>>>;
for (auto i = 0; i < random_test_iteration_count; i++) {
auto b_value = tests::random::get_int<uint16_t>();
auto c_data = tests::random::get_bytes();
const auto expected_size = 1 + imr::pod<uint16_t>::size_when_serialized(b_value)
+ c_data.size();
auto writer = [&] (auto&& serializer) noexcept {
return serializer
.serialize(imr::set_flag<B>())
.serialize(b_value)
.serialize(c_data)
.done();
};
BOOST_CHECK_EQUAL(S::size_when_serialized(writer), expected_size);
auto buffer = std::make_unique<uint8_t[]>(expected_size);
BOOST_CHECK_EQUAL(S::serialize(buffer.get(), writer), expected_size);
auto ctx = test_structure_context(true, c_data.size());
BOOST_CHECK_EQUAL(S::serialized_object_size(buffer.get(), ctx), expected_size);
auto mview = S::make_view(buffer.get(), ctx);
BOOST_CHECK(mview.get<A>().get<B>());
BOOST_CHECK(!mview.get<A>().get<C>());
BOOST_CHECK_EQUAL(mview.get<B>().get().load(), b_value);
BOOST_CHECK(boost::range::equal(mview.get<C>(ctx), c_data));
auto view = S::view(mview);
BOOST_CHECK(view.get<A>().get<B>());
BOOST_CHECK(!view.get<A>().get<C>());
BOOST_CHECK_EQUAL(view.get<B>().get().load(), b_value);
BOOST_CHECK(boost::range::equal(view.get<C>(ctx), c_data));
}
}
SEASTAR_THREAD_TEST_CASE(test_structure_get_element_without_view) {
using S = imr::structure<imr::member<A, imr::flags<B, C>>,
imr::member<B, imr::pod<uint64_t>>,
imr::optional_member<C, imr::pod<uint16_t>>>;
auto uinteger = tests::random::get_int<uint64_t>();
static constexpr auto expected_size = 1 + sizeof(uint64_t);
auto writer = [&] (auto&& serializer) noexcept {
return serializer
.serialize(imr::set_flag<B>())
.serialize(uinteger)
.skip()
.done();
};
BOOST_CHECK_EQUAL(S::size_when_serialized(writer), expected_size);
uint8_t buffer[expected_size];
BOOST_CHECK_EQUAL(S::serialize(buffer, writer), expected_size);
auto fview = S::get_member<A>(buffer);
BOOST_CHECK(fview.get<B>());
BOOST_CHECK(!fview.get<C>());
auto uview = S::get_member<B>(buffer);
BOOST_CHECK_EQUAL(uview.load(), uinteger);
// FIXME test offset
}
SEASTAR_THREAD_TEST_CASE(test_nested_structure) {
using S1 = imr::structure<imr::optional_member<B, imr::pod<uint16_t>>,
imr::member<C, imr::buffer<C>>,
imr::member<A, imr::pod<uint8_t>>>;
using S = imr::structure<imr::member<A, imr::pod<uint16_t>>,
imr::member<B, S1>,
imr::member<C, imr::pod<uint32_t>>>;
for (auto i = 0; i < random_test_iteration_count; i++) {
auto b1_value = tests::random::get_int<uint16_t>();
auto c1_data = tests::random::get_bytes();
auto a1_value = tests::random::get_int<uint8_t>();
const auto expected_size1 = imr::pod<uint16_t>::size_when_serialized(b1_value)
+ c1_data.size() + sizeof(uint8_t);
auto a_value = tests::random::get_int<uint16_t>();
auto c_value = tests::random::get_int<uint32_t>();
const auto expected_size = sizeof(uint16_t) + expected_size1 + sizeof(uint32_t);
auto writer1 = [&] (auto&& serializer) noexcept {
return serializer
.serialize(b1_value)
.serialize(c1_data)
.serialize(a1_value)
.done();
};
auto writer = [&] (auto&& serializer) noexcept {
return serializer
.serialize(a_value)
.serialize(writer1)
.serialize(c_value)
.done();
};
BOOST_CHECK_EQUAL(S::size_when_serialized(writer), expected_size);
auto buffer = std::make_unique<uint8_t[]>(expected_size);
BOOST_CHECK_EQUAL(S::serialize(buffer.get(), writer), expected_size);
auto ctx = test_structure_context(true, c1_data.size());
BOOST_CHECK_EQUAL(S::serialized_object_size(buffer.get(), ctx), expected_size);
auto view = S::make_view(buffer.get(), ctx);
BOOST_CHECK_EQUAL(view.get<A>().load(), a_value);
BOOST_CHECK_EQUAL(view.get<B>(ctx).get<B>().get().load(), b1_value);
BOOST_CHECK(boost::range::equal(view.get<B>(ctx).get<C>(ctx), c1_data));
BOOST_CHECK_EQUAL(view.get<C>(ctx).load(), c_value);
}
}
BOOST_AUTO_TEST_SUITE_END();
struct object_with_destructor {
static size_t destruction_count;
static uint64_t last_destroyed_one;
static void reset() {
destruction_count = 0;
last_destroyed_one = 0;
}
uint64_t value;
};
size_t object_with_destructor::destruction_count = 0;
uint64_t object_with_destructor::last_destroyed_one = 0;
struct object_without_destructor {
uint64_t value;
};
namespace imr {
namespace methods {
template<>
struct destructor<pod<object_with_destructor>> {
template<typename... Args>
static void run(uint8_t* ptr, Args&&...) noexcept {
object_with_destructor::destruction_count++;
auto view = imr::pod<object_with_destructor>::make_view(ptr);
object_with_destructor::last_destroyed_one = view.load().value;
}
};
}
}
BOOST_AUTO_TEST_SUITE(methods);
SEASTAR_THREAD_TEST_CASE(test_simple_destructor) {
object_with_destructor::reset();
using O1 = imr::pod<object_with_destructor>;
using O2 = imr::pod<object_without_destructor>;
BOOST_CHECK(!imr::methods::is_trivially_destructible<O1>::value);
BOOST_CHECK(imr::methods::is_trivially_destructible<O2>::value);
static constexpr auto expected_size = sizeof(object_with_destructor);
uint8_t buffer[expected_size];
auto value = tests::random::get_int<uint64_t>();
BOOST_CHECK_EQUAL(O1::serialize(buffer, object_with_destructor { value }), expected_size);
imr::methods::destroy<O1>(buffer);
BOOST_CHECK_EQUAL(object_with_destructor::destruction_count, 1);
BOOST_CHECK_EQUAL(object_with_destructor::last_destroyed_one, value);
imr::methods::destroy<O2>(buffer);
BOOST_CHECK_EQUAL(object_with_destructor::destruction_count, 1);
BOOST_CHECK_EQUAL(object_with_destructor::last_destroyed_one, value);
}
SEASTAR_THREAD_TEST_CASE(test_structure_destructor) {
object_with_destructor::reset();
using S = imr::structure<imr::member<A, imr::pod<object_with_destructor>>,
imr::member<B, imr::pod<object_without_destructor>>,
imr::member<C, imr::pod<object_with_destructor>>>;
using S1 = imr::structure<imr::member<A, imr::pod<object_without_destructor>>,
imr::member<B, imr::pod<object_without_destructor>>,
imr::member<C, imr::pod<object_without_destructor>>>;
BOOST_CHECK(!imr::methods::is_trivially_destructible<S>::value);
BOOST_CHECK(imr::methods::is_trivially_destructible<S1>::value);
static constexpr auto expected_size = sizeof(object_with_destructor) * 3;
uint8_t buffer[expected_size];
auto a = tests::random::get_int<uint64_t>();
auto b = tests::random::get_int<uint64_t>();
auto c = tests::random::get_int<uint64_t>();
BOOST_CHECK_EQUAL(S::serialize(buffer, [&] (auto serializer) noexcept {
return serializer
.serialize(object_with_destructor { a })
.serialize(object_without_destructor { b })
.serialize(object_with_destructor { c })
.done();
}), expected_size);
imr::methods::destroy<S>(buffer);
BOOST_CHECK_EQUAL(object_with_destructor::destruction_count, 2);
BOOST_CHECK_EQUAL(object_with_destructor::last_destroyed_one, c);
imr::methods::destroy<S1>(buffer);
BOOST_CHECK_EQUAL(object_with_destructor::destruction_count, 2);
BOOST_CHECK_EQUAL(object_with_destructor::last_destroyed_one, c);
}
SEASTAR_THREAD_TEST_CASE(test_optional_destructor) {
object_with_destructor::reset();
using O1 = imr::optional<A, imr::pod<object_with_destructor>>;
using O2 = imr::optional<B, imr::pod<object_with_destructor>>;
using O3 = imr::optional<A, imr::pod<object_without_destructor>>;
BOOST_CHECK(!imr::methods::is_trivially_destructible<O1>::value);
BOOST_CHECK(!imr::methods::is_trivially_destructible<O2>::value);
BOOST_CHECK(imr::methods::is_trivially_destructible<O3>::value);
static constexpr auto expected_size = sizeof(object_with_destructor);
uint8_t buffer[expected_size];
auto value = tests::random::get_int<uint64_t>();
BOOST_CHECK_EQUAL(O1::serialize(buffer, object_with_destructor { value }), expected_size);
imr::methods::destroy<O2>(buffer, compound::test_optional_context());
BOOST_CHECK_EQUAL(object_with_destructor::destruction_count, 0);
BOOST_CHECK_EQUAL(object_with_destructor::last_destroyed_one, 0);
imr::methods::destroy<O1>(buffer, compound::test_optional_context());
BOOST_CHECK_EQUAL(object_with_destructor::destruction_count, 1);
BOOST_CHECK_EQUAL(object_with_destructor::last_destroyed_one, value);
imr::methods::destroy<O3>(buffer, compound::test_optional_context());
BOOST_CHECK_EQUAL(object_with_destructor::destruction_count, 1);
BOOST_CHECK_EQUAL(object_with_destructor::last_destroyed_one, value);
}
using V = imr::variant<A,
imr::member<B, imr::pod<object_with_destructor>>,
imr::member<C, imr::pod<object_without_destructor>>>;
struct test_variant_context {
bool _alternative_b;
public:
template<typename Tag>
auto active_alternative_of() const noexcept;
template<typename Tag>
decltype(auto) context_for(...) const noexcept { return *this; }
};
template<>
auto test_variant_context::active_alternative_of<A>() const noexcept {
if (_alternative_b) {
return V::index_for<B>();
} else {
return V::index_for<C>();
}
}
// Verifies that destroying an IMR variant runs the destructor of the active
// alternative only, and that trivial-destructibility is derived correctly.
SEASTAR_THREAD_TEST_CASE(test_variant_destructor) {
    object_with_destructor::reset();

    // A variant is trivially destructible iff all of its alternatives are:
    // V contains object_with_destructor, V1 does not.
    using V1 = imr::variant<A, imr::member<B, imr::pod<object_without_destructor>>>;
    BOOST_CHECK(!imr::methods::is_trivially_destructible<V>::value);
    BOOST_CHECK(imr::methods::is_trivially_destructible<V1>::value);

    static constexpr auto expected_size = sizeof(object_with_destructor);
    uint8_t buffer[expected_size];

    // Serialize alternative B holding a random marker value.
    auto value = tests::random::get_int<uint64_t>();
    BOOST_CHECK_EQUAL(V::serialize<B>(buffer, object_with_destructor { value }), expected_size);

    // Context claims the trivially destructible alternative C is active:
    // no destructor may run.
    imr::methods::destroy<V>(buffer, test_variant_context { false });
    BOOST_CHECK_EQUAL(object_with_destructor::destruction_count, 0);
    BOOST_CHECK_EQUAL(object_with_destructor::last_destroyed_one, 0);

    // Context claims alternative B is active: exactly one destructor runs,
    // for the object serialized above.
    imr::methods::destroy<V>(buffer, test_variant_context { true });
    BOOST_CHECK_EQUAL(object_with_destructor::destruction_count, 1);
    BOOST_CHECK_EQUAL(object_with_destructor::last_destroyed_one, value);
}
BOOST_AUTO_TEST_SUITE_END();
// Types and contexts for the exception-safety test below: a top-level IMR
// structure that owns two heap-allocated nested structures via tagged
// pointer members.
namespace object_exception_safety {

// Child object layout: a size_t header (A) followed by a buffer (B).
using nested_structure = imr::structure<
    imr::member<A, imr::pod<size_t>>,
    imr::member<B, imr::buffer<B>>
>;

// Top-level layout: a size_t header, two pointer members tagged with C (so the
// imr::methods::destructor specialization for that tagged type runs on them),
// and a buffer.
using structure = imr::structure<
    imr::member<A, imr::pod<size_t>>,
    imr::member<C, imr::tagged_type<C, imr::pod<void*>>>,
    imr::member<D, imr::tagged_type<C, imr::pod<void*>>>,
    imr::member<B, imr::buffer<A>>
>;

// Context for `structure`; reads the buffer size from the size_t header.
// NOTE(review): "structue" is a typo for "structure"; kept as-is because the
// name is referenced by the exception-safety test.
struct structue_context {
    size_t _size;
    structue_context(const uint8_t* ptr)
        : _size(imr::pod<size_t>::make_view(ptr).load())
    {
        // The test always serializes 4 as the top-level buffer size.
        BOOST_CHECK_EQUAL(_size, 4);
    }
    template<typename Tag>
    size_t size_of() const noexcept {
        return _size;
    }
    template<typename Tag, typename... Args>
    decltype(auto) context_for(Args&&...) const noexcept { return *this; }
};

// Context for `nested_structure`; same shape, but only requires a non-zero
// size (the test uses 128 and 1024).
struct nested_structue_context {
    size_t _size;
    nested_structue_context(const uint8_t* ptr)
        : _size(imr::pod<size_t>::make_view(ptr).load())
    {
        BOOST_CHECK_NE(_size, 0);
    }
    template<typename Tag>
    size_t size_of() const noexcept {
        return _size;
    }
    template<typename Tag, typename... Args>
    decltype(auto) context_for(Args&&...) const noexcept { return *this; }
};

}
namespace imr::methods {

// Custom destructor for the owning pointer members of
// object_exception_safety::structure: destroys the pointed-to nested_structure
// and releases its memory through the current allocator.
template<>
struct destructor<imr::tagged_type<C, imr::pod<void*>>> {
    static void run(uint8_t* ptr, ...) {
        using namespace object_exception_safety;
        // NOTE(review): the member is declared as imr::pod<void*> but read back
        // here as imr::pod<uint8_t*>; this relies on both pod instantiations
        // having identical layout — confirm intended.
        auto obj_ptr = imr::pod<uint8_t*>::make_view(ptr).load();
        imr::methods::destroy<nested_structure>(obj_ptr, nested_structue_context(obj_ptr));
        current_allocator().free(obj_ptr);
    }
};

}
// Injects an allocation failure at every possible point during construction of
// a multi-allocation IMR object and verifies no memory is leaked either on
// failure or on eventual success.
SEASTAR_THREAD_TEST_CASE(test_object_exception_safety) {
    using namespace object_exception_safety;

    // LSA migrators for the top-level object and the nested children.
    using context_factory_for_structure = imr::alloc::context_factory<imr::utils::object_context<structue_context>>;
    using lsa_migrator_fn_for_structure = imr::alloc::lsa_migrate_fn<imr::utils::object<structure>::structure, context_factory_for_structure>;
    auto migrator_for_structure = lsa_migrator_fn_for_structure(context_factory_for_structure());

    using context_factory_for_nested_structure = imr::alloc::context_factory<nested_structue_context>;
    using lsa_migrator_fn_for_nested_structure = imr::alloc::lsa_migrate_fn<nested_structure, context_factory_for_nested_structure>;
    auto migrator_for_nested_structure = lsa_migrator_fn_for_nested_structure(context_factory_for_nested_structure());

    // Writer performing three allocations in total: two nested_structure
    // children (128- and 1024-byte buffers) plus the top-level object itself.
    auto writer_fn = [&] (auto serializer, auto& allocator) {
        return serializer
            .serialize(4)
            .serialize(allocator.template allocate<nested_structure>(
                &migrator_for_nested_structure,
                [&] (auto nested_serializer) {
                    return nested_serializer
                        .serialize(128)
                        .serialize(128, [] (auto&&...) noexcept { })
                        .done();
                }
            ))
            .serialize(allocator.template allocate<nested_structure>(
                &migrator_for_nested_structure,
                [&] (auto nested_serializer) {
                    return nested_serializer
                        .serialize(1024)
                        .serialize(1024, [] (auto&&...) noexcept { })
                        .done();
                }
            ))
            .serialize(bytes(4, 'a'))
            .done();
    };

    logalloc::region reg;
    size_t fail_offset = 0;
    auto allocator = failure_injecting_allocation_strategy(reg.allocator());
    with_allocator(allocator, [&] {
        // Fail the 1st, 2nd, 3rd, ... allocation in successive iterations.
        // Whether construction throws or finally succeeds, the region must be
        // back to zero used space afterwards (no leak in either path).
        while (true) {
            allocator.fail_after(fail_offset++);
            try {
                imr::utils::object<structure>::make(writer_fn, &migrator_for_structure);
            } catch (const std::bad_alloc&) {
                BOOST_CHECK_EQUAL(reg.occupancy().used_space(), 0);
                continue;
            }
            BOOST_CHECK_EQUAL(reg.occupancy().used_space(), 0);
            break;
        }
    });
    // Success on the 4th attempt (fail_offset incremented once past the
    // successful run) confirms exactly 3 allocations were performed.
    BOOST_CHECK_EQUAL(fail_offset, 4);
}

View File

@@ -1,220 +0,0 @@
/*
* Copyright (C) 2018 ScyllaDB
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#define BOOST_TEST_MODULE meta
#include <boost/test/unit_test.hpp>
#include <seastar/util/log.hh>
#include "utils/meta.hh"
namespace internal {

// Forces a value to be a compile-time constant by routing it through a
// non-type template parameter; check<N>::value then reproduces N and is
// usable wherever a constant expression is required.
template<typename T>
struct check_constexpr {
    template<T N>
    struct check {
        static constexpr T value = N;
    };
};

// Extracts the first parameter type from a function type. The primary
// template is intentionally empty so misuse fails to compile.
template<typename T>
struct first_argument { };

template<typename R, typename T, typename... Ts>
struct first_argument<R(T, Ts...)> {
    using type = T;
};

}
// Asserts at compile time that the integral expression `actual` equals
// `expected`: routing `actual` through check_constexpr forces it to be a
// constant expression; Boost.Test then reports the values on mismatch.
#define INTERNAL_STATIC_CHECK_EQUAL(expected, actual, actual_str) \
BOOST_CHECK_MESSAGE(internal::check_constexpr<std::decay_t<decltype(actual)>>::check<(actual)>::value == (expected), \
actual_str " expected to be equal " #expected " [actual: " << (actual) << ", expected: " << (expected) << "]")
// Reports the result of a type-equality check (`expr`), pretty-printing both
// type names. Types are smuggled through `void(T)` function types so that
// first_argument can recover them even when they contain commas.
#define INTERNAL_STATIC_CHECK_SAME(expr, expected, actual, actual_str) \
BOOST_CHECK_MESSAGE(expr, actual_str " expected to be the same as " #expected \
" [actual: " << seastar::pretty_type_name(typeid(typename internal::first_argument<void(actual)>::type)) << ", expected: " \
<< seastar::pretty_type_name(typeid(internal::first_argument<void(expected)>::type)) << "]")
// Public entry point: compile-time value equality.
#define STATIC_CHECK_EQUAL(expected, ...) \
INTERNAL_STATIC_CHECK_EQUAL(expected, (__VA_ARGS__), #__VA_ARGS__)
// Public entry point: compile-time type equality (parenthesize types that
// contain commas).
#define STATIC_CHECK_SAME(expected, ...) \
INTERNAL_STATIC_CHECK_SAME((std::is_same<__VA_ARGS__, typename internal::first_argument<void(expected)>::type>::value), expected, (__VA_ARGS__), #__VA_ARGS__)
// Distinct empty tag types used as compile-time markers by the
// meta-programming tests below.
struct A { };
struct B { };
struct C { };
struct D { };
// meta::find yields the zero-based index of the first occurrence of a type,
// whether the haystack is a bare parameter pack or a meta::list.
BOOST_AUTO_TEST_CASE(find) {
    // Bare parameter pack.
    STATIC_CHECK_EQUAL(0, meta::find<A, A, B, C, D>);
    STATIC_CHECK_EQUAL(1, meta::find<B, A, B, C, D>);
    STATIC_CHECK_EQUAL(2, meta::find<C, A, B, C, D>);
    STATIC_CHECK_EQUAL(3, meta::find<D, A, B, C, D>);
    // Duplicates: the first match wins.
    STATIC_CHECK_EQUAL(0, meta::find<A, A>);
    STATIC_CHECK_EQUAL(0, meta::find<A, A, A>);
    STATIC_CHECK_EQUAL(1, meta::find<A, B, A, A>);
    // Same checks with the haystack wrapped in a meta::list.
    STATIC_CHECK_EQUAL(0, meta::find<A, meta::list<A, B, C, D>>);
    STATIC_CHECK_EQUAL(1, meta::find<B, meta::list<A, B, C, D>>);
    STATIC_CHECK_EQUAL(2, meta::find<C, meta::list<A, B, C, D>>);
    STATIC_CHECK_EQUAL(3, meta::find<D, meta::list<A, B, C, D>>);
    STATIC_CHECK_EQUAL(0, meta::find<A, meta::list<A>>);
    STATIC_CHECK_EQUAL(0, meta::find<A, meta::list<A, A>>);
    STATIC_CHECK_EQUAL(1, meta::find<A, meta::list<B, A, A>>);
    // The needle may itself be a meta::list.
    STATIC_CHECK_EQUAL(1, meta::find<meta::list<A>, meta::list<B>, meta::list<A>>);
    STATIC_CHECK_EQUAL(1, meta::find<meta::list<A>, meta::list<meta::list<B>, meta::list<A>>>);
}
// meta::get yields the type at the given zero-based index, whether the
// sequence is a bare parameter pack or a meta::list.
BOOST_AUTO_TEST_CASE(get) {
    // Bare parameter pack.
    STATIC_CHECK_SAME(A, meta::get<0, A, B, C, D>);
    STATIC_CHECK_SAME(B, meta::get<1, A, B, C, D>);
    STATIC_CHECK_SAME(C, meta::get<2, A, B, C, D>);
    STATIC_CHECK_SAME(D, meta::get<3, A, B, C, D>);
    // Same checks through a meta::list.
    STATIC_CHECK_SAME(A, meta::get<0, meta::list<A, B, C, D>>);
    STATIC_CHECK_SAME(B, meta::get<1, meta::list<A, B, C, D>>);
    STATIC_CHECK_SAME(C, meta::get<2, meta::list<A, B, C, D>>);
    STATIC_CHECK_SAME(D, meta::get<3, meta::list<A, B, C, D>>);
    STATIC_CHECK_SAME(A, meta::get<0, meta::list<A>>);
    // Nested lists are treated as single elements.
    STATIC_CHECK_SAME(meta::list<A>, meta::get<0, meta::list<meta::list<A>>>);
}
// meta::take<N, ...> yields a meta::list of the first N elements of a pack or
// a meta::list; nested lists count as single elements.
BOOST_AUTO_TEST_CASE(take) {
    // Bare parameter pack.
    STATIC_CHECK_SAME(meta::list<A>, meta::take<1, A, B, C, D>);
    STATIC_CHECK_SAME((meta::list<A, B>), meta::take<2, A, B, C, D>);
    STATIC_CHECK_SAME((meta::list<A, B, C>), meta::take<3, A, B, C, D>);
    STATIC_CHECK_SAME((meta::list<A, B, C, D>), meta::take<4, A, B, C, D>);
    // Same checks through a meta::list.
    STATIC_CHECK_SAME(meta::list<A>, meta::take<1, meta::list<A, B, C, D>>);
    STATIC_CHECK_SAME((meta::list<A, B>), meta::take<2, meta::list<A, B, C, D>>);
    STATIC_CHECK_SAME((meta::list<A, B, C>), meta::take<3, meta::list<A, B, C, D>>);
    STATIC_CHECK_SAME((meta::list<A, B, C, D>), meta::take<4, meta::list<A, B, C, D>>);
    STATIC_CHECK_SAME(meta::list<A>, meta::take<1, meta::list<A>>);
    // Inner lists are not flattened.
    STATIC_CHECK_SAME(meta::list<meta::list<A>>, meta::take<1, meta::list<meta::list<A>>>);
    STATIC_CHECK_SAME((meta::list<meta::list<A, B>>), meta::take<1, meta::list<meta::list<A, B>>>);
}
// meta::size counts the elements of a parameter pack, or of a meta::list when
// the list is the sole argument; a list among several arguments counts as one
// element.
BOOST_AUTO_TEST_CASE(size) {
    // Bare parameter pack.
    STATIC_CHECK_EQUAL(0, meta::size<>);
    STATIC_CHECK_EQUAL(1, meta::size<A>);
    STATIC_CHECK_EQUAL(2, meta::size<A, B>);
    STATIC_CHECK_EQUAL(3, meta::size<A, B, C>);
    STATIC_CHECK_EQUAL(4, meta::size<A, B, C, D>);
    // A single meta::list argument is unwrapped and its contents counted.
    STATIC_CHECK_EQUAL(0, meta::size<meta::list<>>);
    STATIC_CHECK_EQUAL(1, meta::size<meta::list<A>>);
    STATIC_CHECK_EQUAL(2, meta::size<meta::list<A, B>>);
    STATIC_CHECK_EQUAL(3, meta::size<meta::list<A, B, C>>);
    STATIC_CHECK_EQUAL(4, meta::size<meta::list<A, B, C, D>>);
    // Nested lists count as single elements.
    STATIC_CHECK_EQUAL(1, meta::size<meta::list<meta::list<A, B>>>);
    STATIC_CHECK_EQUAL(3, meta::size<meta::list<A, B>, C, D>);
    STATIC_CHECK_EQUAL(3, meta::size<meta::list<meta::list<A, B>, C, D>>);
}
// Functor that counts how many types it is invoked with; all operations are
// constexpr so the count is usable in constant expressions.
class constexpr_count_all_fn {
    size_t _count = 0;
public:
    constexpr constexpr_count_all_fn() = default;

    // Invoked once per type; the argument's value is irrelevant.
    template<typename T>
    constexpr void operator()(T) { ++_count; }

    // Number of invocations observed so far.
    constexpr size_t get() { return _count; }
};

// Returns, at compile time, the number of elements meta::for_each visits for
// the given pack (or meta::list).
template<typename... Ts>
constexpr size_t constexpr_count_all()
{
    constexpr_count_all_fn counter;
    meta::for_each<Ts...>(counter);
    return counter.get();
}
// meta::for_each invokes the callback once per type, in order, for packs,
// meta::lists, and computed lists; it must also be usable in constant
// expressions (exercised via constexpr_count_all).
BOOST_AUTO_TEST_CASE(for_each) {
    STATIC_CHECK_EQUAL(0, constexpr_count_all<>());
    STATIC_CHECK_EQUAL(4, constexpr_count_all<A, B, C, D>());

    // The callback receives a pointer-typed argument per element; only its
    // static type is inspected here, never its value.
    size_t n = 0;
    meta::for_each<A, B, C, D>([&] (auto&& ptr) {
        using type = std::remove_pointer_t<std::decay_t<decltype(ptr)>>;
        switch (n) {
        case 0: STATIC_CHECK_SAME(A, type); break;
        case 1: STATIC_CHECK_SAME(B, type); break;
        case 2: STATIC_CHECK_SAME(C, type); break;
        case 3: STATIC_CHECK_SAME(D, type); break;
        default: BOOST_FAIL("should not reach"); break;
        }
        n++;
    });
    BOOST_CHECK_EQUAL(4, n);

    // Same behavior when the types are wrapped in a meta::list.
    STATIC_CHECK_EQUAL(0, constexpr_count_all<meta::list<>>());
    STATIC_CHECK_EQUAL(4, constexpr_count_all<meta::list<A, B, C, D>>());
    n = 0;
    meta::for_each<meta::list<A, B, C, D>>([&] (auto ptr) {
        using type = std::remove_pointer_t<decltype(ptr)>;
        switch (n) {
        case 0: STATIC_CHECK_SAME(A, type); break;
        case 1: STATIC_CHECK_SAME(B, type); break;
        case 2: STATIC_CHECK_SAME(C, type); break;
        case 3: STATIC_CHECK_SAME(D, type); break;
        default: BOOST_FAIL("should not reach"); break;
        }
        n++;
    });
    BOOST_CHECK_EQUAL(4, n);

    // for_each composes with meta::take.
    n = 0;
    meta::for_each<meta::take<2, A, B, C, D>>([&] (auto ptr) {
        using type = std::remove_pointer_t<decltype(ptr)>;
        switch (n) {
        case 0: STATIC_CHECK_SAME(A, type); break;
        case 1: STATIC_CHECK_SAME(B, type); break;
        default: BOOST_FAIL("should not reach"); break;
        }
        n++;
    });
    BOOST_CHECK_EQUAL(2, n);

    // ...including when the take count is itself computed with meta::size.
    n = 0;
    using list = meta::list<A, B, C, D>;
    meta::for_each<meta::take<meta::size<list> - 1, list>>([&] (auto ptr) {
        using type = std::remove_pointer_t<decltype(ptr)>;
        switch (n) {
        case 0: STATIC_CHECK_SAME(A, type); break;
        case 1: STATIC_CHECK_SAME(B, type); break;
        case 2: STATIC_CHECK_SAME(C, type); break;
        default: BOOST_FAIL("should not reach"); break;
        }
        n++;
    });
    BOOST_CHECK_EQUAL(3, n);
}

View File

@@ -432,12 +432,11 @@ static bytes make_payload(const schema& schema, size_t size, const partition_key
return std::move(buf_os).detach();
}
static bool validate_payload(const schema& schema, data::value_view payload_view, const partition_key& pk, const clustering_key* const ck) {
auto istream = fragmented_memory_input_stream(payload_view.begin(), payload_view.size_bytes());
static bool validate_payload(const schema& schema, atomic_cell_value_view payload_view, const partition_key& pk, const clustering_key* const ck) {
auto istream = fragmented_memory_input_stream(fragment_range(payload_view).begin(), payload_view.size());
auto head = ser::deserialize(istream, boost::type<blob_header>{});
const size_t actual_size = payload_view.size_bytes();
const size_t actual_size = payload_view.size();
if (head.size != actual_size) {
testlog.error("Validating payload for pk={}, ck={} failed, sizes differ: stored={}, actual={}", pk, seastar::lazy_deref(ck), head.size,

View File

@@ -2534,7 +2534,7 @@ std::deque<mutation_fragment> make_fragments_with_non_monotonic_positions(simple
size_t mem_usage = fragments.back().memory_usage();
for (int buffers = 0; buffers < 2; ++buffers) {
while (mem_usage <= max_buffer_size) {
while (mem_usage < max_buffer_size) {
fragments.emplace_back(*s.schema(), tests::make_permit(),
s.make_range_tombstone(query::clustering_range::make(s.make_ckey(0), s.make_ckey(i + 1)), tombstone_deletion_time));
mem_usage += fragments.back().memory_usage();

View File

@@ -1902,7 +1902,7 @@ SEASTAR_TEST_CASE(test_continuity_merging) {
}
class measuring_allocator final : public allocation_strategy {
size_t _allocated_bytes;
size_t _allocated_bytes = 0;
public:
measuring_allocator() {
_preferred_max_contiguous_allocation = standard_allocator().preferred_max_contiguous_allocation();
@@ -2002,7 +2002,6 @@ SEASTAR_THREAD_TEST_CASE(test_cell_external_memory_usage) {
auto before = alloc.allocated_bytes();
auto ac = atomic_cell_or_collection(atomic_cell::make_live(*dt, 1, bv));
auto after = alloc.allocated_bytes();
BOOST_CHECK_GE(ac.external_memory_usage(*dt), bv.size());
BOOST_CHECK_EQUAL(ac.external_memory_usage(*dt), after - before);
});
};
@@ -2030,7 +2029,6 @@ SEASTAR_THREAD_TEST_CASE(test_cell_external_memory_usage) {
auto before = alloc.allocated_bytes();
auto cell2 = cell.copy(*collection_type);
auto after = alloc.allocated_bytes();
BOOST_CHECK_GE(cell2.external_memory_usage(*collection_type), bv.size());
BOOST_CHECK_EQUAL(cell2.external_memory_usage(*collection_type), cell.external_memory_usage(*collection_type));
BOOST_CHECK_EQUAL(cell2.external_memory_usage(*collection_type), after - before);
});

View File

@@ -1607,14 +1607,15 @@ SEASTAR_THREAD_TEST_CASE(test_uncompressed_counters_read) {
assertions.push_back([&, timestamp, value, clock] (const column_definition& def,
const atomic_cell_or_collection* cell) {
BOOST_REQUIRE(def.is_counter());
counter_cell_view::with_linearized(cell->as_atomic_cell(def), [&] (counter_cell_view cv) {
{
counter_cell_view cv(cell->as_atomic_cell(def));
BOOST_REQUIRE_EQUAL(timestamp, cv.timestamp());
BOOST_REQUIRE_EQUAL(1, cv.shard_count());
auto shard = cv.get_shard(HOST_ID);
BOOST_REQUIRE(shard);
BOOST_REQUIRE_EQUAL(value, shard->value());
BOOST_REQUIRE_EQUAL(clock, shard->logical_clock());
});
}
});
return assertions;

View File

@@ -1156,8 +1156,8 @@ SEASTAR_TEST_CASE(compact) {
auto &cells = row.cells();
auto& cdef1 = *s->get_column_definition("age");
auto& cdef2 = *s->get_column_definition("height");
BOOST_REQUIRE(cells.cell_at(cdef1.id).as_atomic_cell(cdef1).value() == bytes({0,0,0,40}));
BOOST_REQUIRE(cells.cell_at(cdef2.id).as_atomic_cell(cdef2).value() == bytes({0,0,0,(int8_t)170}));
BOOST_REQUIRE(cells.cell_at(cdef1.id).as_atomic_cell(cdef1).value() == managed_bytes({0,0,0,40}));
BOOST_REQUIRE(cells.cell_at(cdef2.id).as_atomic_cell(cdef2).value() == managed_bytes({0,0,0,(int8_t)170}));
return read_mutation_from_flat_mutation_reader(*reader, db::no_timeout);
}).then([reader, s] (mutation_opt m) {
BOOST_REQUIRE(m);
@@ -1170,8 +1170,8 @@ SEASTAR_TEST_CASE(compact) {
auto &cells = row.cells();
auto& cdef1 = *s->get_column_definition("age");
auto& cdef2 = *s->get_column_definition("height");
BOOST_REQUIRE(cells.cell_at(cdef1.id).as_atomic_cell(cdef1).value() == bytes({0,0,0,20}));
BOOST_REQUIRE(cells.cell_at(cdef2.id).as_atomic_cell(cdef2).value() == bytes({0,0,0,(int8_t)180}));
BOOST_REQUIRE(cells.cell_at(cdef1.id).as_atomic_cell(cdef1).value() == managed_bytes({0,0,0,20}));
BOOST_REQUIRE(cells.cell_at(cdef2.id).as_atomic_cell(cdef2).value() == managed_bytes({0,0,0,(int8_t)180}));
return read_mutation_from_flat_mutation_reader(*reader, db::no_timeout);
}).then([reader, s] (mutation_opt m) {
BOOST_REQUIRE(m);
@@ -1184,7 +1184,7 @@ SEASTAR_TEST_CASE(compact) {
auto &cells = row.cells();
auto& cdef1 = *s->get_column_definition("age");
auto& cdef2 = *s->get_column_definition("height");
BOOST_REQUIRE(cells.cell_at(cdef1.id).as_atomic_cell(cdef1).value() == bytes({0,0,0,20}));
BOOST_REQUIRE(cells.cell_at(cdef1.id).as_atomic_cell(cdef1).value() == managed_bytes({0,0,0,20}));
BOOST_REQUIRE(cells.find_cell(cdef2.id) == nullptr);
return read_mutation_from_flat_mutation_reader(*reader, db::no_timeout);
}).then([reader, s] (mutation_opt m) {
@@ -2450,7 +2450,7 @@ SEASTAR_TEST_CASE(check_multi_schema) {
auto& cells = row.cells();
BOOST_REQUIRE_EQUAL(cells.size(), 1);
auto& cdef = *s->get_column_definition("e");
BOOST_REQUIRE_EQUAL(cells.cell_at(cdef.id).as_atomic_cell(cdef).value(), int32_type->decompose(5));
BOOST_REQUIRE_EQUAL(cells.cell_at(cdef.id).as_atomic_cell(cdef).value(), managed_bytes(int32_type->decompose(5)));
return (*reader)(db::no_timeout);
}).then([reader, s] (mutation_fragment_opt m) {
BOOST_REQUIRE(!m);
@@ -2761,7 +2761,7 @@ SEASTAR_TEST_CASE(test_counter_read) {
BOOST_REQUIRE(mfopt->is_clustering_row());
const clustering_row* cr = &mfopt->as_clustering_row();
cr->cells().for_each_cell([&] (column_id id, const atomic_cell_or_collection& c) {
counter_cell_view::with_linearized(c.as_atomic_cell(s->regular_column_at(id)), [&] (counter_cell_view ccv) {
counter_cell_view ccv(c.as_atomic_cell(s->regular_column_at(id)));
auto& col = s->column_at(column_kind::regular_column, id);
if (col.name_as_text() == "c1") {
BOOST_REQUIRE_EQUAL(ccv.total_value(), 13);
@@ -2782,7 +2782,6 @@ SEASTAR_TEST_CASE(test_counter_read) {
} else {
BOOST_FAIL(format("Unexpected column \'{}\'", col.name_as_text()));
}
});
});
mfopt = reader(db::no_timeout).get0();
@@ -4959,12 +4958,11 @@ SEASTAR_TEST_CASE(test_wrong_counter_shard_order) {
size_t n = 0;
row.cells().for_each_cell([&] (column_id id, const atomic_cell_or_collection& ac_o_c) {
auto acv = ac_o_c.as_atomic_cell(s->regular_column_at(id));
counter_cell_view::with_linearized(acv, [&] (counter_cell_view ccv) {
counter_cell_view ccv(acv);
counter_shard_view::less_compare_by_id cmp;
BOOST_REQUIRE_MESSAGE(boost::algorithm::is_sorted(ccv.shards(), cmp), ccv << " is expected to be sorted");
BOOST_REQUIRE_EQUAL(ccv.total_value(), expected_value);
n++;
});
});
BOOST_REQUIRE_EQUAL(n, 5);
};
@@ -5678,7 +5676,7 @@ SEASTAR_TEST_CASE(sstable_run_based_compaction_test) {
auto tokens = token_generation_for_current_shard(16);
std::unordered_set<shared_sstable> sstables;
std::optional<utils::observer<sstable&>> observer;
std::vector<utils::observer<sstable&>> observers;
sstables::sstable_run_based_compaction_strategy_for_tests cs;
auto do_replace = [&] (const std::vector<shared_sstable>& old_sstables, const std::vector<shared_sstable>& new_sstables) {
@@ -5701,15 +5699,20 @@ SEASTAR_TEST_CASE(sstable_run_based_compaction_test) {
// check that sstable replacement follows token order
BOOST_REQUIRE(*expected_sst == old_sstables.front()->generation());
expected_sst++;
// check that previously released sstable was already closed
BOOST_REQUIRE(*closed_sstables_tracker == old_sstables.front()->generation());
// check that previously released sstables were already closed
if (old_sstables.front()->generation() % 4 == 0) {
// Due to performance reasons, sstables are not released immediately, but in batches.
// At the time of writing, mutation_reader_merger releases it's sstable references
// in batches of 4. That's why we only perform this check every 4th sstable.
BOOST_REQUIRE(*closed_sstables_tracker == old_sstables.front()->generation());
}
do_replace(old_sstables, new_sstables);
observer = old_sstables.front()->add_on_closed_handler([&] (sstable& sst) {
observers.push_back(old_sstables.front()->add_on_closed_handler([&] (sstable& sst) {
testlog.info("Closing sstable of generation {}", sst.generation());
closed_sstables_tracker++;
});
}));
testlog.info("Removing sstable of generation {}, refcnt: {}", old_sstables.front()->generation(), old_sstables.front().use_count());
};
@@ -5745,7 +5748,6 @@ SEASTAR_TEST_CASE(sstable_run_based_compaction_test) {
};
auto result = compact(std::move(desc.sstables), replacer);
observer.reset();
BOOST_REQUIRE_EQUAL(expected_output, result.size());
BOOST_REQUIRE(expected_sst == sstable_run.end());
return result;

View File

@@ -27,10 +27,10 @@
#include <boost/range/irange.hpp>
#include <boost/range/algorithm/generate.hpp>
#include "data/cell.hh"
#include "test/lib/random_utils.hh"
#include "utils/disk-error-handler.hh"
#include "atomic_cell.hh"
#include "types.hh"
BOOST_AUTO_TEST_CASE(test_atomic_cell) {
struct test_case {
@@ -44,16 +44,14 @@ BOOST_AUTO_TEST_CASE(test_atomic_cell) {
auto cases = std::vector<test_case> {
// Live, fixed-size, empty cell
{ true, true, bytes(), false },
// Live, fixed-size cell
{ true, true, tests::random::get_bytes(data::cell::maximum_internal_storage_length / 2), false, false },
// Live, variable-size (small), cell
{ true, false, tests::random::get_bytes(data::cell::maximum_internal_storage_length / 2), false, false },
// Live, variable-size (large), cell
{ true, false, tests::random::get_bytes(data::cell::maximum_external_chunk_length * 5), false, false },
// Live, variable-size, empty cell
// Live, small cell
{ true, false, tests::random::get_bytes(1024), false, false },
// Live, large cell
{ true, false, tests::random::get_bytes(129 * 1024), false, false },
// Live, empty cell
{ true, false, bytes(), false, false },
// Live, expiring, variable-size cell
{ true, false, tests::random::get_bytes(data::cell::maximum_internal_storage_length / 2), true, false },
// Live, expiring cell
{ true, false, tests::random::get_bytes(1024), true, false },
// Dead cell
{ false, false, bytes(), false, false },
// Counter update cell
@@ -67,32 +65,12 @@ BOOST_AUTO_TEST_CASE(test_atomic_cell) {
auto& expiring = tc.expiring;
auto& counter_update = tc.counter_update;
auto timestamp = tests::random::get_int<api::timestamp_type>();
auto ti = [&] {
if (fixed_size) {
return data::type_info::make_fixed_size(value.size());
} else {
return data::type_info::make_variable_size();
}
}();
auto ttl = gc_clock::duration(tests::random::get_int<int32_t>(1, std::numeric_limits<int32_t>::max()));
auto expiry_time = gc_clock::time_point(gc_clock::duration(tests::random::get_int<int32_t>(1, std::numeric_limits<int32_t>::max())));
auto deletion_time = expiry_time;
auto counter_update_value = tests::random::get_int<int64_t>();
std::optional<imr::alloc::object_allocator> allocator;
allocator.emplace();
auto test_cell = [&] (auto builder) {
auto expected_size = data::cell::size_of(builder, *allocator);
if (fixed_size) {
BOOST_CHECK_GE(expected_size, value.size());
}
allocator->allocate_all();
auto buffer = std::make_unique<uint8_t[]>(expected_size);
BOOST_CHECK_EQUAL(data::cell::serialize(buffer.get(), builder, *allocator), expected_size);
auto test_cell = [&] (auto cell) {
auto verify_cell = [&] (auto view) {
if (!live) {
BOOST_CHECK(!view.is_live());
@@ -106,57 +84,29 @@ BOOST_AUTO_TEST_CASE(test_atomic_cell) {
BOOST_CHECK_EQUAL(view.counter_update_value(), counter_update_value);
} else {
BOOST_CHECK(!view.is_counter_update());
BOOST_CHECK(view.value() == value);
BOOST_CHECK(view.value() == managed_bytes_view(bytes_view(value)));
}
BOOST_CHECK_EQUAL(view.is_expiring(), expiring);
BOOST_CHECK_EQUAL(view.is_live_and_has_ttl(), expiring);
if (expiring) {
BOOST_CHECK(view.ttl() == ttl);
BOOST_CHECK(view.expiry() == expiry_time);
}
};
auto view = data::cell::make_atomic_cell_view(ti, buffer.get());
auto view = atomic_cell_view(cell);
verify_cell(view);
allocator.emplace();
auto copier = data::cell::copy_fn(ti, buffer.get());
BOOST_CHECK_EQUAL(data::cell::size_of(copier, *allocator), expected_size);
allocator->allocate_all();
auto copied = std::make_unique<uint8_t[]>(expected_size);
BOOST_CHECK_EQUAL(data::cell::serialize(copied.get(), copier, *allocator), expected_size);
auto view2 = data::cell::make_atomic_cell_view(ti, copied.get());
verify_cell(view2);
auto ctx = data::cell::context(buffer.get(), ti);
BOOST_CHECK_EQUAL(data::cell::structure::serialized_object_size(buffer.get(), ctx), expected_size);
auto moved = std::make_unique<uint8_t[]>(expected_size);
std::copy_n(buffer.get(), expected_size, moved.get());
imr::methods::move<data::cell::structure>(moved.get());
auto view3 = data::cell::make_atomic_cell_view(ti, moved.get());
verify_cell(view3);
imr::methods::destroy<data::cell::structure>(moved.get());
imr::methods::destroy<data::cell::structure>(copied.get());
};
if (live) {
if (counter_update) {
test_cell(data::cell::make_live_counter_update(timestamp, counter_update_value));
test_cell(atomic_cell::make_live_counter_update(timestamp, counter_update_value));
} else if (expiring) {
test_cell(data::cell::make_live(ti, timestamp, value, expiry_time, ttl));
test_cell(atomic_cell::make_live(*bytes_type, timestamp, value, expiry_time, ttl));
} else {
test_cell(data::cell::make_live(ti, timestamp, value));
test_cell(atomic_cell::make_live(*bytes_type, timestamp, value));
}
} else {
test_cell(data::cell::make_dead(timestamp, deletion_time));
test_cell(atomic_cell::make_dead(timestamp, deletion_time));
}
}
}

View File

@@ -167,8 +167,7 @@ struct simple_type_traits<db_clock::time_point> {
template <typename T>
simple_type_impl<T>::simple_type_impl(abstract_type::kind k, sstring name, std::optional<uint32_t> value_length_if_fixed)
: concrete_type<T>(k, std::move(name), std::move(value_length_if_fixed),
data::type_info::make_fixed_size(simple_type_traits<T>::serialized_size)) {}
: concrete_type<T>(k, std::move(name), std::move(value_length_if_fixed)) {}
template <typename T>
integer_type_impl<T>::integer_type_impl(
@@ -206,22 +205,22 @@ int32_type_impl::int32_type_impl() : integer_type_impl{kind::int32, int32_type_n
long_type_impl::long_type_impl() : integer_type_impl{kind::long_kind, long_type_name, 8} {}
string_type_impl::string_type_impl(kind k, sstring name)
: concrete_type(k, name, {}, data::type_info::make_variable_size()) {}
: concrete_type(k, name, {}) {}
ascii_type_impl::ascii_type_impl() : string_type_impl(kind::ascii, ascii_type_name) {}
utf8_type_impl::utf8_type_impl() : string_type_impl(kind::utf8, utf8_type_name) {}
bytes_type_impl::bytes_type_impl()
: concrete_type(kind::bytes, bytes_type_name, {}, data::type_info::make_variable_size()) {}
: concrete_type(kind::bytes, bytes_type_name, {}) {}
boolean_type_impl::boolean_type_impl() : simple_type_impl<bool>(kind::boolean, boolean_type_name, 1) {}
date_type_impl::date_type_impl() : concrete_type(kind::date, date_type_name, 8, data::type_info::make_fixed_size(sizeof(uint64_t))) {}
date_type_impl::date_type_impl() : concrete_type(kind::date, date_type_name, 8) {}
timeuuid_type_impl::timeuuid_type_impl()
: concrete_type<utils::UUID>(
kind::timeuuid, timeuuid_type_name, 16, data::type_info::make_fixed_size(sizeof(uint64_t) * 2)) {}
kind::timeuuid, timeuuid_type_name, 16) {}
timestamp_type_impl::timestamp_type_impl() : simple_type_impl(kind::timestamp, timestamp_type_name, 8) {}
@@ -409,12 +408,12 @@ int64_t time_type_impl::from_sstring(sstring_view s) {
}
uuid_type_impl::uuid_type_impl()
: concrete_type(kind::uuid, uuid_type_name, 16, data::type_info::make_fixed_size(sizeof(uint64_t) * 2)) {}
: concrete_type(kind::uuid, uuid_type_name, 16) {}
using inet_address = seastar::net::inet_address;
inet_addr_type_impl::inet_addr_type_impl()
: concrete_type<inet_address>(kind::inet, inet_addr_type_name, {}, data::type_info::make_variable_size()) {}
: concrete_type<inet_address>(kind::inet, inet_addr_type_name, {}) {}
// Integer of same length of a given type. This is useful because our
// ntoh functions only know how to operate on integers.
@@ -454,12 +453,12 @@ double_type_impl::double_type_impl() : floating_type_impl{kind::double_kind, dou
float_type_impl::float_type_impl() : floating_type_impl{kind::float_kind, float_type_name, 4} {}
varint_type_impl::varint_type_impl() : concrete_type{kind::varint, varint_type_name, { }, data::type_info::make_variable_size()} { }
varint_type_impl::varint_type_impl() : concrete_type{kind::varint, varint_type_name, { }} { }
decimal_type_impl::decimal_type_impl() : concrete_type{kind::decimal, decimal_type_name, { }, data::type_info::make_variable_size()} { }
decimal_type_impl::decimal_type_impl() : concrete_type{kind::decimal, decimal_type_name, { }} { }
counter_type_impl::counter_type_impl()
: abstract_type{kind::counter, counter_type_name, {}, data::type_info::make_variable_size()} {}
: abstract_type{kind::counter, counter_type_name, {}} {}
// TODO(jhaberku): Move this to Seastar.
template <size_t... Ts, class Function>
@@ -472,7 +471,7 @@ auto generate_tuple_from_index(std::index_sequence<Ts...>, Function&& f) {
}
duration_type_impl::duration_type_impl()
: concrete_type(kind::duration, duration_type_name, {}, data::type_info::make_variable_size()) {}
: concrete_type(kind::duration, duration_type_name, {}) {}
using common_counter_type = cql_duration::common_counter_type;
static std::tuple<common_counter_type, common_counter_type, common_counter_type> deserialize_counters(bytes_view v) {
@@ -492,7 +491,7 @@ static std::tuple<common_counter_type, common_counter_type, common_counter_type>
}
empty_type_impl::empty_type_impl()
: abstract_type(kind::empty, empty_type_name, 0, data::type_info::make_fixed_size(0)) {}
: abstract_type(kind::empty, empty_type_name, 0) {}
logging::logger collection_type_impl::_logger("collection_type_impl");
const size_t collection_type_impl::max_elements;
@@ -689,7 +688,7 @@ void write_simple(bytes_ostream& out, std::type_identity_t<T> val) {
out.write(bytes_view(val_ptr, sizeof(T)));
}
void write_collection_value(bytes_ostream& out, cql_serialization_format sf, data::value_view val) {
void write_collection_value(bytes_ostream& out, cql_serialization_format sf, atomic_cell_value_view val) {
if (sf.using_32_bits_for_collections()) {
write_simple<int32_t>(out, int32_t(val.size_bytes()));
} else {
@@ -700,7 +699,7 @@ void write_collection_value(bytes_ostream& out, cql_serialization_format sf, dat
}
write_simple<uint16_t>(out, uint16_t(val.size_bytes()));
}
for (auto&& frag : val) {
for (auto&& frag : fragment_range(val)) {
out.write(frag);
}
}
@@ -1393,7 +1392,7 @@ static std::optional<data_type> update_listlike(
}
tuple_type_impl::tuple_type_impl(kind k, sstring name, std::vector<data_type> types, bool freeze_inner)
: concrete_type(k, std::move(name), { }, data::type_info::make_variable_size()), _types(std::move(types)) {
: concrete_type(k, std::move(name), { }), _types(std::move(types)) {
if (freeze_inner) {
for (auto& t : _types) {
t = t->freeze();
@@ -3051,21 +3050,13 @@ std::optional<data_type> abstract_type::update_user_type(const shared_ptr<const
return visit(*this, visitor{updated});
}
static bytes_view linearized(const data::value_view& v, std::vector<bytes>& store) {
if (v.is_fragmented()) {
return store.emplace_back(v.linearize());
}
return v.first_fragment();
}
static bytes_ostream serialize_for_cql_aux(const map_type_impl&, collection_mutation_view_description mut, cql_serialization_format sf) {
bytes_ostream out;
auto len_slot = out.write_place_holder(collection_size_len(sf));
int elements = 0;
for (auto&& e : mut.cells) {
if (e.second.is_live(mut.tomb, false)) {
write_collection_value(out, sf, data::value_view(e.first));
write_collection_value(out, sf, atomic_cell_value_view(e.first));
write_collection_value(out, sf, e.second.value());
elements += 1;
}
@@ -3080,7 +3071,7 @@ static bytes_ostream serialize_for_cql_aux(const set_type_impl&, collection_muta
int elements = 0;
for (auto&& e : mut.cells) {
if (e.second.is_live(mut.tomb, false)) {
write_collection_value(out, sf, data::value_view(e.first));
write_collection_value(out, sf, atomic_cell_value_view(e.first));
elements += 1;
}
}
@@ -3122,7 +3113,7 @@ static bytes_ostream serialize_for_cql_aux(const user_type_impl& type, collectio
if (e.second.is_live(mut.tomb, false)) {
auto value = e.second.value();
write_simple<int32_t>(out, int32_t(value.size_bytes()));
for (auto&& frag : value) {
for (auto&& frag : fragment_range(value)) {
out.write(frag);
}
} else {

View File

@@ -24,7 +24,6 @@
#include <optional>
#include <boost/functional/hash.hpp>
#include <iosfwd>
#include "data/cell.hh"
#include <sstream>
#include <iterator>
@@ -35,7 +34,6 @@
#include "db_clock.hh"
#include "bytes.hh"
#include "log.hh"
#include "atomic_cell.hh"
#include "cql_serialization_format.hh"
#include "tombstone.hh"
#include "to_string.hh"
@@ -51,6 +49,7 @@
#include "hashing.hh"
#include "utils/fragmented_temporary_buffer.hh"
#include "utils/exceptions.hh"
#include "utils/managed_bytes.hh"
class tuple_type_impl;
class big_decimal;
@@ -466,7 +465,6 @@ class user_type_impl;
class abstract_type : public enable_shared_from_this<abstract_type> {
sstring _name;
std::optional<uint32_t> _value_length_if_fixed;
data::type_imr_descriptor _imr_state;
public:
enum class kind : int8_t {
ascii,
@@ -504,10 +502,9 @@ private:
public:
kind get_kind() const { return _kind; }
abstract_type(kind k, sstring name, std::optional<uint32_t> value_length_if_fixed, data::type_info ti)
: _name(name), _value_length_if_fixed(std::move(value_length_if_fixed)), _imr_state(ti), _kind(k) {}
abstract_type(kind k, sstring name, std::optional<uint32_t> value_length_if_fixed)
: _name(name), _value_length_if_fixed(std::move(value_length_if_fixed)), _kind(k) {}
virtual ~abstract_type() {}
const data::type_imr_descriptor& imr_state() const { return _imr_state; }
bool less(bytes_view v1, bytes_view v2) const { return compare(v1, v2) < 0; }
// returns a callable that can be called with two byte_views, and calls this->less() on them.
serialized_compare as_less_comparator() const ;
@@ -834,7 +831,7 @@ class reversed_type_impl : public abstract_type {
data_type _underlying_type;
reversed_type_impl(data_type t)
: abstract_type(kind::reversed, "org.apache.cassandra.db.marshal.ReversedType(" + t->name() + ")",
t->value_length_if_fixed(), t->imr_state().type_info())
t->value_length_if_fixed())
, _underlying_type(t)
{}
public:
@@ -1161,26 +1158,6 @@ typename Type::value_type deserialize_value(Type& t, bytes_view v) {
return t.deserialize_value(v);
}
// Copies exactly n bytes from v into out, consuming them from the view.
// Does not check bounds. Must be called only after size is already checked.
template<FragmentedView View>
void read_fragmented(View& v, size_t n, bytes::value_type* out) {
    while (n) {
        if (n <= v.current_fragment().size()) {
            // The remaining bytes all live in the current fragment.
            std::copy_n(v.current_fragment().data(), n, out);
            v.remove_prefix(n);
            n = 0;
        } else {
            // Drain the whole current fragment and continue with the next.
            out = std::copy_n(v.current_fragment().data(), v.current_fragment().size(), out);
            n -= v.current_fragment().size();
            v.remove_current();
        }
    }
}

// Specialization for the single-fragment view: one plain copy, no loop.
// Same contract: bounds must have been checked by the caller.
template<> void inline read_fragmented(single_fragmented_view& v, size_t n, bytes::value_type* out) {
    std::copy_n(v.current_fragment().data(), n, out);
    v.remove_prefix(n);
}
template<typename T>
T read_simple(bytes_view& v) {
if (v.size() < sizeof(T)) {
@@ -1191,21 +1168,6 @@ T read_simple(bytes_view& v) {
return net::ntoh(*reinterpret_cast<const net::packed<T>*>(p));
}
// Reads a big-endian T from a fragmented view, consuming sizeof(T) bytes.
// Throws marshal_exception when fewer than sizeof(T) bytes remain.
template<typename T, FragmentedView View>
T read_simple(View& v) {
    if (v.current_fragment().size() >= sizeof(T)) [[likely]] {
        // Fast path: the whole value sits in the current fragment.
        auto p = v.current_fragment().data();
        v.remove_prefix(sizeof(T));
        return net::ntoh(*reinterpret_cast<const net::packed<T>*>(p));
    } else if (v.size_bytes() >= sizeof(T)) {
        // The value straddles a fragment boundary; reassemble it locally.
        T buf;
        read_fragmented(v, sizeof(T), reinterpret_cast<bytes::value_type*>(&buf));
        return net::ntoh(buf);
    } else {
        throw_with_backtrace<marshal_exception>(format("read_simple - not enough bytes (expected {:d}, got {:d})", sizeof(T), v.size_bytes()));
    }
}
template<typename T>
T read_simple_exactly(bytes_view v) {
if (v.size() != sizeof(T)) {
@@ -1215,20 +1177,6 @@ T read_simple_exactly(bytes_view v) {
return net::ntoh(*reinterpret_cast<const net::packed<T>*>(p));
}
// Reads a big-endian T from a view that must hold exactly sizeof(T) bytes;
// any other size throws marshal_exception. The view is taken by value
// because it is fully consumed.
template<typename T, FragmentedView View>
T read_simple_exactly(View v) {
    if (v.current_fragment().size() == sizeof(T)) [[likely]] {
        // Single-fragment case: read directly in place.
        auto p = v.current_fragment().data();
        return net::ntoh(*reinterpret_cast<const net::packed<T>*>(p));
    } else if (v.size_bytes() == sizeof(T)) {
        // Fragmented case: gather the bytes before byte-swapping.
        T buf;
        read_fragmented(v, sizeof(T), reinterpret_cast<bytes::value_type*>(&buf));
        return net::ntoh(buf);
    } else {
        throw_with_backtrace<marshal_exception>(format("read_simple_exactly - size mismatch (expected {:d}, got {:d})", sizeof(T), v.size_bytes()));
    }
}
inline
bytes_view
read_simple_bytes(bytes_view& v, size_t n) {

View File

@@ -44,7 +44,7 @@ public:
protected:
bool _is_multi_cell;
explicit collection_type_impl(kind k, sstring name, bool is_multi_cell)
: abstract_type(k, std::move(name), {}, data::type_info::make_collection()), _is_multi_cell(is_multi_cell) {}
: abstract_type(k, std::move(name), {}), _is_multi_cell(is_multi_cell) {}
public:
bool is_multi_cell() const { return _is_multi_cell; }
virtual data_type name_comparator() const = 0;

View File

@@ -24,8 +24,10 @@
#include <concepts>
#include <boost/range/algorithm/copy.hpp>
#include <boost/range/algorithm/for_each.hpp>
#include <seastar/net/byteorder.hh>
#include <seastar/core/print.hh>
#include "marshal_exception.hh"
#include "bytes.hh"
enum class mutable_view { no, yes, };
@@ -181,6 +183,7 @@ concept FragmentedMutableView = requires (T view) {
template<FragmentedView View>
struct fragment_range {
using fragment_type = typename View::fragment_type;
View view;
class fragment_iterator {
using iterator_category = std::input_iterator_tag;
@@ -209,9 +212,12 @@ struct fragment_range {
pointer operator->() const { return &_current; }
bool operator==(const fragment_iterator& i) const { return _view.size_bytes() == i._view.size_bytes(); }
};
using iterator = fragment_iterator;
fragment_range(const View& v) : view(v) {}
fragment_iterator begin() const { return fragment_iterator(view); }
fragment_iterator end() const { return fragment_iterator(); }
size_t size_bytes() const { return view.size_bytes(); }
bool empty() const { return view.empty(); }
};
template<FragmentedView View>
@@ -238,19 +244,27 @@ decltype(auto) with_linearized(const View& v, Function&& fn)
}
}
class single_fragmented_view {
bytes_view _view;
template <mutable_view is_mutable>
class basic_single_fragmented_view {
public:
using fragment_type = bytes_view;
explicit single_fragmented_view(bytes_view bv) : _view(bv) {}
using fragment_type = std::conditional_t<is_mutable == mutable_view::yes, bytes_mutable_view, bytes_view>;
private:
fragment_type _view;
public:
explicit basic_single_fragmented_view(fragment_type bv) : _view(bv) {}
size_t size_bytes() const { return _view.size(); }
bool empty() const { return _view.empty(); }
void remove_prefix(size_t n) { _view.remove_prefix(n); }
void remove_current() { _view = bytes_view(); }
bytes_view current_fragment() const { return _view; }
single_fragmented_view prefix(size_t n) { return single_fragmented_view(_view.substr(0, n)); }
void remove_current() { _view = fragment_type(); }
fragment_type current_fragment() const { return _view; }
basic_single_fragmented_view prefix(size_t n) { return basic_single_fragmented_view(_view.substr(0, n)); }
};
using single_fragmented_view = basic_single_fragmented_view<mutable_view::no>;
using single_fragmented_mutable_view = basic_single_fragmented_view<mutable_view::yes>;
static_assert(FragmentedView<single_fragmented_view>);
static_assert(FragmentedMutableView<single_fragmented_mutable_view>);
static_assert(FragmentRange<fragment_range<single_fragmented_view>>);
static_assert(FragmentRange<fragment_range<single_fragmented_mutable_view>>);
template<FragmentedView View, typename Function>
requires std::invocable<Function, View> && std::invocable<Function, single_fragmented_view>
@@ -276,6 +290,11 @@ int compare_unsigned(V1 v1, V2 v2) {
return v1.size_bytes() - v2.size_bytes();
}
// Returns true iff the two fragmented views contain the same byte sequence.
// A length mismatch short-circuits without comparing any contents.
// Return type is bool (was int): this is a predicate, and bool converts
// implicitly to int for any caller that relied on the old type.
template<FragmentedView V1, FragmentedView V2>
bool equal_unsigned(V1 v1, V2 v2) {
    return v1.size_bytes() == v2.size_bytes() && compare_unsigned(v1, v2) == 0;
}
template<FragmentedMutableView Dest, FragmentedView Src>
void write_fragmented(Dest& dest, Src src) {
if (dest.size_bytes() < src.size_bytes()) [[unlikely]] {
@@ -288,3 +307,105 @@ void write_fragmented(Dest& dest, Src src) {
src.remove_prefix(n);
}
}
// Copies the contents of src into dest, advancing both views fragment by
// fragment. Throws std::out_of_range when dest cannot hold all of src.
template<FragmentedMutableView Dest, FragmentedView Src>
void copy_fragmented_view(Dest dest, Src src) {
    if (src.size_bytes() > dest.size_bytes()) [[unlikely]] {
        throw std::out_of_range(format("tried to copy a buffer of size {} to a buffer of smaller size {}", src.size_bytes(), dest.size_bytes()));
    }
    while (!src.empty()) {
        // Copy as much as fits in both current fragments at once.
        const size_t chunk = std::min(src.current_fragment().size(), dest.current_fragment().size());
        memcpy(dest.current_fragment().data(), src.current_fragment().data(), chunk);
        src.remove_prefix(chunk);
        dest.remove_prefix(chunk);
    }
}
// Copies exactly n bytes from v into out, consuming them from the view.
// Does not check bounds. Must be called only after size is already checked.
template<FragmentedView View>
void read_fragmented(View& v, size_t n, bytes::value_type* out) {
    while (n) {
        auto frag = v.current_fragment();
        if (frag.size() >= n) {
            // The rest of the requested bytes live in the current fragment.
            std::copy_n(frag.data(), n, out);
            v.remove_prefix(n);
            return;
        }
        // Drain the whole fragment and continue with the next one.
        out = std::copy_n(frag.data(), frag.size(), out);
        n -= frag.size();
        v.remove_current();
    }
}
// Specialization for the single-fragment view: one plain copy, no loop.
// Same contract as the generic overload: bounds must be pre-checked.
template<> void inline read_fragmented(single_fragmented_view& v, size_t n, bytes::value_type* out) {
    std::copy_n(v.current_fragment().data(), n, out);
    v.remove_prefix(n);
}
// Reads a T from the view in native (host) byte order -- no byteswap,
// unlike read_simple -- consuming sizeof(T) bytes.
// Throws marshal_exception when fewer than sizeof(T) bytes remain.
template<typename T, FragmentedView View>
T read_simple_native(View& v) {
    if (v.current_fragment().size() >= sizeof(T)) [[likely]] {
        // Fast path: the whole value sits in the current fragment.
        auto p = v.current_fragment().data();
        v.remove_prefix(sizeof(T));
        return *reinterpret_cast<const net::packed<T>*>(p);
    } else if (v.size_bytes() >= sizeof(T)) {
        // The value straddles a fragment boundary; reassemble it locally.
        T buf;
        read_fragmented(v, sizeof(T), reinterpret_cast<bytes::value_type*>(&buf));
        return buf;
    } else {
        // Fixed: the message previously said "read_simple", pointing the
        // reader at the wrong function when diagnosing a short read.
        throw_with_backtrace<marshal_exception>(format("read_simple_native - not enough bytes (expected {:d}, got {:d})", sizeof(T), v.size_bytes()));
    }
}
// Reads a big-endian T from the view, consuming sizeof(T) bytes.
// Throws marshal_exception when fewer than sizeof(T) bytes remain.
template<typename T, FragmentedView View>
T read_simple(View& v) {
    if (v.current_fragment().size() >= sizeof(T)) [[likely]] {
        // Common case: the whole value sits in the current fragment.
        auto p = v.current_fragment().data();
        v.remove_prefix(sizeof(T));
        return net::ntoh(*reinterpret_cast<const net::packed<T>*>(p));
    }
    if (v.size_bytes() < sizeof(T)) {
        throw_with_backtrace<marshal_exception>(format("read_simple - not enough bytes (expected {:d}, got {:d})", sizeof(T), v.size_bytes()));
    }
    // The value crosses a fragment boundary: gather it into a local buffer.
    T buf;
    read_fragmented(v, sizeof(T), reinterpret_cast<bytes::value_type*>(&buf));
    return net::ntoh(buf);
}
// Reads a big-endian T from a view that must hold exactly sizeof(T) bytes;
// any other size throws marshal_exception. The view is taken by value
// since it would be fully consumed anyway.
template<typename T, FragmentedView View>
T read_simple_exactly(View v) {
    if (v.current_fragment().size() == sizeof(T)) [[likely]] {
        // Single-fragment case: read directly in place.
        return net::ntoh(*reinterpret_cast<const net::packed<T>*>(v.current_fragment().data()));
    }
    if (v.size_bytes() != sizeof(T)) {
        throw_with_backtrace<marshal_exception>(format("read_simple_exactly - size mismatch (expected {:d}, got {:d})", sizeof(T), v.size_bytes()));
    }
    // Fragmented case: gather the bytes before byte-swapping.
    T buf;
    read_fragmented(v, sizeof(T), reinterpret_cast<bytes::value_type*>(&buf));
    return net::ntoh(buf);
}
// Writes val into the destination view in big-endian order, consuming
// sizeof(T) bytes of out. net::ntoh is its own inverse, so it doubles as
// the host-to-network conversion here.
template<typename T, FragmentedMutableView Out>
static inline
void write(Out& out, std::type_identity_t<T> val) {
    auto be = net::ntoh(val);
    auto src = reinterpret_cast<const bytes_view::value_type*>(&be);
    if (out.current_fragment().size() < sizeof(be)) [[unlikely]] {
        // The encoded value straddles fragments; let the generic copier split it.
        write_fragmented(out, single_fragmented_view(bytes_view(src, sizeof(be))));
        return;
    }
    std::copy_n(src, sizeof(be), out.current_fragment().data());
    out.remove_prefix(sizeof(be));
}
// Writes v into the destination view in native (host) byte order -- no
// byteswap is applied, unlike write<T>. Consumes sizeof(T) bytes of out.
template<typename T, FragmentedMutableView Out>
static inline
void write_native(Out& out, std::type_identity_t<T> v) {
    auto p = reinterpret_cast<const bytes_view::value_type*>(&v);
    if (out.current_fragment().size() >= sizeof(v)) [[likely]] {
        // Fast path: the value fits entirely in the current fragment.
        std::copy_n(p, sizeof(v), out.current_fragment().data());
        out.remove_prefix(sizeof(v));
    } else {
        // Otherwise split the copy across fragments.
        write_fragmented(out, single_fragmented_view(bytes_view(p, sizeof(v))));
    }
}

View File

@@ -394,6 +394,7 @@ private:
size_t _size = 0;
public:
managed_bytes_basic_view() = default;
managed_bytes_basic_view(const managed_bytes_basic_view&) = default;
managed_bytes_basic_view(owning_type& mb) {
if (mb._u.small.size != -1) {
_current_fragment = fragment_type(mb._u.small.data, mb._u.small.size);
@@ -437,11 +438,34 @@ public:
v._current_fragment = v._current_fragment.substr(0, len);
return v;
}
// Returns a view of up to `len` bytes starting at `offset`, clamping the
// end to this view's size.
// NOTE(review): assumes offset <= size() and that offset + len does not
// overflow size_t (a huge `len` would wrap and shrink `end`) -- confirm
// callers uphold both before relying on this for untrusted sizes.
managed_bytes_basic_view substr(size_t offset, size_t len) const {
    size_t end = std::min(offset + len, _size);
    managed_bytes_basic_view v = prefix(end);
    v.remove_prefix(offset);
    return v;
}
// First byte of the current fragment (undefined when the view is empty).
const auto& front() const { return _current_fragment.front(); }
auto& front() { return _current_fragment.front(); }
// Byte at `index`: advances a copy of the view, so *this is unchanged.
// Presumably O(number of fragments preceding index) -- see remove_prefix.
const value_type& operator[](size_t index) const {
    auto v = *this;
    v.remove_prefix(index);
    return v.current_fragment().front();
}
// True when the data does not all fit in the current (first) fragment.
bool is_fragmented() const {
    return _size != _current_fragment.size();
}
// Copies all fragments into a single contiguous bytes object.
bytes linearize() const {
    return linearized(*this);
}
// Allow casting mutable views to immutable views.
// Constrained to the immutable instantiation; the friend declaration lets
// it read the mutable view's private members directly.
friend class managed_bytes_basic_view<mutable_view::no>;
managed_bytes_basic_view(const managed_bytes_basic_view<mutable_view::yes>& other)
requires (is_mutable == mutable_view::no)
    : _current_fragment(other._current_fragment.data(), other._current_fragment.size())
    , _next_fragments(other._next_fragments)
    , _size(other._size)
{}
};
static_assert(FragmentedView<managed_bytes_view>);
static_assert(FragmentedMutableView<managed_bytes_mutable_view>);

View File

@@ -1,175 +0,0 @@
/*
* Copyright (C) 2018 ScyllaDB
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <sys/types.h>
#include <cstddef>
#include <type_traits>
#include <utility>
// Compile-time helpers for manipulating lists of types (the template
// meta-programming toolbox used by the IMR infrastructure).
namespace meta {

// Wrappers that allows returning a list of types. All helpers defined in this
// file accept both unpacked and packed lists of types.
template<typename... Ts>
struct list { };

namespace internal {

// Returns the index of the first true value in Vs, or -1 if none is true.
template<bool... Vs>
constexpr ssize_t do_find_if_unpacked() {
    ssize_t i = -1;
    ssize_t j = 0;
    (..., ((Vs && i == -1) ? i = j : j++));
    return i;
}

// Maps a non-negative N to integral_constant<size_t, N>. The -1 ("not
// found") specialization has no `value` member, so a failed lookup becomes
// a compile error rather than a bogus index.
template<ssize_t N>
struct negative_to_empty : std::integral_constant<size_t, N> { };

template<>
struct negative_to_empty<-1> { };

// Curried std::is_same: is_same_as<T>::type<U> is std::is_same<T, U>.
template<typename T>
struct is_same_as {
    template<typename U>
    using type = std::is_same<T, U>;
};

template<template<class> typename Predicate, typename... Ts>
struct do_find_if : internal::negative_to_empty<internal::do_find_if_unpacked<Predicate<Ts>::value...>()> { };

template<template<class> typename Predicate, typename... Ts>
struct do_find_if<Predicate, meta::list<Ts...>> : internal::negative_to_empty<internal::do_find_if_unpacked<Predicate<Ts>::value...>()> { };

}

// Returns the index of the first type T in the list of types Ts for
// which Predicate<T>::value is true.
template<template<class> typename Predicate, typename... Ts>
constexpr size_t find_if = internal::do_find_if<Predicate, Ts...>::value;

// Returns the index of the first occurrence of type T in the list of types Ts.
template<typename T, typename... Ts>
constexpr size_t find = find_if<internal::is_same_as<T>::template type, Ts...>;

namespace internal {

template<size_t N, typename... Ts>
struct do_get_unpacked { };

// Peels one type off the front per recursion step until N reaches 0.
template<size_t N, typename T, typename... Ts>
struct do_get_unpacked<N, T, Ts...> : do_get_unpacked<N - 1, Ts...> { };

template<typename T, typename... Ts>
struct do_get_unpacked<0, T, Ts...> {
    using type = T;
};

template<size_t N, typename... Ts>
struct do_get : do_get_unpacked<N, Ts...> { };

template<size_t N, typename... Ts>
struct do_get<N, meta::list<Ts...>> : do_get_unpacked<N, Ts...> { };

}

// Returns the Nth type in the provided list of types.
template<size_t N, typename... Ts>
using get = typename internal::do_get<N, Ts...>::type;

namespace internal {

template<size_t N, typename Result, typename... Ts>
struct do_take_unpacked { };

template<typename... Ts>
struct do_take_unpacked<0, list<Ts...>> {
    using type = list<Ts...>;
};

template<typename... Ts, typename U, typename... Us>
struct do_take_unpacked<0, list<Ts...>, U, Us...> {
    using type = list<Ts...>;
};

// Moves one type per step from the input pack into the accumulator list.
template<size_t N, typename... Ts, typename U, typename... Us>
struct do_take_unpacked<N, list<Ts...>, U, Us...> {
    using type = typename do_take_unpacked<N - 1, list<Ts..., U>, Us...>::type;
};

template<size_t N, typename Result, typename... Ts>
struct do_take : do_take_unpacked<N, Result, Ts...> { };

template<size_t N, typename Result, typename... Ts>
struct do_take<N, Result, meta::list<Ts...>> : do_take_unpacked<N, Result, Ts...> { };

}

// Returns a list containing N first elements of the provided list of types.
template<size_t N, typename... Ts>
using take = typename internal::do_take<N, list<>, Ts...>::type;

namespace internal {

template<typename... Ts>
struct do_for_each_unpacked {
    template<typename Function>
    static constexpr void run(Function&& fn) {
        (..., fn(static_cast<Ts*>(nullptr)));
    }
};

template<typename... Ts>
struct do_for_each : do_for_each_unpacked<Ts...> { };

template<typename... Ts>
struct do_for_each<meta::list<Ts...>> : do_for_each_unpacked<Ts...> { };

}

// Executes the provided function for each element in the provided list of
// types. For each type T the Function is called with an argument of type T*
// (a null pointer -- only the static type carries information).
template<typename... Ts, typename Function>
constexpr void for_each(Function&& fn) {
    internal::do_for_each<Ts...>::run(std::forward<Function>(fn));
};

namespace internal {

template<typename... Ts>
struct get_size : std::integral_constant<size_t, sizeof...(Ts)> { };

template<typename... Ts>
struct get_size<meta::list<Ts...>> : std::integral_constant<size_t, sizeof...(Ts)> { };

}

// Returns the size of a list of types.
template<typename... Ts>
constexpr size_t size = internal::get_size<Ts...>::value;

// True iff Predicate<T>::value holds for every T in Ts.
template<template <class> typename Predicate, typename... Ts>
static constexpr bool all_of = std::conjunction_v<Predicate<Ts>...>;

}

View File

@@ -61,6 +61,8 @@ public:
CharT* data() const { return _begin; }
size_t size() const { return _end - _begin; }
bool empty() const { return _begin == _end; }
// Reference to the first byte; dereferences _begin, so undefined when empty.
CharT& front() { return *_begin; }
const CharT& front() const { return *_begin; }
void remove_prefix(size_t n) {
_begin += n;

View File

@@ -160,16 +160,3 @@ void write(CharOutputIterator& out, const T& val) {
auto v = net::ntoh(val);
out = std::copy_n(reinterpret_cast<char*>(&v), sizeof(v), out);
}
// Writes val into a fragmented mutable view in big-endian order, consuming
// sizeof(T) bytes of out. net::ntoh is its own inverse, so it serves as the
// host-to-network conversion here.
template<typename T, FragmentedMutableView Out>
static inline
void write(Out& out, std::type_identity_t<T> val) {
    auto v = net::ntoh(val);
    auto p = reinterpret_cast<const bytes_view::value_type*>(&v);
    if (out.current_fragment().size() >= sizeof(v)) [[likely]] {
        // Fast path: the encoded value fits in the current fragment.
        std::copy_n(p, sizeof(v), out.current_fragment().data());
        out.remove_prefix(sizeof(v));
    } else {
        // Otherwise split the copy across fragments.
        write_fragmented(out, single_fragmented_view(bytes_view(p, sizeof(v))));
    }
}