Files
scylladb/range_tombstone_assembler.hh
Tomasz Grabiec 9996b7ca18 flat_mutation_reader: Introduce adaptors between v1 and v2 of mutation fragment stream
The transition to v2 will be incremental. To support that, we need
adaptors between v1 and v2 which will be inserted at places which are
boundaries of conversion.

The v1 -> v2 converter needs to accumulate range tombstones, so has unbounded worst case memory footprint.

The v2 -> v1 converter trims range tombstones around clustering rows,
so generates more fragments than necessary.

Because of that, adaptors are a temporary solution and we should not
release with them on the production code paths.
2021-06-15 13:10:47 +02:00

92 lines
3.1 KiB
C++

/*
* Copyright (C) 2021 ScyllaDB
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <exception>
#include <seastar/core/print.hh>
#include "mutation_fragment_v2.hh"
/// Converts a stream of range_tombstone_change fragments to an equivalent stream of range_tombstone objects.
/// The input fragments must be ordered by their position().
/// The produced range_tombstone objects are non-overlapping and ordered by their position().
///
/// on_end_of_stream() must be called after consuming all fragments; it verifies that the stream did not end with an open range tombstone.
///
/// Example usage:
///
/// range_tombstone_assembler rta;
/// if (auto rt_opt = rta.consume(s, range_tombstone_change(...))) {
/// produce(*rt_opt);
/// }
/// if (auto rt_opt = rta.consume(s, range_tombstone_change(...))) {
/// produce(*rt_opt);
/// }
/// if (auto rt_opt = rta.flush(s, position_in_partition(...))) {
/// produce(*rt_opt);
/// }
/// rta.on_end_of_stream();
///
class range_tombstone_assembler {
std::optional<range_tombstone_change> _prev_rt;
private:
bool has_active_tombstone() const {
return _prev_rt && _prev_rt->tombstone();
}
public:
void reset() {
_prev_rt = std::nullopt;
}
std::optional<range_tombstone> consume(const schema& s, range_tombstone_change&& rt) {
std::optional<range_tombstone> rt_opt;
auto less = position_in_partition::less_compare(s);
if (has_active_tombstone() && less(_prev_rt->position(), rt.position())) {
rt_opt = range_tombstone(_prev_rt->position(), rt.position(), _prev_rt->tombstone());
}
_prev_rt = std::move(rt);
return rt_opt;
}
void on_end_of_stream() {
if (has_active_tombstone()) {
throw std::logic_error(format("Stream ends with an active range tombstone: {}", *_prev_rt));
}
}
// Returns true if and only if flush() may return something.
// Returns false if flush() won't return anything for sure.
bool needs_flush() const {
return has_active_tombstone();
}
std::optional<range_tombstone> flush(const schema& s, position_in_partition_view pos) {
auto less = position_in_partition::less_compare(s);
if (has_active_tombstone() && less(_prev_rt->position(), pos)) {
position_in_partition start = _prev_rt->position();
_prev_rt->set_position(position_in_partition(pos));
return range_tombstone(std::move(start), pos, _prev_rt->tombstone());
}
return std::nullopt;
}
};