storage_proxy: remove old get_restricted_ranges() interface

It is no longer used.
This commit is contained in:
Gleb Natapov
2019-02-11 11:44:11 +02:00
parent 0cd9bbb71d
commit ecc5230de5
2 changed files with 0 additions and 70 deletions

View File

@@ -3455,74 +3455,6 @@ void query_ranges_to_vnodes_generator::process_one_range(size_t n, dht::partitio
}
}
/**
 * Compute the sorted, non-overlapping set of sub-ranges to query for the
 * given partition range. A node can be a replica for many ranges, so each
 * scan must be restricted to the specific range being queried — otherwise
 * overlapping scans would produce duplicate results.
 */
dht::partition_range_vector
storage_proxy::get_restricted_ranges(const schema& s, dht::partition_range range) {
    // Delegate to the free-function implementation, binding it to the
    // token metadata of the local storage service.
    auto& token_metadata = get_local_storage_service().get_token_metadata();
    return service::get_restricted_ranges(token_metadata, s, std::move(range));
}
// Split `range` into the sub-ranges delimited by the ring tokens in `tm`,
// returned in ring order. Each returned sub-range falls within a single
// token-ownership interval, so a scan of it hits exactly one replica set.
// Returns an empty vector for a degenerate minimum-token range.
dht::partition_range_vector
get_restricted_ranges(locator::token_metadata& tm, const schema& s, dht::partition_range range) {
dht::ring_position_comparator cmp(s);
// special case for bounds containing exactly 1 token: nothing to split,
// the whole range lies within one ownership interval
if (start_token(range) == end_token(range)) {
if (start_token(range).is_minimum()) {
// (minimum, minimum] is an empty range — no partitions to query
return {};
}
return dht::partition_range_vector({std::move(range)});
}
dht::partition_range_vector ranges;
auto add_range = [&ranges, &cmp] (dht::partition_range&& r) {
ranges.emplace_back(std::move(r));
};
// divide the query range into pieces delimited by the ring tokens,
// starting from the first token at or after the range's start
auto ring_iter = tm.ring_range(range.start(), false);
dht::partition_range remainder = std::move(range);
for (const dht::token& upper_bound_token : ring_iter)
{
/*
 * remainder can be a range/bounds of tokens _or_ keys and we want to split it with a token:
 * - if remainder is tokens, then we'll just split using the provided token.
 * - if remainder is keys, we want to split using the key-space upper bound of the token.
 *   For instance, if remainder is [DK(10, 'foo'), DK(20, 'bar')], and we have 3 nodes with
 *   tokens 0, 15, 30, we want to split remainder to A=[DK(10, 'foo'), 15] and
 *   B=(15, DK(20, 'bar')]. But since we can't mix tokens and keys at the same time in a
 *   range, we use the end-of-token position (token_bound::end below) so that A includes
 *   all keys having 15 as token and B includes none of those.
 */
dht::ring_position split_point(upper_bound_token, dht::ring_position::token_bound::end);
if (!remainder.contains(split_point, cmp)) {
break; // no more splits
}
{
// We shouldn't attempt to split on upper bound, because it may result in
// an ambiguous range of the form (x; x]
if (end_token(remainder) == upper_bound_token) {
break;
}
// split yields {piece before split_point, piece after}; emit the first,
// keep splitting the second on subsequent ring tokens
std::pair<dht::partition_range, dht::partition_range> splits =
remainder.split(split_point, cmp);
add_range(std::move(splits.first));
remainder = std::move(splits.second);
}
}
// whatever is left after the last split is itself a valid sub-range
add_range(std::move(remainder));
return ranges;
}
// Whether hints may be written for the given write type. View writes are
// always hinted; other write types are hinted only when a hints manager
// has been configured for this proxy.
bool storage_proxy::hints_enabled(db::write_type type) noexcept {
    if (type == db::write_type::VIEW) {
        return true;
    }
    return static_cast<bool>(_hints_manager);
}

View File

@@ -340,8 +340,6 @@ public:
future<> mutate_streaming_mutation(const schema_ptr&, utils::UUID plan_id, const frozen_mutation& m, bool fragmented);
dht::partition_range_vector get_restricted_ranges(const schema& s, dht::partition_range range);
/**
* Use this method to have these Mutations applied
* across all replicas. This method will take care