on_down() iterates over _view_update_handlers_list, but it yields during iteration, and while it yields, elements of that list can be removed, resulting in a use-after-free. Prevent this by registering iterators that can potentially be invalidated; whenever we remove an element from the list, check whether that element is pointed to by a live iterator, and if so, advance the iterator so that it points at a valid element (or at the end of the list).

Fixes #4912.
Tests: unit (dev)
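The shape of the fix, as a minimal sketch (the names iteration_safe_list and registered_iterator are illustrative only; the real list and the view_update_write_response_handler wiring live in storage_proxy.cc and differ in detail):

    #include <algorithm>
    #include <list>
    #include <utility>
    #include <vector>

    // An iteration that may yield registers its iterator with the list;
    // erase() then advances any registered iterator that points at the
    // element being removed, so a resumed iteration never touches freed
    // memory.
    template <typename T>
    class iteration_safe_list {
        std::list<T> _items;
        std::vector<typename std::list<T>::iterator*> _live_iterators;
    public:
        using iterator = typename std::list<T>::iterator;

        // RAII registration of an iterator that can survive removals made
        // while the iterating fiber is suspended.
        class registered_iterator {
            iteration_safe_list* _list;
            iterator _it;
        public:
            explicit registered_iterator(iteration_safe_list& l)
                    : _list(&l), _it(l._items.begin()) {
                _list->_live_iterators.push_back(&_it);
            }
            ~registered_iterator() {
                auto& v = _list->_live_iterators;
                v.erase(std::remove(v.begin(), v.end(), &_it), v.end());
            }
            registered_iterator(const registered_iterator&) = delete;
            registered_iterator& operator=(const registered_iterator&) = delete;
            bool at_end() const { return _it == _list->_items.end(); }
            T& operator*() { return *_it; }
            void advance() { ++_it; }
        };

        void push_back(T v) { _items.push_back(std::move(v)); }
        iterator begin() { return _items.begin(); }

        void erase(iterator it) {
            // If a live (possibly suspended) iteration points at the element
            // we are about to remove, move it to the next element (or end())
            // before the memory goes away.
            for (auto* live : _live_iterators) {
                if (*live == it) {
                    ++*live;
                }
            }
            _items.erase(it);
        }
    };

An on_down()-style loop would hold a registered_iterator across its yield points and step it with advance(); removals go through erase(), which nudges any live iterator off the dying element instead of leaving it dangling.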
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright (C) 2015 ScyllaDB
 *
 * Modified by ScyllaDB
 */

/*
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

#pragma once

#include "database_fwd.hh"
#include "query-request.hh"
#include "query-result.hh"
#include "query-result-set.hh"
#include <seastar/core/distributed.hh>
#include <seastar/core/execution_stage.hh>
#include "db/consistency_level_type.hh"
#include "db/read_repair_decision.hh"
#include "db/write_type.hh"
#include "db/hints/manager.hh"
#include "db/view/view_update_backlog.hh"
#include "db/view/node_view_update_backlog.hh"
#include "utils/histogram.hh"
#include "utils/estimated_histogram.hh"
#include "tracing/trace_state.hh"
#include <seastar/core/metrics.hh>
#include "frozen_mutation.hh"
#include "storage_proxy_stats.hh"
#include "cache_temperature.hh"
#include "mutation_query.hh"
#include "service_permit.hh"

namespace locator {

class token_metadata;

}

namespace compat {

class one_or_two_partition_ranges;

}

namespace service {

class abstract_write_response_handler;
class abstract_read_executor;
class mutation_holder;
class view_update_write_response_handler;

using replicas_per_token_range = std::unordered_map<dht::token_range, std::vector<utils::UUID>>;

struct view_update_backlog_timestamped {
    db::view::update_backlog backlog;
    api::timestamp_type ts;
};

struct allow_hints_tag {};
using allow_hints = bool_class<allow_hints_tag>;


class query_ranges_to_vnodes_generator {
    schema_ptr _s;
    dht::partition_range_vector _ranges;
    dht::partition_range_vector::iterator _i; // iterator to current range in _ranges
    bool _local;
    locator::token_metadata& _tm;
    void process_one_range(size_t n, dht::partition_range_vector& ranges);
public:
    query_ranges_to_vnodes_generator(schema_ptr s, dht::partition_range_vector ranges, bool local = false);
    query_ranges_to_vnodes_generator(locator::token_metadata& tm, schema_ptr s, dht::partition_range_vector ranges, bool local = false);
    query_ranges_to_vnodes_generator(const query_ranges_to_vnodes_generator&) = delete;
    query_ranges_to_vnodes_generator(query_ranges_to_vnodes_generator&&) = default;
    // Generates the next 'n' vnode ranges. May return fewer ranges than
    // requested, which means either that there are no more ranges
    // (in which case empty() == true), or that too many ranges
    // were requested.
    dht::partition_range_vector operator()(size_t n);
    bool empty() const;
};

class storage_proxy : public seastar::async_sharded_service<storage_proxy>, public service::endpoint_lifecycle_subscriber /*implements StorageProxyMBean*/ {
public:
    using clock_type = lowres_clock;
    struct config {
        std::optional<std::vector<sstring>> hinted_handoff_enabled = {};
        size_t available_memory;
        smp_service_group read_smp_service_group = default_smp_service_group();
        smp_service_group write_smp_service_group = default_smp_service_group();
        // Write acknowledgments might not be received on the correct shard, and
        // they need a separate smp_service_group to prevent an ABBA deadlock
        // with writes.
        smp_service_group write_ack_smp_service_group = default_smp_service_group();
    };
private:

    using response_id_type = uint64_t;
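    // RAII ownership of a registered write response handler id: release()
    // hands the id back to the caller (see storage_proxy.cc for the
    // definitions).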
    struct unique_response_handler {
        response_id_type id;
        storage_proxy& p;
        unique_response_handler(storage_proxy& p_, response_id_type id_);
        unique_response_handler(const unique_response_handler&) = delete;
        unique_response_handler& operator=(const unique_response_handler&) = delete;
        unique_response_handler(unique_response_handler&& x);
        ~unique_response_handler();
        response_id_type release();
    };

public:
    static const sstring COORDINATOR_STATS_CATEGORY;
    static const sstring REPLICA_STATS_CATEGORY;

    using write_stats = storage_proxy_stats::write_stats;
    using stats = storage_proxy_stats::stats;

    class coordinator_query_options {
        clock_type::time_point _timeout;

    public:
        service_permit permit;
        tracing::trace_state_ptr trace_state = nullptr;
        replicas_per_token_range preferred_replicas;
        std::optional<db::read_repair_decision> read_repair_decision;

        coordinator_query_options(clock_type::time_point timeout,
                service_permit permit_,
                tracing::trace_state_ptr trace_state = nullptr,
                replicas_per_token_range preferred_replicas = { },
                std::optional<db::read_repair_decision> read_repair_decision = { })
            : _timeout(timeout)
            , permit(std::move(permit_))
            , trace_state(std::move(trace_state))
            , preferred_replicas(std::move(preferred_replicas))
            , read_repair_decision(read_repair_decision) {
        }

        clock_type::time_point timeout(storage_proxy& sp) const {
            return _timeout;
        }
    };

    struct coordinator_query_result {
        foreign_ptr<lw_shared_ptr<query::result>> query_result;
        replicas_per_token_range last_replicas;
        db::read_repair_decision read_repair_decision;

        coordinator_query_result(foreign_ptr<lw_shared_ptr<query::result>> query_result,
                replicas_per_token_range last_replicas = {},
                db::read_repair_decision read_repair_decision = db::read_repair_decision::NONE)
            : query_result(std::move(query_result))
            , last_replicas(std::move(last_replicas))
            , read_repair_decision(std::move(read_repair_decision)) {
        }
    };
private:
    distributed<database>& _db;
    smp_service_group _read_smp_service_group;
    smp_service_group _write_smp_service_group;
    smp_service_group _write_ack_smp_service_group;
    response_id_type _next_response_id;
    std::unordered_map<response_id_type, ::shared_ptr<abstract_write_response_handler>> _response_handlers;
    // This buffer holds the ids of throttled writes so that, when resource
    // consumption drops below the threshold, we can unthrottle some of them.
    // Without it, a throttled request with a dead or slow replica could wait
    // for up to the timeout before replying, even after resource consumption
    // goes to zero. Note that some requests here may already be completed by
    // the time we try to unthrottle them (request completion does not remove
    // the request from the buffer), but this is fine: request ids are unique,
    // so we simply skip entries whose requests no longer exist.
    circular_buffer<response_id_type> _throttled_writes;
    db::hints::resource_manager _hints_resource_manager;
    std::optional<db::hints::manager> _hints_manager;
    db::hints::manager _hints_for_views_manager;
    stats _stats;
    static constexpr float CONCURRENT_SUBREQUESTS_MARGIN = 0.10;
    // for read repair chance calculation
    std::default_random_engine _urandom;
    std::uniform_real_distribution<> _read_repair_chance = std::uniform_real_distribution<>(0,1);
    seastar::metrics::metric_groups _metrics;
    uint64_t _background_write_throttle_threahsold;
    inheriting_concrete_execution_stage<
            future<>,
            storage_proxy*,
            std::vector<mutation>,
            db::consistency_level,
            clock_type::time_point,
            tracing::trace_state_ptr,
            service_permit,
            bool> _mutate_stage;
    db::view::node_update_backlog& _max_view_update_backlog;
    std::unordered_map<gms::inet_address, view_update_backlog_timestamped> _view_update_backlogs;

    //NOTICE(sarna): This opaque pointer is here just to avoid moving write handler class definitions from .cc to .hh. It's a slow path.
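    // on_down() iterates over this list and can yield mid-iteration, so handler
    // removal must keep any live iterator valid by advancing it off the removed
    // element (see #4912).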
    class view_update_handlers_list;
    std::unique_ptr<view_update_handlers_list> _view_update_handlers_list;

private:
    void uninit_messaging_service();
    future<coordinator_query_result> query_singular(lw_shared_ptr<query::read_command> cmd,
            dht::partition_range_vector&& partition_ranges,
            db::consistency_level cl,
            coordinator_query_options optional_params);
    response_id_type register_response_handler(shared_ptr<abstract_write_response_handler>&& h);
    void remove_response_handler(response_id_type id);
    void got_response(response_id_type id, gms::inet_address from, std::optional<db::view::update_backlog> backlog);
    void got_failure_response(response_id_type id, gms::inet_address from, size_t count, std::optional<db::view::update_backlog> backlog);
    future<> response_wait(response_id_type id, clock_type::time_point timeout);
    ::shared_ptr<abstract_write_response_handler>& get_write_response_handler(storage_proxy::response_id_type id);
    response_id_type create_write_response_handler(keyspace& ks, db::consistency_level cl, db::write_type type, std::unique_ptr<mutation_holder> m, std::unordered_set<gms::inet_address> targets,
            const std::vector<gms::inet_address>& pending_endpoints, std::vector<gms::inet_address>, tracing::trace_state_ptr tr_state, storage_proxy::write_stats& stats, service_permit permit);
    response_id_type create_write_response_handler(const mutation&, db::consistency_level cl, db::write_type type, tracing::trace_state_ptr tr_state, service_permit permit);
    response_id_type create_write_response_handler(const std::unordered_map<gms::inet_address, std::optional<mutation>>&, db::consistency_level cl, db::write_type type, tracing::trace_state_ptr tr_state, service_permit permit);
    void send_to_live_endpoints(response_id_type response_id, clock_type::time_point timeout);
    template<typename Range>
    size_t hint_to_dead_endpoints(std::unique_ptr<mutation_holder>& mh, const Range& targets, db::write_type type, tracing::trace_state_ptr tr_state) noexcept;
    void hint_to_dead_endpoints(response_id_type, db::consistency_level);
    template<typename Range>
    bool cannot_hint(const Range& targets, db::write_type type);
    bool hints_enabled(db::write_type type) noexcept;
    db::hints::manager& hints_manager_for(db::write_type type);
    std::vector<gms::inet_address> get_live_endpoints(keyspace& ks, const dht::token& token);
    std::vector<gms::inet_address> get_live_sorted_endpoints(keyspace& ks, const dht::token& token);
    db::read_repair_decision new_read_repair_decision(const schema& s);
    ::shared_ptr<abstract_read_executor> get_read_executor(lw_shared_ptr<query::read_command> cmd,
            schema_ptr schema,
            dht::partition_range pr,
            db::consistency_level cl,
            db::read_repair_decision repair_decision,
            tracing::trace_state_ptr trace_state,
            const std::vector<gms::inet_address>& preferred_endpoints,
            bool& is_bounced_read,
            service_permit permit);
    future<foreign_ptr<lw_shared_ptr<query::result>>, cache_temperature> query_result_local(schema_ptr, lw_shared_ptr<query::read_command> cmd, const dht::partition_range& pr,
            query::result_options opts,
            tracing::trace_state_ptr trace_state,
            clock_type::time_point timeout,
            uint64_t max_size = query::result_memory_limiter::maximum_result_size);
    future<query::result_digest, api::timestamp_type, cache_temperature> query_result_local_digest(schema_ptr, lw_shared_ptr<query::read_command> cmd, const dht::partition_range& pr,
            tracing::trace_state_ptr trace_state,
            clock_type::time_point timeout,
            query::digest_algorithm da,
            uint64_t max_size = query::result_memory_limiter::maximum_result_size);
    future<coordinator_query_result> query_partition_key_range(lw_shared_ptr<query::read_command> cmd,
            dht::partition_range_vector partition_ranges,
            db::consistency_level cl,
            coordinator_query_options optional_params);
    float estimate_result_rows_per_range(lw_shared_ptr<query::read_command> cmd, keyspace& ks);
    static std::vector<gms::inet_address> intersection(const std::vector<gms::inet_address>& l1, const std::vector<gms::inet_address>& l2);
    future<std::vector<foreign_ptr<lw_shared_ptr<query::result>>>, replicas_per_token_range> query_partition_key_range_concurrent(clock_type::time_point timeout,
            std::vector<foreign_ptr<lw_shared_ptr<query::result>>>&& results,
            lw_shared_ptr<query::read_command> cmd,
            db::consistency_level cl,
            query_ranges_to_vnodes_generator&& ranges_to_vnodes,
            int concurrency_factor,
            tracing::trace_state_ptr trace_state,
            uint32_t remaining_row_count,
            uint32_t remaining_partition_count,
            replicas_per_token_range preferred_replicas,
            service_permit permit);

    future<coordinator_query_result> do_query(schema_ptr,
            lw_shared_ptr<query::read_command> cmd,
            dht::partition_range_vector&& partition_ranges,
            db::consistency_level cl,
            coordinator_query_options optional_params);
    template<typename Range, typename CreateWriteHandler>
    future<std::vector<unique_response_handler>> mutate_prepare(Range&& mutations, db::consistency_level cl, db::write_type type, service_permit permit, CreateWriteHandler handler);
    template<typename Range>
    future<std::vector<unique_response_handler>> mutate_prepare(Range&& mutations, db::consistency_level cl, db::write_type type, tracing::trace_state_ptr tr_state, service_permit permit);
    future<> mutate_begin(std::vector<unique_response_handler> ids, db::consistency_level cl, std::optional<clock_type::time_point> timeout_opt = { });
    future<> mutate_end(future<> mutate_result, utils::latency_counter, write_stats& stats, tracing::trace_state_ptr trace_state);
    future<> schedule_repair(std::unordered_map<dht::token, std::unordered_map<gms::inet_address, std::optional<mutation>>> diffs, db::consistency_level cl, tracing::trace_state_ptr trace_state, service_permit permit);
    bool need_throttle_writes() const;
    void unthrottle();
    void handle_read_error(std::exception_ptr eptr, bool range);
    template<typename Range>
    future<> mutate_internal(Range mutations, db::consistency_level cl, bool counter_write, tracing::trace_state_ptr tr_state, service_permit permit, std::optional<clock_type::time_point> timeout_opt = { });
    future<foreign_ptr<lw_shared_ptr<reconcilable_result>>, cache_temperature> query_nonsingular_mutations_locally(
            schema_ptr s, lw_shared_ptr<query::read_command> cmd, const dht::partition_range_vector&& pr, tracing::trace_state_ptr trace_state,
            uint64_t max_size, clock_type::time_point timeout);

    future<> mutate_counters_on_leader(std::vector<frozen_mutation_and_schema> mutations, db::consistency_level cl, clock_type::time_point timeout,
            tracing::trace_state_ptr trace_state, service_permit permit);
    future<> mutate_counter_on_leader_and_replicate(const schema_ptr& s, frozen_mutation m, db::consistency_level cl, clock_type::time_point timeout,
            tracing::trace_state_ptr trace_state, service_permit permit);

    gms::inet_address find_leader_for_counter_update(const mutation& m, db::consistency_level cl);

    future<> do_mutate(std::vector<mutation> mutations, db::consistency_level cl, clock_type::time_point timeout, tracing::trace_state_ptr tr_state, service_permit permit, bool);

    future<> send_to_endpoint(
            std::unique_ptr<mutation_holder> m,
            gms::inet_address target,
            std::vector<gms::inet_address> pending_endpoints,
            db::write_type type,
            write_stats& stats,
            allow_hints allow_hints = allow_hints::yes);

    db::view::update_backlog get_view_update_backlog() const;

    void maybe_update_view_backlog_of(gms::inet_address, std::optional<db::view::update_backlog>);

    db::view::update_backlog get_backlog_of(gms::inet_address) const;
public:
    storage_proxy(distributed<database>& db, config cfg, db::view::node_update_backlog& max_view_update_backlog);
    ~storage_proxy();
    const distributed<database>& get_db() const {
        return _db;
    }
    distributed<database>& get_db() {
        return _db;
    }

    view_update_handlers_list& get_view_update_handlers_list() {
        return *_view_update_handlers_list;
    }

    response_id_type get_next_response_id() {
        auto next = _next_response_id++;
        if (next == 0) { // 0 is reserved for unique_response_handler
            next = _next_response_id++;
        }
        return next;
    }
    void init_messaging_service();

    // Applies mutation on this node.
    // Resolves with timed_out_error when timeout is reached.
    future<> mutate_locally(const mutation& m, clock_type::time_point timeout = clock_type::time_point::max());
    // Applies mutation on this node.
    // Resolves with timed_out_error when timeout is reached.
    future<> mutate_locally(const schema_ptr&, const frozen_mutation& m, clock_type::time_point timeout = clock_type::time_point::max());
    // Applies mutations on this node.
    // Resolves with timed_out_error when timeout is reached.
    future<> mutate_locally(std::vector<mutation> mutations, clock_type::time_point timeout = clock_type::time_point::max());

    future<> mutate_streaming_mutation(const schema_ptr&, utils::UUID plan_id, const frozen_mutation& m, bool fragmented);

    /**
     * Use this method to have these mutations applied
     * across all replicas. This method will take care
     * of the possibility of a replica being down and hint
     * the data across to some other replica.
     *
     * @param mutations the mutations to be applied across the replicas
     * @param consistency_level the consistency level for the operation
     * @param tr_state trace state handle
     */
    future<> mutate(std::vector<mutation> mutations, db::consistency_level cl, clock_type::time_point timeout, tracing::trace_state_ptr tr_state, service_permit permit, bool raw_counters = false);

    future<> replicate_counter_from_leader(mutation m, db::consistency_level cl, tracing::trace_state_ptr tr_state,
            clock_type::time_point timeout, service_permit permit);

    template<typename Range>
    future<> mutate_counters(Range&& mutations, db::consistency_level cl, tracing::trace_state_ptr tr_state, service_permit permit, clock_type::time_point timeout);

    future<> mutate_with_triggers(std::vector<mutation> mutations, db::consistency_level cl, clock_type::time_point timeout,
            bool should_mutate_atomically, tracing::trace_state_ptr tr_state, service_permit permit, bool raw_counters = false);

    /**
     * See mutate. Adds additional steps before and after writing a batch.
     * Before writing the batch (but after doing an availability check against the FD for the row replicas):
     * write the entire batch to a batchlog elsewhere in the cluster.
     * After: remove the batchlog entry (after writing hints for the batch rows, if necessary).
     *
     * @param mutations the mutations to be applied across the replicas
     * @param consistency_level the consistency level for the operation
     * @param tr_state trace state handle
     */
    future<> mutate_atomically(std::vector<mutation> mutations, db::consistency_level cl, clock_type::time_point timeout, tracing::trace_state_ptr tr_state, service_permit permit);

    // Send a mutation to one specific remote target.
    // Inspired by Cassandra's StorageProxy.sendToHintedEndpoints but without
    // hinted handoff support, and just one target. See also
    // send_to_live_endpoints() - another take on the same original function.
    future<> send_to_endpoint(frozen_mutation_and_schema fm_a_s, gms::inet_address target, std::vector<gms::inet_address> pending_endpoints, db::write_type type, write_stats& stats, allow_hints allow_hints = allow_hints::yes);
    future<> send_to_endpoint(frozen_mutation_and_schema fm_a_s, gms::inet_address target, std::vector<gms::inet_address> pending_endpoints, db::write_type type, allow_hints allow_hints = allow_hints::yes);

    /**
     * Performs the truncate operation, which effectively deletes all data from
     * the column family cfname
     * @param keyspace
     * @param cfname
     */
    future<> truncate_blocking(sstring keyspace, sstring cfname);

    /*
     * Executes a data query on the whole cluster.
     *
     * Partitions for each range will be ordered according to decorated_key ordering. Results for
     * each range from "partition_ranges" may appear in any order.
     *
     * Will consider the preferred_replicas provided by the caller when selecting the replicas to
     * send read requests to. However, this is merely a hint, and it is not guaranteed that the read
     * requests will be sent to all or any of the listed replicas. After the query is done, the list
     * of replicas that served it is also returned.
     *
     * IMPORTANT: not all fibers started by this method are guaranteed to have completed by the time
     * it returns, so no parameter may be changed after being passed to this method.
     */
    future<coordinator_query_result> query(schema_ptr,
            lw_shared_ptr<query::read_command> cmd,
            dht::partition_range_vector&& partition_ranges,
            db::consistency_level cl,
            coordinator_query_options optional_params);

    future<foreign_ptr<lw_shared_ptr<reconcilable_result>>, cache_temperature> query_mutations_locally(
            schema_ptr, lw_shared_ptr<query::read_command> cmd, const dht::partition_range&,
            clock_type::time_point timeout,
            tracing::trace_state_ptr trace_state = nullptr,
            uint64_t max_size = query::result_memory_limiter::maximum_result_size);


    future<foreign_ptr<lw_shared_ptr<reconcilable_result>>, cache_temperature> query_mutations_locally(
            schema_ptr, lw_shared_ptr<query::read_command> cmd, const ::compat::one_or_two_partition_ranges&,
            clock_type::time_point timeout,
            tracing::trace_state_ptr trace_state = nullptr,
            uint64_t max_size = query::result_memory_limiter::maximum_result_size);

    future<foreign_ptr<lw_shared_ptr<reconcilable_result>>, cache_temperature> query_mutations_locally(
            schema_ptr s, lw_shared_ptr<query::read_command> cmd, const dht::partition_range_vector& pr,
            clock_type::time_point timeout,
            tracing::trace_state_ptr trace_state = nullptr,
            uint64_t max_size = query::result_memory_limiter::maximum_result_size);


    future<> stop();
    future<> start_hints_manager(shared_ptr<gms::gossiper> gossiper_ptr, shared_ptr<service::storage_service> ss_ptr);
    void allow_replaying_hints() noexcept;
    future<> drain_on_shutdown();

    const stats& get_stats() const {
        return _stats;
    }

    virtual void on_join_cluster(const gms::inet_address& endpoint) override;
    virtual void on_leave_cluster(const gms::inet_address& endpoint) override;
    virtual void on_up(const gms::inet_address& endpoint) override;
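    // Note: on_down() walks _view_update_handlers_list and may yield between
    // elements; see the comment on that member above.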
    virtual void on_down(const gms::inet_address& endpoint) override;

    friend class abstract_read_executor;
    friend class abstract_write_response_handler;
    friend class speculating_read_executor;
    friend class view_update_backlog_broker;
    friend class view_update_write_response_handler;
};

extern distributed<storage_proxy> _the_storage_proxy;

inline distributed<storage_proxy>& get_storage_proxy() {
    return _the_storage_proxy;
}

inline storage_proxy& get_local_storage_proxy() {
    return _the_storage_proxy.local();
}

inline shared_ptr<storage_proxy> get_local_shared_storage_proxy() {
    return _the_storage_proxy.local_shared();
}

dht::partition_range_vector get_restricted_ranges(locator::token_metadata&,
        const schema&, dht::partition_range);

}