Compare commits
95 Commits
next
...
scylla-5.2
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
429b696bbc | ||
|
|
a89867d8c2 | ||
|
|
6ad94fedf3 | ||
|
|
a6188d6abc | ||
|
|
50095cc3a5 | ||
|
|
7b2215d8e0 | ||
|
|
da9f90362d | ||
|
|
c9a17c80f6 | ||
|
|
7242c42089 | ||
|
|
70ff69afab | ||
|
|
5fd4bb853b | ||
|
|
313649e86d | ||
|
|
14d8cec130 | ||
|
|
203cbb79a1 | ||
|
|
51f19d1b8c | ||
|
|
83735ae77f | ||
|
|
9d384e3af2 | ||
|
|
0da0c94f49 | ||
|
|
1a9f51b767 | ||
|
|
dba0e604a7 | ||
|
|
4ea67940cb | ||
|
|
a8c49c44e5 | ||
|
|
12a29edf90 | ||
|
|
3e10c3fc89 | ||
|
|
f11deb5074 | ||
|
|
1baf9dddd7 | ||
|
|
9717ff5057 | ||
|
|
b293b1446f | ||
|
|
e6f7ac17f6 | ||
|
|
36619fc7d9 | ||
|
|
750414c196 | ||
|
|
128050e984 | ||
|
|
d70751fee3 | ||
|
|
1fba43c317 | ||
|
|
e380c24c69 | ||
|
|
76a76a95f4 | ||
|
|
f6837afec7 | ||
|
|
6350c8836d | ||
|
|
5457948437 | ||
|
|
da41001b5c | ||
|
|
dd61e8634c | ||
|
|
b642b4c30e | ||
|
|
c013336121 | ||
|
|
b6b35ce061 | ||
|
|
069e38f02d | ||
|
|
61a8003ad1 | ||
|
|
8a17066961 | ||
|
|
487ba9f3e1 | ||
|
|
bd4f9e3615 | ||
|
|
c68deb2461 | ||
|
|
dd96d3017a | ||
|
|
6ca80ee118 | ||
|
|
eee8f750cc | ||
|
|
8d5206e6c6 | ||
|
|
cfa40402f4 | ||
|
|
2d170e51cf | ||
|
|
860e79e4b1 | ||
|
|
908a82bea0 | ||
|
|
39158f55d0 | ||
|
|
22c1685b3d | ||
|
|
9ba6fc73f1 | ||
|
|
f2e2c0127a | ||
|
|
363ea87f51 | ||
|
|
c49fd6f176 | ||
|
|
3114589a30 | ||
|
|
34f68a4c0f | ||
|
|
b336e11f59 | ||
|
|
9ef73d7e36 | ||
|
|
8700a72b4c | ||
|
|
886dd3e1d2 | ||
|
|
f565f3de06 | ||
|
|
76ff6d981c | ||
|
|
f924f59055 | ||
|
|
d5cef05810 | ||
|
|
e0f4e99e9b | ||
|
|
6795715011 | ||
|
|
aa9e91c376 | ||
|
|
ddfb9ebab2 | ||
|
|
d58a3e4d16 | ||
|
|
2ebac52d2d | ||
|
|
b536614913 | ||
|
|
85df0fd2b1 | ||
|
|
cdf9fe7023 | ||
|
|
8ff4717fd0 | ||
|
|
291b1f6e7f | ||
|
|
b2699743cc | ||
|
|
50ae73a4bd | ||
|
|
c3dd4a2b87 | ||
|
|
0f9fe61d91 | ||
|
|
59d30ff241 | ||
|
|
fb82dff89e | ||
|
|
b588b19620 | ||
|
|
608ef92a71 | ||
|
|
d2732b2663 | ||
|
|
34ab98e1be |
2
.gitmodules
vendored
2
.gitmodules
vendored
@@ -1,6 +1,6 @@
|
||||
[submodule "seastar"]
|
||||
path = seastar
|
||||
url = ../seastar
|
||||
url = ../scylla-seastar
|
||||
ignore = dirty
|
||||
[submodule "swagger-ui"]
|
||||
path = swagger-ui
|
||||
|
||||
@@ -72,7 +72,7 @@ fi
|
||||
|
||||
# Default scylla product/version tags
|
||||
PRODUCT=scylla
|
||||
VERSION=5.2.0-dev
|
||||
VERSION=5.2.0
|
||||
|
||||
if test -f version
|
||||
then
|
||||
|
||||
@@ -145,19 +145,24 @@ future<alternator::executor::request_return_type> alternator::executor::list_str
|
||||
auto table = find_table(_proxy, request);
|
||||
auto db = _proxy.data_dictionary();
|
||||
auto cfs = db.get_tables();
|
||||
auto i = cfs.begin();
|
||||
auto e = cfs.end();
|
||||
|
||||
if (limit < 1) {
|
||||
throw api_error::validation("Limit must be 1 or more");
|
||||
}
|
||||
|
||||
// TODO: the unordered_map here is not really well suited for partial
|
||||
// querying - we're sorting on local hash order, and creating a table
|
||||
// between queries may or may not miss info. But that should be rare,
|
||||
// and we can probably expect this to be a single call.
|
||||
// # 12601 (maybe?) - sort the set of tables on ID. This should ensure we never
|
||||
// generate duplicates in a paged listing here. Can obviously miss things if they
|
||||
// are added between paged calls and end up with a "smaller" UUID/ARN, but that
|
||||
// is to be expected.
|
||||
std::sort(cfs.begin(), cfs.end(), [](const data_dictionary::table& t1, const data_dictionary::table& t2) {
|
||||
return t1.schema()->id().uuid() < t2.schema()->id().uuid();
|
||||
});
|
||||
|
||||
auto i = cfs.begin();
|
||||
auto e = cfs.end();
|
||||
|
||||
if (streams_start) {
|
||||
i = std::find_if(i, e, [&](data_dictionary::table t) {
|
||||
i = std::find_if(i, e, [&](const data_dictionary::table& t) {
|
||||
return t.schema()->id().uuid() == streams_start
|
||||
&& cdc::get_base_table(db.real_database(), *t.schema())
|
||||
&& is_alternator_keyspace(t.schema()->ks_name())
|
||||
|
||||
@@ -647,6 +647,7 @@ sstables::compaction_stopped_exception compaction_manager::task::make_compaction
|
||||
|
||||
compaction_manager::compaction_manager(config cfg, abort_source& as)
|
||||
: _cfg(std::move(cfg))
|
||||
, _compaction_submission_timer(compaction_sg().cpu, compaction_submission_callback())
|
||||
, _compaction_controller(make_compaction_controller(compaction_sg(), static_shares(), [this] () -> float {
|
||||
_last_backlog = backlog();
|
||||
auto b = _last_backlog / available_memory();
|
||||
@@ -681,6 +682,7 @@ compaction_manager::compaction_manager(config cfg, abort_source& as)
|
||||
|
||||
compaction_manager::compaction_manager()
|
||||
: _cfg(config{ .available_memory = 1 })
|
||||
, _compaction_submission_timer(compaction_sg().cpu, compaction_submission_callback())
|
||||
, _compaction_controller(make_compaction_controller(compaction_sg(), 1, [] () -> float { return 1.0; }))
|
||||
, _backlog_manager(_compaction_controller)
|
||||
, _throughput_updater(serialized_action([this] { return update_throughput(throughput_mbs()); }))
|
||||
@@ -738,7 +740,7 @@ void compaction_manager::register_metrics() {
|
||||
void compaction_manager::enable() {
|
||||
assert(_state == state::none || _state == state::disabled);
|
||||
_state = state::enabled;
|
||||
_compaction_submission_timer.arm(periodic_compaction_submission_interval());
|
||||
_compaction_submission_timer.arm_periodic(periodic_compaction_submission_interval());
|
||||
_waiting_reevalution = postponed_compactions_reevaluation();
|
||||
}
|
||||
|
||||
|
||||
@@ -296,10 +296,10 @@ private:
|
||||
std::function<void()> compaction_submission_callback();
|
||||
// all registered tables are reevaluated at a constant interval.
|
||||
// Submission is a NO-OP when there's nothing to do, so it's fine to call it regularly.
|
||||
timer<lowres_clock> _compaction_submission_timer = timer<lowres_clock>(compaction_submission_callback());
|
||||
static constexpr std::chrono::seconds periodic_compaction_submission_interval() { return std::chrono::seconds(3600); }
|
||||
|
||||
config _cfg;
|
||||
timer<lowres_clock> _compaction_submission_timer;
|
||||
compaction_controller _compaction_controller;
|
||||
compaction_backlog_manager _backlog_manager;
|
||||
optimized_optional<abort_source::subscription> _early_abort_subscription;
|
||||
|
||||
@@ -409,7 +409,9 @@ public:
|
||||
l0_old_ssts.push_back(std::move(sst));
|
||||
}
|
||||
}
|
||||
_l0_scts.replace_sstables(std::move(l0_old_ssts), std::move(l0_new_ssts));
|
||||
if (l0_old_ssts.size() || l0_new_ssts.size()) {
|
||||
_l0_scts.replace_sstables(std::move(l0_old_ssts), std::move(l0_new_ssts));
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -553,4 +553,16 @@ murmur3_partitioner_ignore_msb_bits: 12
|
||||
# WARNING: It's unsafe to set this to false if the node previously booted
|
||||
# with the schema commit log enabled. In such case, some schema changes
|
||||
# may be lost if the node was not cleanly stopped.
|
||||
force_schema_commit_log: true
|
||||
force_schema_commit_log: true
|
||||
|
||||
# Use Raft to consistently manage schema information in the cluster.
|
||||
# Refer to https://docs.scylladb.com/master/architecture/raft.html for more details.
|
||||
# The 'Handling Failures' section is especially important.
|
||||
#
|
||||
# Once enabled in a cluster, this cannot be turned off.
|
||||
# If you want to bootstrap a new cluster without Raft, make sure to set this to `false`
|
||||
# before starting your nodes for the first time.
|
||||
#
|
||||
# A cluster not using Raft can be 'upgraded' to use Raft. Refer to the aforementioned
|
||||
# documentation, section 'Enabling Raft in ScyllaDB 5.2 and further', for the procedure.
|
||||
consistent_cluster_management: true
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
|
||||
#include "cql3/attributes.hh"
|
||||
#include "cql3/column_identifier.hh"
|
||||
#include <optional>
|
||||
|
||||
namespace cql3 {
|
||||
|
||||
@@ -55,9 +56,9 @@ int64_t attributes::get_timestamp(int64_t now, const query_options& options) {
|
||||
}
|
||||
}
|
||||
|
||||
int32_t attributes::get_time_to_live(const query_options& options) {
|
||||
std::optional<int32_t> attributes::get_time_to_live(const query_options& options) {
|
||||
if (!_time_to_live.has_value() || _time_to_live_unset_guard.is_unset(options))
|
||||
return 0;
|
||||
return std::nullopt;
|
||||
|
||||
cql3::raw_value tval = expr::evaluate(*_time_to_live, options);
|
||||
if (tval.is_null()) {
|
||||
|
||||
@@ -45,7 +45,7 @@ public:
|
||||
|
||||
int64_t get_timestamp(int64_t now, const query_options& options);
|
||||
|
||||
int32_t get_time_to_live(const query_options& options);
|
||||
std::optional<int32_t> get_time_to_live(const query_options& options);
|
||||
|
||||
db::timeout_clock::duration get_timeout(const query_options& options) const;
|
||||
|
||||
|
||||
@@ -1416,7 +1416,7 @@ expression search_and_replace(const expression& e,
|
||||
};
|
||||
},
|
||||
[&] (const binary_operator& oper) -> expression {
|
||||
return binary_operator(recurse(oper.lhs), oper.op, recurse(oper.rhs));
|
||||
return binary_operator(recurse(oper.lhs), oper.op, recurse(oper.rhs), oper.order);
|
||||
},
|
||||
[&] (const column_mutation_attribute& cma) -> expression {
|
||||
return column_mutation_attribute{cma.kind, recurse(cma.column)};
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
#include "cql3/lists.hh"
|
||||
#include "cql3/constants.hh"
|
||||
#include "cql3/user_types.hh"
|
||||
#include "cql3/ut_name.hh"
|
||||
#include "cql3/type_json.hh"
|
||||
#include "cql3/functions/user_function.hh"
|
||||
#include "cql3/functions/user_aggregate.hh"
|
||||
@@ -52,6 +53,13 @@ bool abstract_function::requires_thread() const { return false; }
|
||||
|
||||
bool as_json_function::requires_thread() const { return false; }
|
||||
|
||||
static bool same_signature(const shared_ptr<function>& f1, const shared_ptr<function>& f2) {
|
||||
if (f1 == nullptr || f2 == nullptr) {
|
||||
return false;
|
||||
}
|
||||
return f1->name() == f2->name() && f1->arg_types() == f2->arg_types();
|
||||
}
|
||||
|
||||
thread_local std::unordered_multimap<function_name, shared_ptr<function>> functions::_declared = init();
|
||||
|
||||
void functions::clear_functions() noexcept {
|
||||
@@ -143,22 +151,56 @@ void functions::replace_function(shared_ptr<function> func) {
|
||||
with_udf_iter(func->name(), func->arg_types(), [func] (functions::declared_t::iterator i) {
|
||||
i->second = std::move(func);
|
||||
});
|
||||
auto scalar_func = dynamic_pointer_cast<scalar_function>(func);
|
||||
if (!scalar_func) {
|
||||
return;
|
||||
}
|
||||
for (auto& fit : _declared) {
|
||||
auto aggregate = dynamic_pointer_cast<user_aggregate>(fit.second);
|
||||
if (aggregate && (same_signature(aggregate->sfunc(), scalar_func)
|
||||
|| (same_signature(aggregate->finalfunc(), scalar_func))
|
||||
|| (same_signature(aggregate->reducefunc(), scalar_func))))
|
||||
{
|
||||
// we need to replace at least one underlying function
|
||||
shared_ptr<scalar_function> sfunc = same_signature(aggregate->sfunc(), scalar_func) ? scalar_func : aggregate->sfunc();
|
||||
shared_ptr<scalar_function> finalfunc = same_signature(aggregate->finalfunc(), scalar_func) ? scalar_func : aggregate->finalfunc();
|
||||
shared_ptr<scalar_function> reducefunc = same_signature(aggregate->reducefunc(), scalar_func) ? scalar_func : aggregate->reducefunc();
|
||||
fit.second = ::make_shared<user_aggregate>(aggregate->name(), aggregate->initcond(), sfunc, reducefunc, finalfunc);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void functions::remove_function(const function_name& name, const std::vector<data_type>& arg_types) {
|
||||
with_udf_iter(name, arg_types, [] (functions::declared_t::iterator i) { _declared.erase(i); });
|
||||
}
|
||||
|
||||
std::optional<function_name> functions::used_by_user_aggregate(const function_name& name) {
|
||||
std::optional<function_name> functions::used_by_user_aggregate(shared_ptr<user_function> func) {
|
||||
for (const shared_ptr<function>& fptr : _declared | boost::adaptors::map_values) {
|
||||
auto aggregate = dynamic_pointer_cast<user_aggregate>(fptr);
|
||||
if (aggregate && (aggregate->sfunc().name() == name || (aggregate->has_finalfunc() && aggregate->finalfunc().name() == name))) {
|
||||
if (aggregate && (same_signature(aggregate->sfunc(), func)
|
||||
|| (same_signature(aggregate->finalfunc(), func))
|
||||
|| (same_signature(aggregate->reducefunc(), func))))
|
||||
{
|
||||
return aggregate->name();
|
||||
}
|
||||
}
|
||||
return {};
|
||||
}
|
||||
|
||||
std::optional<function_name> functions::used_by_user_function(const ut_name& user_type) {
|
||||
for (const shared_ptr<function>& fptr : _declared | boost::adaptors::map_values) {
|
||||
for (auto& arg_type : fptr->arg_types()) {
|
||||
if (arg_type->references_user_type(user_type.get_keyspace(), user_type.get_user_type_name())) {
|
||||
return fptr->name();
|
||||
}
|
||||
}
|
||||
if (fptr->return_type()->references_user_type(user_type.get_keyspace(), user_type.get_user_type_name())) {
|
||||
return fptr->name();
|
||||
}
|
||||
}
|
||||
return {};
|
||||
}
|
||||
|
||||
lw_shared_ptr<column_specification>
|
||||
functions::make_arg_spec(const sstring& receiver_ks, const sstring& receiver_cf,
|
||||
const function& fun, size_t i) {
|
||||
|
||||
@@ -71,7 +71,8 @@ public:
|
||||
static void add_function(shared_ptr<function>);
|
||||
static void replace_function(shared_ptr<function>);
|
||||
static void remove_function(const function_name& name, const std::vector<data_type>& arg_types);
|
||||
static std::optional<function_name> used_by_user_aggregate(const function_name& name);
|
||||
static std::optional<function_name> used_by_user_aggregate(shared_ptr<user_function>);
|
||||
static std::optional<function_name> used_by_user_function(const ut_name& user_type);
|
||||
private:
|
||||
template <typename F>
|
||||
static void with_udf_iter(const function_name& name, const std::vector<data_type>& arg_types, F&& f);
|
||||
|
||||
@@ -37,14 +37,14 @@ public:
|
||||
virtual sstring element_type() const override { return "aggregate"; }
|
||||
virtual std::ostream& describe(std::ostream& os) const override;
|
||||
|
||||
const scalar_function& sfunc() const {
|
||||
return *_sfunc;
|
||||
seastar::shared_ptr<scalar_function> sfunc() const {
|
||||
return _sfunc;
|
||||
}
|
||||
const scalar_function& reducefunc() const {
|
||||
return *_reducefunc;
|
||||
seastar::shared_ptr<scalar_function> reducefunc() const {
|
||||
return _reducefunc;
|
||||
}
|
||||
const scalar_function& finalfunc() const {
|
||||
return *_finalfunc;
|
||||
seastar::shared_ptr<scalar_function> finalfunc() const {
|
||||
return _finalfunc;
|
||||
}
|
||||
const bytes_opt& initcond() const {
|
||||
return _initcond;
|
||||
|
||||
@@ -135,12 +135,21 @@ void query_options::prepare(const std::vector<lw_shared_ptr<column_specification
|
||||
ordered_values.reserve(specs.size());
|
||||
for (auto&& spec : specs) {
|
||||
auto& spec_name = spec->name->text();
|
||||
bool found_value_for_name = false;
|
||||
for (size_t j = 0; j < names.size(); j++) {
|
||||
if (names[j] == spec_name) {
|
||||
ordered_values.emplace_back(_value_views[j]);
|
||||
found_value_for_name = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// No bound value was found with the name `spec_name`.
|
||||
// This means that the user forgot to include a bound value with such name.
|
||||
if (!found_value_for_name) {
|
||||
throw exceptions::invalid_request_exception(
|
||||
format("Missing value for bind marker with name: {}", spec_name));
|
||||
}
|
||||
}
|
||||
_value_views = std::move(ordered_values);
|
||||
}
|
||||
|
||||
@@ -22,6 +22,7 @@
|
||||
#include "db/config.hh"
|
||||
#include "data_dictionary/data_dictionary.hh"
|
||||
#include "hashers.hh"
|
||||
#include "utils/error_injection.hh"
|
||||
|
||||
namespace cql3 {
|
||||
|
||||
@@ -600,6 +601,14 @@ query_processor::get_statement(const sstring_view& query, const service::client_
|
||||
std::unique_ptr<raw::parsed_statement>
|
||||
query_processor::parse_statement(const sstring_view& query) {
|
||||
try {
|
||||
{
|
||||
const char* error_injection_key = "query_processor-parse_statement-test_failure";
|
||||
utils::get_local_injector().inject(error_injection_key, [&]() {
|
||||
if (query.find(error_injection_key) != sstring_view::npos) {
|
||||
throw std::runtime_error(error_injection_key);
|
||||
}
|
||||
});
|
||||
}
|
||||
auto statement = util::do_with_parser(query, std::mem_fn(&cql3_parser::CqlParser::query));
|
||||
if (!statement) {
|
||||
throw exceptions::syntax_exception("Parsing failed");
|
||||
|
||||
@@ -80,7 +80,7 @@ public:
|
||||
|
||||
virtual sstring assignment_testable_source_context() const override {
|
||||
auto&& name = _type->field_name(_field);
|
||||
auto sname = sstring(reinterpret_cast<const char*>(name.begin(), name.size()));
|
||||
auto sname = std::string_view(reinterpret_cast<const char*>(name.data()), name.size());
|
||||
return format("{}.{}", _selected, sname);
|
||||
}
|
||||
|
||||
|
||||
@@ -35,7 +35,7 @@ drop_function_statement::prepare_schema_mutations(query_processor& qp, api::time
|
||||
if (!user_func) {
|
||||
throw exceptions::invalid_request_exception(format("'{}' is not a user defined function", func));
|
||||
}
|
||||
if (auto aggregate = functions::functions::used_by_user_aggregate(user_func->name()); bool(aggregate)) {
|
||||
if (auto aggregate = functions::functions::used_by_user_aggregate(user_func)) {
|
||||
throw exceptions::invalid_request_exception(format("Cannot delete function {}, as it is used by user-defined aggregate {}", func, *aggregate));
|
||||
}
|
||||
m = co_await qp.get_migration_manager().prepare_function_drop_announcement(user_func, ts);
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
#include "cql3/statements/drop_type_statement.hh"
|
||||
#include "cql3/statements/prepared_statement.hh"
|
||||
#include "cql3/query_processor.hh"
|
||||
#include "cql3/functions/functions.hh"
|
||||
|
||||
#include "boost/range/adaptor/map.hpp"
|
||||
|
||||
@@ -109,6 +110,9 @@ void drop_type_statement::validate_while_executing(query_processor& qp) const {
|
||||
}
|
||||
}
|
||||
|
||||
if (auto&& fun_name = functions::functions::used_by_user_function(_name)) {
|
||||
throw exceptions::invalid_request_exception(format("Cannot drop user type {}.{} as it is still used by function {}", keyspace, type->get_name_as_string(), *fun_name));
|
||||
}
|
||||
} catch (data_dictionary::no_such_keyspace& e) {
|
||||
throw exceptions::invalid_request_exception(format("Cannot drop type in unknown keyspace {}", keyspace()));
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
#include "cql3/util.hh"
|
||||
#include "validation.hh"
|
||||
#include "db/consistency_level_validations.hh"
|
||||
#include <optional>
|
||||
#include <seastar/core/shared_ptr.hh>
|
||||
#include <boost/range/adaptor/transformed.hpp>
|
||||
#include <boost/range/adaptor/map.hpp>
|
||||
@@ -95,8 +96,9 @@ bool modification_statement::is_timestamp_set() const {
|
||||
return attrs->is_timestamp_set();
|
||||
}
|
||||
|
||||
gc_clock::duration modification_statement::get_time_to_live(const query_options& options) const {
|
||||
return gc_clock::duration(attrs->get_time_to_live(options));
|
||||
std::optional<gc_clock::duration> modification_statement::get_time_to_live(const query_options& options) const {
|
||||
std::optional<int32_t> ttl = attrs->get_time_to_live(options);
|
||||
return ttl ? std::make_optional<gc_clock::duration>(*ttl) : std::nullopt;
|
||||
}
|
||||
|
||||
future<> modification_statement::check_access(query_processor& qp, const service::client_state& state) const {
|
||||
|
||||
@@ -130,7 +130,7 @@ public:
|
||||
|
||||
bool is_timestamp_set() const;
|
||||
|
||||
gc_clock::duration get_time_to_live(const query_options& options) const;
|
||||
std::optional<gc_clock::duration> get_time_to_live(const query_options& options) const;
|
||||
|
||||
virtual future<> check_access(query_processor& qp, const service::client_state& state) const override;
|
||||
|
||||
|
||||
@@ -93,7 +93,7 @@ public:
|
||||
};
|
||||
// Note: value (mutation) only required to contain the rows we are interested in
|
||||
private:
|
||||
const gc_clock::duration _ttl;
|
||||
const std::optional<gc_clock::duration> _ttl;
|
||||
// For operations that require a read-before-write, stores prefetched cell values.
|
||||
// For CAS statements, stores values of conditioned columns.
|
||||
// Is a reference to an outside prefetch_data container since a CAS BATCH statement
|
||||
@@ -106,7 +106,7 @@ public:
|
||||
const query_options& _options;
|
||||
|
||||
update_parameters(const schema_ptr schema_, const query_options& options,
|
||||
api::timestamp_type timestamp, gc_clock::duration ttl, const prefetch_data& prefetched)
|
||||
api::timestamp_type timestamp, std::optional<gc_clock::duration> ttl, const prefetch_data& prefetched)
|
||||
: _ttl(ttl)
|
||||
, _prefetched(prefetched)
|
||||
, _timestamp(timestamp)
|
||||
@@ -127,11 +127,7 @@ public:
|
||||
}
|
||||
|
||||
atomic_cell make_cell(const abstract_type& type, const raw_value_view& value, atomic_cell::collection_member cm = atomic_cell::collection_member::no) const {
|
||||
auto ttl = _ttl;
|
||||
|
||||
if (ttl.count() <= 0) {
|
||||
ttl = _schema->default_time_to_live();
|
||||
}
|
||||
auto ttl = this->ttl();
|
||||
|
||||
return value.with_value([&] (const FragmentedView auto& v) {
|
||||
if (ttl.count() > 0) {
|
||||
@@ -143,11 +139,7 @@ public:
|
||||
};
|
||||
|
||||
atomic_cell make_cell(const abstract_type& type, const managed_bytes_view& value, atomic_cell::collection_member cm = atomic_cell::collection_member::no) const {
|
||||
auto ttl = _ttl;
|
||||
|
||||
if (ttl.count() <= 0) {
|
||||
ttl = _schema->default_time_to_live();
|
||||
}
|
||||
auto ttl = this->ttl();
|
||||
|
||||
if (ttl.count() > 0) {
|
||||
return atomic_cell::make_live(type, _timestamp, value, _local_deletion_time + ttl, ttl, cm);
|
||||
@@ -169,7 +161,7 @@ public:
|
||||
}
|
||||
|
||||
gc_clock::duration ttl() const {
|
||||
return _ttl.count() > 0 ? _ttl : _schema->default_time_to_live();
|
||||
return _ttl.value_or(_schema->default_time_to_live());
|
||||
}
|
||||
|
||||
gc_clock::time_point expiry() const {
|
||||
|
||||
@@ -59,7 +59,7 @@ public:
|
||||
}
|
||||
|
||||
_end_of_stream = false;
|
||||
forward_buffer_to(pr.start());
|
||||
clear_buffer();
|
||||
return _underlying->fast_forward_to(std::move(pr));
|
||||
}
|
||||
|
||||
|
||||
@@ -1671,9 +1671,9 @@ future<db::commitlog::segment_manager::sseg_ptr> db::commitlog::segment_manager:
|
||||
|
||||
align = f.disk_write_dma_alignment();
|
||||
auto is_overwrite = false;
|
||||
auto existing_size = f.known_size();
|
||||
|
||||
if ((flags & open_flags::dsync) != open_flags{}) {
|
||||
auto existing_size = f.known_size();
|
||||
is_overwrite = true;
|
||||
// would be super nice if we just could mmap(/dev/zero) and do sendto
|
||||
// instead of this, but for now we must do explicit buffer writes.
|
||||
@@ -1683,8 +1683,6 @@ future<db::commitlog::segment_manager::sseg_ptr> db::commitlog::segment_manager:
|
||||
if (existing_size > max_size) {
|
||||
co_await f.truncate(max_size);
|
||||
} else if (existing_size < max_size) {
|
||||
totals.total_size_on_disk += (max_size - existing_size);
|
||||
|
||||
clogger.trace("Pre-writing {} of {} KB to segment {}", (max_size - existing_size)/1024, max_size/1024, filename);
|
||||
|
||||
// re-open without o_dsync for pre-alloc. The reason/rationale
|
||||
@@ -1732,6 +1730,12 @@ future<db::commitlog::segment_manager::sseg_ptr> db::commitlog::segment_manager:
|
||||
co_await f.truncate(max_size);
|
||||
}
|
||||
|
||||
// #12810 - we did not update total_size_on_disk unless o_dsync was
|
||||
// on. So kept running with total == 0 -> free for all in creating new segment.
|
||||
// Always update total_size_on_disk. Will wrap-around iff existing_size > max_size.
|
||||
// That is ok.
|
||||
totals.total_size_on_disk += (max_size - existing_size);
|
||||
|
||||
if (cfg.extensions && !cfg.extensions->commitlog_file_extensions().empty()) {
|
||||
for (auto * ext : cfg.extensions->commitlog_file_extensions()) {
|
||||
auto nf = co_await ext->wrap_file(filename, f, flags);
|
||||
@@ -2116,6 +2120,9 @@ future<> db::commitlog::segment_manager::do_pending_deletes() {
|
||||
clogger.debug("Discarding segments {}", ftd);
|
||||
|
||||
for (auto& [f, mode] : ftd) {
|
||||
// `f.remove_file()` resets known_size to 0, so remember the size here,
|
||||
// in order to subtract it from total_size_on_disk accurately.
|
||||
auto size = f.known_size();
|
||||
try {
|
||||
if (f) {
|
||||
co_await f.close();
|
||||
@@ -2132,7 +2139,6 @@ future<> db::commitlog::segment_manager::do_pending_deletes() {
|
||||
}
|
||||
}
|
||||
|
||||
auto size = f.known_size();
|
||||
auto usage = totals.total_size_on_disk;
|
||||
auto next_usage = usage - size;
|
||||
|
||||
@@ -2165,7 +2171,7 @@ future<> db::commitlog::segment_manager::do_pending_deletes() {
|
||||
// or had such an exception that we consider the file dead
|
||||
// anyway. In either case we _remove_ the file size from
|
||||
// footprint, because it is no longer our problem.
|
||||
totals.total_size_on_disk -= f.known_size();
|
||||
totals.total_size_on_disk -= size;
|
||||
}
|
||||
|
||||
// #8376 - if we had an error in recycling (disk rename?), and no elements
|
||||
|
||||
@@ -401,6 +401,10 @@ public:
|
||||
named_value<uint64_t> wasm_udf_yield_fuel;
|
||||
named_value<uint64_t> wasm_udf_total_fuel;
|
||||
named_value<size_t> wasm_udf_memory_limit;
|
||||
// wasm_udf_reserved_memory is static because the options in db::config
|
||||
// are parsed using seastar::app_template, while this option is used for
|
||||
// configuring the Seastar memory subsystem.
|
||||
static constexpr size_t wasm_udf_reserved_memory = 50 * 1024 * 1024;
|
||||
|
||||
seastar::logging_settings logging_settings(const log_cli::options&) const;
|
||||
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
*/
|
||||
|
||||
#include <seastar/core/print.hh>
|
||||
#include <seastar/core/coroutine.hh>
|
||||
#include "db/system_keyspace.hh"
|
||||
#include "db/large_data_handler.hh"
|
||||
#include "sstables/sstables.hh"
|
||||
@@ -55,11 +56,11 @@ void large_data_handler::start() {
|
||||
}
|
||||
|
||||
future<> large_data_handler::stop() {
|
||||
if (!running()) {
|
||||
return make_ready_future<>();
|
||||
if (running()) {
|
||||
_running = false;
|
||||
large_data_logger.info("Waiting for {} background handlers", max_concurrency - _sem.available_units());
|
||||
co_await _sem.wait(max_concurrency);
|
||||
}
|
||||
_running = false;
|
||||
return _sem.wait(max_concurrency);
|
||||
}
|
||||
|
||||
void large_data_handler::plug_system_keyspace(db::system_keyspace& sys_ks) noexcept {
|
||||
|
||||
@@ -2216,15 +2216,15 @@ std::vector<mutation> make_create_aggregate_mutations(schema_features features,
|
||||
mutation& m = p.first;
|
||||
clustering_key& ckey = p.second;
|
||||
|
||||
data_type state_type = aggregate->sfunc().arg_types()[0];
|
||||
data_type state_type = aggregate->sfunc()->arg_types()[0];
|
||||
if (aggregate->has_finalfunc()) {
|
||||
m.set_clustered_cell(ckey, "final_func", aggregate->finalfunc().name().name, timestamp);
|
||||
m.set_clustered_cell(ckey, "final_func", aggregate->finalfunc()->name().name, timestamp);
|
||||
}
|
||||
if (aggregate->initcond()) {
|
||||
m.set_clustered_cell(ckey, "initcond", state_type->deserialize(*aggregate->initcond()).to_parsable_string(), timestamp);
|
||||
}
|
||||
m.set_clustered_cell(ckey, "return_type", aggregate->return_type()->as_cql3_type().to_string(), timestamp);
|
||||
m.set_clustered_cell(ckey, "state_func", aggregate->sfunc().name().name, timestamp);
|
||||
m.set_clustered_cell(ckey, "state_func", aggregate->sfunc()->name().name, timestamp);
|
||||
m.set_clustered_cell(ckey, "state_type", state_type->as_cql3_type().to_string(), timestamp);
|
||||
std::vector<mutation> muts = {m};
|
||||
|
||||
@@ -2233,7 +2233,7 @@ std::vector<mutation> make_create_aggregate_mutations(schema_features features,
|
||||
auto sa_p = get_mutation(sa_schema, *aggregate);
|
||||
mutation& sa_mut = sa_p.first;
|
||||
clustering_key& sa_ckey = sa_p.second;
|
||||
sa_mut.set_clustered_cell(sa_ckey, "reduce_func", aggregate->reducefunc().name().name, timestamp);
|
||||
sa_mut.set_clustered_cell(sa_ckey, "reduce_func", aggregate->reducefunc()->name().name, timestamp);
|
||||
sa_mut.set_clustered_cell(sa_ckey, "state_type", state_type->as_cql3_type().to_string(), timestamp);
|
||||
|
||||
muts.emplace_back(sa_mut);
|
||||
|
||||
@@ -295,7 +295,7 @@ future<> size_estimates_mutation_reader::fast_forward_to(const dht::partition_ra
|
||||
}
|
||||
|
||||
future<> size_estimates_mutation_reader::fast_forward_to(position_range pr) {
|
||||
forward_buffer_to(pr.start());
|
||||
clear_buffer();
|
||||
_end_of_stream = false;
|
||||
if (_partition_reader) {
|
||||
return _partition_reader->fast_forward_to(std::move(pr));
|
||||
|
||||
@@ -2276,7 +2276,10 @@ public:
|
||||
add_partition(mutation_sink, "trace_probability", format("{:.2}", tracing::tracing::get_local_tracing_instance().get_trace_probability()));
|
||||
co_await add_partition(mutation_sink, "memory", [this] () {
|
||||
struct stats {
|
||||
uint64_t total = 0;
|
||||
// take the pre-reserved memory into account, as seastar only returns
|
||||
// the stats of memory managed by the seastar allocator, but we instruct
|
||||
// it to reserve addition memory for system.
|
||||
uint64_t total = db::config::wasm_udf_reserved_memory;
|
||||
uint64_t free = 0;
|
||||
static stats reduce(stats a, stats b) { return stats{a.total + b.total, a.free + b.free}; }
|
||||
};
|
||||
@@ -3344,11 +3347,11 @@ mutation system_keyspace::make_group0_history_state_id_mutation(
|
||||
using namespace std::chrono;
|
||||
assert(*gc_older_than >= gc_clock::duration{0});
|
||||
|
||||
auto ts_millis = duration_cast<milliseconds>(microseconds{ts});
|
||||
auto gc_older_than_millis = duration_cast<milliseconds>(*gc_older_than);
|
||||
assert(gc_older_than_millis < ts_millis);
|
||||
auto ts_micros = microseconds{ts};
|
||||
auto gc_older_than_micros = duration_cast<microseconds>(*gc_older_than);
|
||||
assert(gc_older_than_micros < ts_micros);
|
||||
|
||||
auto tomb_upper_bound = utils::UUID_gen::min_time_UUID(ts_millis - gc_older_than_millis);
|
||||
auto tomb_upper_bound = utils::UUID_gen::min_time_UUID(ts_micros - gc_older_than_micros);
|
||||
// We want to delete all entries with IDs smaller than `tomb_upper_bound`
|
||||
// but the deleted range is of the form (x, +inf) since the schema is reversed.
|
||||
auto range = query::clustering_range::make_starting_with({
|
||||
|
||||
@@ -172,7 +172,7 @@ class build_progress_virtual_reader {
|
||||
}
|
||||
|
||||
virtual future<> fast_forward_to(position_range range) override {
|
||||
forward_buffer_to(range.start());
|
||||
clear_buffer();
|
||||
_end_of_stream = false;
|
||||
return _underlying.fast_forward_to(std::move(range));
|
||||
}
|
||||
|
||||
@@ -85,29 +85,25 @@ future<row_locker::lock_holder>
|
||||
row_locker::lock_ck(const dht::decorated_key& pk, const clustering_key_prefix& cpk, bool exclusive, db::timeout_clock::time_point timeout, stats& stats) {
|
||||
mylog.debug("taking shared lock on partition {}, and {} lock on row {} in it", pk, (exclusive ? "exclusive" : "shared"), cpk);
|
||||
auto tracker = latency_stats_tracker(exclusive ? stats.exclusive_row : stats.shared_row);
|
||||
auto ck = cpk;
|
||||
// Create a two-level lock entry for the partition if it doesn't exist already.
|
||||
auto i = _two_level_locks.try_emplace(pk, this).first;
|
||||
// The two-level lock entry we've just created is guaranteed to be kept alive as long as it's locked.
|
||||
// Initiating read locking in the background below ensures that even if the two-level lock is currently
|
||||
// write-locked, releasing the write-lock will synchronously engage any waiting
|
||||
// locks and will keep the entry alive.
|
||||
future<lock_type::holder> lock_partition = i->second._partition_lock.hold_read_lock(timeout);
|
||||
auto j = i->second._row_locks.find(cpk);
|
||||
if (j == i->second._row_locks.end()) {
|
||||
// Not yet locked, need to create the lock. This makes a copy of cpk.
|
||||
try {
|
||||
j = i->second._row_locks.emplace(cpk, lock_type()).first;
|
||||
} catch(...) {
|
||||
// If this emplace() failed, e.g., out of memory, we fail. We
|
||||
// could do nothing - the partition lock we already started
|
||||
// taking will be unlocked automatically after being locked.
|
||||
// But it's better form to wait for the work we started, and it
|
||||
// will also allow us to remove the hash-table row we added.
|
||||
return lock_partition.then([ex = std::current_exception()] (auto lock) {
|
||||
// The lock is automatically released when "lock" goes out of scope.
|
||||
// TODO: unlock (lock = {}) now, search for the partition in the
|
||||
// hash table (we know it's still there, because we held the lock until
|
||||
// now) and remove the unused lock from the hash table if still unused.
|
||||
return make_exception_future<row_locker::lock_holder>(std::current_exception());
|
||||
});
|
||||
return lock_partition.then([this, pk = &i->first, row_locks = &i->second._row_locks, ck = std::move(ck), exclusive, tracker = std::move(tracker), timeout] (auto lock1) mutable {
|
||||
auto j = row_locks->find(ck);
|
||||
if (j == row_locks->end()) {
|
||||
// Not yet locked, need to create the lock.
|
||||
j = row_locks->emplace(std::move(ck), lock_type()).first;
|
||||
}
|
||||
}
|
||||
return lock_partition.then([this, pk = &i->first, cpk = &j->first, &row_lock = j->second, exclusive, tracker = std::move(tracker), timeout] (auto lock1) mutable {
|
||||
auto* cpk = &j->first;
|
||||
auto& row_lock = j->second;
|
||||
// Like to the two-level lock entry above, the row_lock entry we've just created
|
||||
// is guaranteed to be kept alive as long as it's locked.
|
||||
// Initiating read/write locking in the background below ensures that.
|
||||
auto lock_row = exclusive ? row_lock.hold_write_lock(timeout) : row_lock.hold_read_lock(timeout);
|
||||
return lock_row.then([this, pk, cpk, exclusive, tracker = std::move(tracker), lock1 = std::move(lock1)] (auto lock2) mutable {
|
||||
lock1.release();
|
||||
|
||||
@@ -2523,24 +2523,28 @@ update_backlog node_update_backlog::add_fetch(unsigned shard, update_backlog bac
|
||||
return std::max(backlog, _max.load(std::memory_order_relaxed));
|
||||
}
|
||||
|
||||
future<bool> check_view_build_ongoing(db::system_distributed_keyspace& sys_dist_ks, const sstring& ks_name, const sstring& cf_name) {
|
||||
return sys_dist_ks.view_status(ks_name, cf_name).then([] (std::unordered_map<locator::host_id, sstring>&& view_statuses) {
|
||||
return boost::algorithm::any_of(view_statuses | boost::adaptors::map_values, [] (const sstring& view_status) {
|
||||
return view_status == "STARTED";
|
||||
future<bool> check_view_build_ongoing(db::system_distributed_keyspace& sys_dist_ks, const locator::token_metadata& tm, const sstring& ks_name,
|
||||
const sstring& cf_name) {
|
||||
using view_statuses_type = std::unordered_map<locator::host_id, sstring>;
|
||||
return sys_dist_ks.view_status(ks_name, cf_name).then([&tm] (view_statuses_type&& view_statuses) {
|
||||
return boost::algorithm::any_of(view_statuses, [&tm] (const view_statuses_type::value_type& view_status) {
|
||||
// Only consider status of known hosts.
|
||||
return view_status.second == "STARTED" && tm.get_endpoint_for_host_id(view_status.first);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
future<bool> check_needs_view_update_path(db::system_distributed_keyspace& sys_dist_ks, const replica::table& t, streaming::stream_reason reason) {
|
||||
future<bool> check_needs_view_update_path(db::system_distributed_keyspace& sys_dist_ks, const locator::token_metadata& tm, const replica::table& t,
|
||||
streaming::stream_reason reason) {
|
||||
if (is_internal_keyspace(t.schema()->ks_name())) {
|
||||
return make_ready_future<bool>(false);
|
||||
}
|
||||
if (reason == streaming::stream_reason::repair && !t.views().empty()) {
|
||||
return make_ready_future<bool>(true);
|
||||
}
|
||||
return do_with(t.views(), [&sys_dist_ks] (auto& views) {
|
||||
return do_with(t.views(), [&sys_dist_ks, &tm] (auto& views) {
|
||||
return map_reduce(views,
|
||||
[&sys_dist_ks] (const view_ptr& view) { return check_view_build_ongoing(sys_dist_ks, view->ks_name(), view->cf_name()); },
|
||||
[&sys_dist_ks, &tm] (const view_ptr& view) { return check_view_build_ongoing(sys_dist_ks, tm, view->ks_name(), view->cf_name()); },
|
||||
false,
|
||||
std::logical_or<bool>());
|
||||
});
|
||||
|
||||
@@ -22,9 +22,13 @@ class system_distributed_keyspace;
|
||||
|
||||
}
|
||||
|
||||
namespace locator {
|
||||
class token_metadata;
|
||||
}
|
||||
|
||||
namespace db::view {
|
||||
|
||||
future<bool> check_view_build_ongoing(db::system_distributed_keyspace& sys_dist_ks, const sstring& ks_name, const sstring& cf_name);
|
||||
future<bool> check_needs_view_update_path(db::system_distributed_keyspace& sys_dist_ks, const replica::table& t, streaming::stream_reason reason);
|
||||
future<bool> check_needs_view_update_path(db::system_distributed_keyspace& sys_dist_ks, const locator::token_metadata& tm, const replica::table& t,
|
||||
streaming::stream_reason reason);
|
||||
|
||||
}
|
||||
|
||||
3
dist/common/scripts/scylla_coredump_setup
vendored
3
dist/common/scripts/scylla_coredump_setup
vendored
@@ -42,7 +42,8 @@ if __name__ == '__main__':
|
||||
if systemd_unit.available('systemd-coredump@.service'):
|
||||
dropin = '''
|
||||
[Service]
|
||||
TimeoutStartSec=infinity
|
||||
RuntimeMaxSec=infinity
|
||||
TimeoutSec=infinity
|
||||
'''[1:-1]
|
||||
os.makedirs('/etc/systemd/system/systemd-coredump@.service.d', exist_ok=True)
|
||||
with open('/etc/systemd/system/systemd-coredump@.service.d/timeout.conf', 'w') as f:
|
||||
|
||||
@@ -1,6 +1,77 @@
|
||||
### a dictionary of redirections
|
||||
#old path: new path
|
||||
|
||||
# removing the Enterprise upgrade guides from the Open Source documentation
|
||||
|
||||
/stable/upgrade/upgrade-enterprise/index.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/index.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2021.1-to-2022.1/index.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2021.1-to-2022.1/index.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2021.1-to-2022.1/upgrade-guide-from-2021.1-to-2022.1-ubuntu.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2021.1-to-2022.1/upgrade-guide-from-2021.1-to-2022.1-ubuntu.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2021.1-to-2022.1/upgrade-guide-from-2021.1-to-2022.1-debian.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2021.1-to-2022.1/upgrade-guide-from-2021.1-to-2022.1-debian.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2021.1-to-2022.1/upgrade-guide-from-2021.1-to-2022.1-image.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2021.1-to-2022.1/upgrade-guide-from-2021.1-to-2022.1-image.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2021.1-to-2022.1/metric-update-2021.1-to-2022.1.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2021.1-to-2022.1/metric-update-2021.1-to-2022.1.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2020.1-to-2021.1/index.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2020.1-to-2021.1/index.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2020.1-to-2021.1/upgrade-guide-from-2020.1-to-2021.1-rpm.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2020.1-to-2021.1/upgrade-guide-from-2020.1-to-2021.1-rpm.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2020.1-to-2021.1/upgrade-guide-from-2020.1-to-2021.1-ubuntu-16-04.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2020.1-to-2021.1/upgrade-guide-from-2020.1-to-2021.1-ubuntu-16-04.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2020.1-to-2021.1/upgrade-guide-from-2020.1-to-2021.1-ubuntu-18-04.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2020.1-to-2021.1/upgrade-guide-from-2020.1-to-2021.1-ubuntu-18-04.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2020.1-to-2021.1/upgrade-guide-from-2020.1-to-2021.1-debian.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2020.1-to-2021.1/upgrade-guide-from-2020.1-to-2021.1-debian.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2020.1-to-2021.1/metric-update-2020.1-to-2021.1.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2020.1-to-2021.1/metric-update-2020.1-to-2021.1.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2019.1-to-2020.1/index.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2019.1-to-2020.1/index.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2019.1-to-2020.1/upgrade-guide-from-2019.1-to-2020.1-rpm.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2019.1-to-2020.1/upgrade-guide-from-2019.1-to-2020.1-rpm.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2019.1-to-2020.1/upgrade-guide-from-2019.1-to-2020.1-ubuntu-16-04.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2019.1-to-2020.1/upgrade-guide-from-2019.1-to-2020.1-ubuntu-16-04.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2019.1-to-2020.1/upgrade-guide-from-2019.1-to-2020.1-ubuntu-18-04.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2019.1-to-2020.1/upgrade-guide-from-2019.1-to-2020.1-ubuntu-18-04.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2019.1-to-2020.1/upgrade-guide-from-2019.1-to-2020.1-debian.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2019.1-to-2020.1/upgrade-guide-from-2019.1-to-2020.1-debian.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2019.1-to-2020.1/metric-update-2019.1-to-2020.1.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2019.1-to-2020.1/metric-update-2019.1-to-2020.1.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2018.1-to-2019.1/index.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2018.1-to-2019.1/index.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2018.1-to-2019.1/upgrade-guide-from-2018.1-to-2019.1-rpm.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2018.1-to-2019.1/upgrade-guide-from-2018.1-to-2019.1-rpm.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2018.1-to-2019.1/upgrade-guide-from-2018.1-to-2019.1-ubuntu-16-04.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2018.1-to-2019.1/upgrade-guide-from-2018.1-to-2019.1-ubuntu-16-04.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2018.1-to-2019.1/metric-update-2018.1-to-2019.1.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2018.1-to-2019.1/metric-update-2018.1-to-2019.1.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2017.1-to-2018.1/index.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2017.1-to-2018.1/index.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2017.1-to-2018.1/upgrade-guide-from-2017.1-to-2018.1-rpm.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2017.1-to-2018.1/upgrade-guide-from-2017.1-to-2018.1-rpm.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2017.1-to-2018.1/upgrade-guide-from-2017.1-to-2018.1-ubuntu.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2017.1-to-2018.1/upgrade-guide-from-2017.1-to-2018.1-ubuntu.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2017.1-to-2018.1/upgrade-guide-from-2017.1-to-2018.1-debian.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2017.1-to-2018.1/upgrade-guide-from-2017.1-to-2018.1-debian.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2017.1-to-2018.1/metric-update-2017.1-to-2018.1.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2017.1-to-2018.1/metric-update-2017.1-to-2018.1.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-ubuntu-14-to-16.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-ubuntu-14-to-16.html
|
||||
/stable/getting-started/install-scylla/unified-installer.html#unified-installed-upgrade: https://enterprise.docs.scylladb.com/stable/getting-started/install-scylla/unified-installer.html#unified-installed-upgrade
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2022.x.y-to-2022.x.z/index.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2022.x.y-to-2022.x.z/index.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2022.x.y-to-2022.x.z/upgrade-guide-from-2022.x.y-to-2022.x.z-image.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2022.x.y-to-2022.x.z/upgrade-guide-from-2022.x.y-to-2022.x.z-image.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2022.x.y-to-2022.x.z/upgrade-guide-from-2022.x.y-to-2022.x.z-rpm.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2022.x.y-to-2022.x.z/upgrade-guide-from-2022.x.y-to-2022.x.z-rpm.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2022.x.y-to-2022.x.z/upgrade-guide-from-2022.x.y-to-2022.x.z-ubuntu-18-04.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2022.x.y-to-2022.x.z/upgrade-guide-from-2022.x.y-to-2022.x.z-ubuntu-18-04.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2022.x.y-to-2022.x.z/upgrade-guide-from-2022.x.y-to-2022.x.z-ubuntu-20-04.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2022.x.y-to-2022.x.z/upgrade-guide-from-2022.x.y-to-2022.x.z-ubuntu-20-04.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2022.x.y-to-2022.x.z/upgrade-guide-from-2022.x.y-to-2022.x.z-debian-10.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2022.x.y-to-2022.x.z/upgrade-guide-from-2022.x.y-to-2022.x.z-debian-10.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2021.x.y-to-2021.x.z/index.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2021.x.y-to-2021.x.z/index.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2021.x.y-to-2021.x.z/upgrade-guide-from-2021.x.y-to-2021.x.z-rpm.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2021.x.y-to-2021.x.z/upgrade-guide-from-2021.x.y-to-2021.x.z-rpm.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2021.x.y-to-2021.x.z/upgrade-guide-from-2021.x.y-to-2021.x.z-ubuntu-16-04.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2021.x.y-to-2021.x.z/upgrade-guide-from-2021.x.y-to-2021.x.z-ubuntu-16-04.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2021.x.y-to-2021.x.z/upgrade-guide-from-2021.x.y-to-2021.x.z-ubuntu-18-04.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2021.x.y-to-2021.x.z/upgrade-guide-from-2021.x.y-to-2021.x.z-ubuntu-18-04.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2021.x.y-to-2021.x.z/upgrade-guide-from-2021.x.y-to-2021.x.z-ubuntu-20-04.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2021.x.y-to-2021.x.z/upgrade-guide-from-2021.x.y-to-2021.x.z-ubuntu-20-04.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2021.x.y-to-2021.x.z/upgrade-guide-from-2021.x.y-to-2021.x.z-debian-9.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2021.x.y-to-2021.x.z/upgrade-guide-from-2021.x.y-to-2021.x.z-debian-9.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2021.x.y-to-2021.x.z/upgrade-guide-from-2021.x.y-to-2021.x.z-debian-10.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2021.x.y-to-2021.x.z/upgrade-guide-from-2021.x.y-to-2021.x.z-debian-10.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2020.x.y-to-2020.x.z/index.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2020.x.y-to-2020.x.z/index.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2020.x.y-to-2020.x.z/upgrade-guide-from-2020.x.y-to-2020.x.z-rpm.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2020.x.y-to-2020.x.z/upgrade-guide-from-2020.x.y-to-2020.x.z-rpm.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2020.x.y-to-2020.x.z/upgrade-guide-from-2020.x.y-to-2020.x.z-ubuntu-16-04.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2020.x.y-to-2020.x.z/upgrade-guide-from-2020.x.y-to-2020.x.z-ubuntu-16-04.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2020.x.y-to-2020.x.z/upgrade-guide-from-2020.x.y-to-2020.x.z-ubuntu-18-04.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2020.x.y-to-2020.x.z/upgrade-guide-from-2020.x.y-to-2020.x.z-ubuntu-18-04.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2020.x.y-to-2020.x.z/upgrade-guide-from-2020.x.y-to-2020.x.z-debian-9.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2020.x.y-to-2020.x.z/upgrade-guide-from-2020.x.y-to-2020.x.z-debian-9.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2020.x.y-to-2020.x.z/upgrade-guide-from-2020.x.y-to-2020.x.z-debian-10.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2020.x.y-to-2020.x.z/upgrade-guide-from-2020.x.y-to-2020.x.z-debian-10.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2019.x.y-to-2019.x.z/index.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2019.x.y-to-2019.x.z/index.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2019.x.y-to-2019.x.z/upgrade-guide-from-2019.x.y-to-2019.x.z-rpm.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2019.x.y-to-2019.x.z/upgrade-guide-from-2019.x.y-to-2019.x.z-rpm.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2019.x.y-to-2019.x.z/upgrade-guide-from-2019.x.y-to-2019.x.z-ubuntu.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2019.x.y-to-2019.x.z/upgrade-guide-from-2019.x.y-to-2019.x.z-ubuntu.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2019.x.y-to-2019.x.z/upgrade-guide-from-2019.x.y-to-2019.x.z-debian.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2019.x.y-to-2019.x.z/upgrade-guide-from-2019.x.y-to-2019.x.z-debian.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2018.x.y-to-2018.x.z/index.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2018.x.y-to-2018.x.z/index.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2018.x.y-to-2018.x.z/upgrade-guide-from-2018.x.y-to-2018.x.z-rpm.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2018.x.y-to-2018.x.z/upgrade-guide-from-2018.x.y-to-2018.x.z-rpm.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2018.x.y-to-2018.x.z/upgrade-guide-from-2018.x.y-to-2018.x.z-ubuntu.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2018.x.y-to-2018.x.z/upgrade-guide-from-2018.x.y-to-2018.x.z-ubuntu.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2018.x.y-to-2018.x.z/upgrade-guide-from-2018.x.y-to-2018.x.z-debian.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2018.x.y-to-2018.x.z/upgrade-guide-from-2018.x.y-to-2018.x.z-debian.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2017.x.y-to-2017.x.z/index.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2017.x.y-to-2017.x.z/index.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2017.x.y-to-2017.x.z/upgrade-guide-from-2017.x.y-to-2017.x.z-rpm.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2017.x.y-to-2017.x.z/upgrade-guide-from-2017.x.y-to-2017.x.z-rpm.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2017.x.y-to-2017.x.z/upgrade-guide-from-2017.x.y-to-2017.x.z-ubuntu.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2017.x.y-to-2017.x.z/upgrade-guide-from-2017.x.y-to-2017.x.z-ubuntu.html
|
||||
/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2017.x.y-to-2017.x.z/upgrade-guide-from-2017.x.y-to-2017.x.z-debian.html: https://enterprise.docs.scylladb.com/stable/upgrade/upgrade-enterprise/upgrade-guide-from-2017.x.y-to-2017.x.z/upgrade-guide-from-2017.x.y-to-2017.x.z-debian.html
|
||||
|
||||
# removing the Enterprise-only content from the Open Source documentation
|
||||
|
||||
/stable/using-scylla/workload-prioritization: https://enterprise.docs.scylladb.com//stable/using-scylla/workload-prioritization.html
|
||||
/stable/operating-scylla/security/encryption-at-rest: https://enterprise.docs.scylladb.com/stable/operating-scylla/security/encryption-at-rest.html
|
||||
/stable/operating-scylla/security/ldap-authentication: https://enterprise.docs.scylladb.com/stable/operating-scylla/security/ldap-authentication.html
|
||||
/stable/operating-scylla/security/ldap-authorization: https://enterprise.docs.scylladb.com/stable/operating-scylla/security/ldap-authorization.html
|
||||
/stable/operating-scylla/security/auditing: https://enterprise.docs.scylladb.com/stable/operating-scylla/security/auditing.html
|
||||
|
||||
# unifying the Ubunut upgrade guide for different Ubuntu versions: from 5.0 to 2022.1
|
||||
|
||||
/stable/upgrade/upgrade-to-enterprise/upgrade-guide-from-5.0-to-2022.1/upgrade-guide-from-5.0-to-2022.1-ubuntu-18-04.html: /stable/upgrade/upgrade-to-enterprise/upgrade-guide-from-5.0-to-2022.1/upgrade-guide-from-5.0-to-2022.1-ubuntu.html
|
||||
@@ -1112,14 +1183,14 @@ tls-ssl/index.html: /stable/operating-scylla/security
|
||||
/using-scylla/integrations/integration_kairos/index.html: /stable/using-scylla/integrations/integration-kairos
|
||||
/upgrade/ami_upgrade/index.html: /stable/upgrade/ami-upgrade
|
||||
|
||||
/scylla-cloud/cloud-setup/gcp-vpc-peering/index.html: /stable/scylla-cloud/cloud-setup/GCP/gcp-vpc-peering
|
||||
/scylla-cloud/cloud-setup/GCP/gcp-vcp-peering/index.html: /stable/scylla-cloud/cloud-setup/GCP/gcp-vpc-peering
|
||||
/scylla-cloud/cloud-setup/gcp-vpc-peering/index.html: https://cloud.docs.scylladb.com/stable/cloud-setup/gcp-vpc-peering.html
|
||||
/scylla-cloud/cloud-setup/GCP/gcp-vcp-peering/index.html: https://cloud.docs.scylladb.com/stable/cloud-setup/gcp-vpc-peering.html
|
||||
|
||||
# move scylla cloud for AWS to dedicated directory
|
||||
/scylla-cloud/cloud-setup/aws-vpc-peering/index.html: /stable/scylla-cloud/cloud-setup/AWS/aws-vpc-peering
|
||||
/scylla-cloud/cloud-setup/cloud-prom-proxy/index.html: /stable/scylla-cloud/cloud-setup/AWS/cloud-prom-proxy
|
||||
/scylla-cloud/cloud-setup/outposts/index.html: /stable/scylla-cloud/cloud-setup/AWS/outposts
|
||||
/scylla-cloud/cloud-setup/scylla-cloud-byoa/index.html: /stable/scylla-cloud/cloud-setup/AWS/scylla-cloud-byoa
|
||||
/scylla-cloud/cloud-setup/aws-vpc-peering/index.html: https://cloud.docs.scylladb.com/stable/cloud-setup/aws-vpc-peering.html
|
||||
/scylla-cloud/cloud-setup/cloud-prom-proxy/index.html: https://cloud.docs.scylladb.com/stable/monitoring/cloud-prom-proxy.html
|
||||
/scylla-cloud/cloud-setup/outposts/index.html: https://cloud.docs.scylladb.com/stable/cloud-setup/outposts.html
|
||||
/scylla-cloud/cloud-setup/scylla-cloud-byoa/index.html: https://cloud.docs.scylladb.com/stable/cloud-setup/scylla-cloud-byoa.html
|
||||
/scylla-cloud/cloud-services/scylla_cloud_costs/index.html: /stable/scylla-cloud/cloud-services/scylla-cloud-costs
|
||||
/scylla-cloud/cloud-services/scylla_cloud_managin_versions/index.html: /stable/scylla-cloud/cloud-services/scylla-cloud-managin-versions
|
||||
/scylla-cloud/cloud-services/scylla_cloud_support_alerts_sla/index.html: /stable/scylla-cloud/cloud-services/scylla-cloud-support-alerts-sla
|
||||
|
||||
@@ -161,6 +161,10 @@ events appear in the Streams API as normal deletions - without the
|
||||
distinctive marker on deletions which are really expirations.
|
||||
See <https://github.com/scylladb/scylla/issues/5060>.
|
||||
|
||||
<!--- REMOVE IN FUTURE VERSIONS - Remove the note below in version 5.3/2023.1 -->
|
||||
|
||||
> **Note** This feature is experimental in versions earlier than ScyllaDB Open Source 5.2 and ScyllaDB Enterprise 2022.2.
|
||||
|
||||
---
|
||||
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@ Raft Consensus Algorithm in ScyllaDB
|
||||
Introduction
|
||||
--------------
|
||||
ScyllaDB was originally designed, following Apache Cassandra, to use gossip for topology and schema updates and the Paxos consensus algorithm for
|
||||
strong data consistency (:doc:`LWT </using-scylla/lwt>`). To achieve stronger consistency without performance penalty, ScyllaDB 5.x has turned to Raft - a consensus algorithm designed as an alternative to both gossip and Paxos.
|
||||
strong data consistency (:doc:`LWT </using-scylla/lwt>`). To achieve stronger consistency without performance penalty, ScyllaDB has turned to Raft - a consensus algorithm designed as an alternative to both gossip and Paxos.
|
||||
|
||||
Raft is a consensus algorithm that implements a distributed, consistent, replicated log across members (nodes). Raft implements consensus by first electing a distinguished leader, then giving the leader complete responsibility for managing the replicated log. The leader accepts log entries from clients, replicates them on other servers, and tells servers when it is safe to apply log entries to their state machines.
|
||||
|
||||
@@ -13,9 +13,9 @@ Raft uses a heartbeat mechanism to trigger a leader election. All servers start
|
||||
|
||||
Leader selection is described in detail in the `Raft paper <https://raft.github.io/raft.pdf>`_.
|
||||
|
||||
ScyllaDB 5.x may use Raft to maintain schema updates in every node (see below). Any schema update, like ALTER, CREATE or DROP TABLE, is first committed as an entry in the replicated Raft log, and, once stored on most replicas, applied to all nodes **in the same order**, even in the face of a node or network failures.
|
||||
ScyllaDB can use Raft to maintain schema updates in every node (see below). Any schema update, like ALTER, CREATE or DROP TABLE, is first committed as an entry in the replicated Raft log, and, once stored on most replicas, applied to all nodes **in the same order**, even in the face of a node or network failures.
|
||||
|
||||
Following ScyllaDB 5.x releases will use Raft to guarantee consistent topology updates similarly.
|
||||
Upcoming ScyllaDB releases will use Raft to guarantee consistent topology updates similarly.
|
||||
|
||||
.. _raft-quorum-requirement:
|
||||
|
||||
@@ -26,90 +26,55 @@ Raft requires at least a quorum of nodes in a cluster to be available. If multip
|
||||
and the quorum is lost, the cluster is unavailable for schema updates. See :ref:`Handling Failures <raft-handling-failures>`
|
||||
for information on how to handle failures.
|
||||
|
||||
|
||||
Upgrade Considerations for ScyllaDB 5.0 and Later
|
||||
==================================================
|
||||
|
||||
Note that when you have a two-DC cluster with the same number of nodes in each DC, the cluster will lose the quorum if one
|
||||
of the DCs is down.
|
||||
**We recommend configuring three DCs per cluster to ensure that the cluster remains available and operational when one DC is down.**
|
||||
|
||||
.. _enabling-raft-existing-cluster:
|
||||
|
||||
Enabling Raft
|
||||
---------------
|
||||
|
||||
Enabling Raft in ScyllaDB 5.0 and 5.1
|
||||
=====================================
|
||||
|
||||
.. warning::
|
||||
In ScyllaDB 5.0 and 5.1, Raft is an experimental feature.
|
||||
|
||||
It is not possible to enable Raft in an existing cluster in ScyllaDB 5.0 and 5.1.
|
||||
In order to have a Raft-enabled cluster in these versions, you must create a new cluster with Raft enabled from the start.
|
||||
|
||||
.. warning::
|
||||
|
||||
**Do not** use Raft in production clusters in ScyllaDB 5.0 and 5.1. Such clusters won't be able to correctly upgrade to ScyllaDB 5.2.
|
||||
|
||||
Use Raft only for testing and experimentation in clusters which can be thrown away.
|
||||
|
||||
.. warning::
|
||||
Once enabled, Raft cannot be disabled on your cluster. The cluster nodes will fail to restart if you remove the Raft feature.
|
||||
|
||||
When creating a new cluster, add ``raft`` to the list of experimental features in your ``scylla.yaml`` file:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
experimental_features:
|
||||
- raft
|
||||
|
||||
.. _enabling-raft-existing-cluster:
|
||||
|
||||
Enabling Raft in ScyllaDB 5.2 and further
|
||||
=========================================
|
||||
|
||||
.. TODO include enterprise versions in this documentation
|
||||
|
||||
.. note::
|
||||
In ScyllaDB 5.2, Raft is Generally Available and can be safely used for consistent schema management.
|
||||
In ScyllaDB 5.3 it will become enabled by default.
|
||||
In further versions it will be mandatory.
|
||||
In ScyllaDB 5.2 and ScyllaDB Enterprise 2023.1 Raft is Generally Available and can be safely used for consistent schema management.
|
||||
In further versions, it will be mandatory.
|
||||
|
||||
ScyllaDB 5.2 and later comes equipped with a procedure that can setup Raft-based consistent cluster management in an existing cluster. We refer to this as the **internal Raft upgrade procedure** (do not confuse with the :doc:`ScyllaDB version upgrade procedure </upgrade/upgrade-opensource/upgrade-guide-from-5.1-to-5.2/upgrade-guide-from-5.1-to-5.2-generic>`).
|
||||
ScyllaDB Open Source 5.2 and later, and ScyllaDB Enterprise 2023.1 and later come equipped with a procedure that can setup Raft-based consistent cluster management in an existing cluster. We refer to this as the **Raft upgrade procedure** (do not confuse with the :doc:`ScyllaDB version upgrade procedure </upgrade/index/>`).
|
||||
|
||||
.. warning::
|
||||
Once enabled, Raft cannot be disabled on your cluster. The cluster nodes will fail to restart if you remove the Raft feature.
|
||||
|
||||
To enable Raft in an existing cluster in Scylla 5.2 and beyond:
|
||||
To enable Raft in an existing cluster, you need to enable the ``consistent_cluster_management`` option in the ``scylla.yaml`` file
|
||||
for **each node** in the cluster:
|
||||
|
||||
* ensure that the schema is synchronized in the cluster by executing :doc:`nodetool describecluster </operating-scylla/nodetool-commands/describecluster>` on each node and ensuring that the schema version is the same on all nodes,
|
||||
* then perform a :doc:`rolling restart </operating-scylla/procedures/config-change/rolling-restart/>`, updating the ``scylla.yaml`` file for **each node** in the cluster before restarting it to enable the ``consistent_cluster_management`` flag:
|
||||
#. Ensure that the schema is synchronized in the cluster by executing :doc:`nodetool describecluster </operating-scylla/nodetool-commands/describecluster>` on each node and ensuring that the schema version is the same on all nodes.
|
||||
#. Perform a :doc:`rolling restart </operating-scylla/procedures/config-change/rolling-restart/>`, updating the ``scylla.yaml`` file for **each node** in the cluster before restarting it to enable the ``consistent_cluster_management`` option:
|
||||
|
||||
.. code-block:: yaml
|
||||
.. code-block:: yaml
|
||||
|
||||
consistent_cluster_management: true
|
||||
consistent_cluster_management: true
|
||||
|
||||
When all the nodes in the cluster are updated and restarted, the cluster will start the **internal Raft upgrade procedure**.
|
||||
**You must then verify** that the internal Raft upgrade procedure has finished successfully. Refer to the :ref:`next section <verify-raft-procedure>`.
|
||||
When all the nodes in the cluster are updated and restarted, the cluster will start the **Raft upgrade procedure**.
|
||||
**You must then verify** that the Raft upgrade procedure has finished successfully. Refer to the :ref:`next section <verify-raft-procedure>`.
|
||||
|
||||
You can also enable the ``consistent_cluster_management`` flag while performing :doc:`rolling upgrade from 5.1 to 5.2 </upgrade/upgrade-opensource/upgrade-guide-from-5.1-to-5.2/upgrade-guide-from-5.1-to-5.2-generic>`: update ``scylla.yaml`` before restarting each node. The internal Raft upgrade procedure will start as soon as the last node was upgraded and restarted. As above, this requires :ref:`verifying <verify-raft-procedure>` that this internal procedure successfully finishes.
|
||||
Alternatively, you can enable the ``consistent_cluster_management`` option when you are:
|
||||
|
||||
Finally, you can enable the ``consistent_cluster_management`` flag when creating a new cluster. This does not use the internal Raft upgrade procedure; instead, Raft is functioning in the cluster and managing schema right from the start.
|
||||
* Performing a rolling upgrade from version 5.1 to 5.2 or version 2022.x to 2023.1 by updating ``scylla.yaml`` before restarting each node. The Raft upgrade procedure will start as soon as the last node was upgraded and restarted. As above, this requires :ref:`verifying <verify-raft-procedure>` that the procedure successfully finishes.
|
||||
* Creating a new cluster. This does not use the Raft upgrade procedure; instead, Raft is functioning in the cluster and managing schema right from the start.
|
||||
|
||||
Until all nodes are restarted with ``consistent_cluster_management: true``, it is still possible to turn this option back off. Once enabled on every node, it must remain turned on (or the node will refuse to restart).
|
||||
|
||||
.. _verify-raft-procedure:
|
||||
|
||||
Verifying that the internal Raft upgrade procedure finished successfully
|
||||
Verifying that the Raft upgrade procedure finished successfully
|
||||
========================================================================
|
||||
|
||||
.. versionadded:: 5.2
|
||||
|
||||
The internal Raft upgrade procedure starts as soon as every node in the cluster restarts with ``consistent_cluster_management`` flag enabled in ``scylla.yaml``.
|
||||
The Raft upgrade procedure starts as soon as every node in the cluster restarts with ``consistent_cluster_management`` flag enabled in ``scylla.yaml``.
|
||||
|
||||
.. TODO: update the above sentence once 5.3 and later are released.
|
||||
|
||||
The procedure requires **full cluster availability** to correctly set up the Raft algorithm; after the setup finishes, Raft can proceed with only a majority of nodes, but this initial setup is an exception.
|
||||
An unlucky event, such as a hardware failure, may cause one of your nodes to fail. If this happens before the internal Raft upgrade procedure finishes, the procedure will get stuck and your intervention will be required.
|
||||
An unlucky event, such as a hardware failure, may cause one of your nodes to fail. If this happens before the Raft upgrade procedure finishes, the procedure will get stuck and your intervention will be required.
|
||||
|
||||
To verify that the procedure finishes, look at the log of every Scylla node (using ``journalctl _COMM=scylla``). Search for the following patterns:
|
||||
|
||||
@@ -204,8 +169,6 @@ If some nodes are **dead and irrecoverable**, you'll need to perform a manual re
|
||||
Verifying that Raft is enabled
|
||||
===============================
|
||||
|
||||
.. versionadded:: 5.2
|
||||
|
||||
You can verify that Raft is enabled on your cluster by performing the following query on each node:
|
||||
|
||||
.. code-block:: sql
|
||||
@@ -224,7 +187,7 @@ The query should return:
|
||||
|
||||
on every node.
|
||||
|
||||
If the query returns 0 rows, or ``value`` is ``synchronize`` or ``use_pre_raft_procedures``, it means that the cluster is in the middle of the internal Raft upgrade procedure; consult the :ref:`relevant section <verify-raft-procedure>`.
|
||||
If the query returns 0 rows, or ``value`` is ``synchronize`` or ``use_pre_raft_procedures``, it means that the cluster is in the middle of the Raft upgrade procedure; consult the :ref:`relevant section <verify-raft-procedure>`.
|
||||
|
||||
If ``value`` is ``recovery``, it means that the cluster is in the middle of the manual recovery procedure. The procedure must be finished. Consult :ref:`the section about Raft recovery <recover-raft-procedure>`.
|
||||
|
||||
@@ -276,12 +239,8 @@ Examples
|
||||
- Schema updates are possible and safe.
|
||||
- Try restarting the node. If the node is dead, :doc:`replace it with a new node </operating-scylla/procedures/cluster-management/replace-dead-node/>`.
|
||||
* - 2 nodes
|
||||
- Cluster is not fully operational. The data is available for reads and writes, but schema changes are impossible.
|
||||
- Data is available for reads and writes, schema changes are impossible.
|
||||
- Restart at least 1 of the 2 nodes that are down to regain quorum. If you can’t recover at least 1 of the 2 nodes, consult the :ref:`manual Raft recovery section <recover-raft-procedure>`.
|
||||
* - 1 datacenter
|
||||
- Cluster is not fully operational. The data is available for reads and writes, but schema changes are impossible.
|
||||
- When the DC comes back online, restart the nodes. If the DC does not come back online and nodes are lost, consult the :ref:`manual Raft recovery section <recover-raft-procedure>`.
|
||||
|
||||
|
||||
.. list-table:: Cluster B: 2 datacenters, 6 nodes (3 nodes per DC)
|
||||
:widths: 20 40 40
|
||||
@@ -294,10 +253,10 @@ Examples
|
||||
- Schema updates are possible and safe.
|
||||
- Try restarting the node(s). If the node is dead, :doc:`replace it with a new node </operating-scylla/procedures/cluster-management/replace-dead-node/>`.
|
||||
* - 3 nodes
|
||||
- Cluster is not fully operational. The data is available for reads and writes, but schema changes are impossible.
|
||||
- Data is available for reads and writes, schema changes are impossible.
|
||||
- Restart 1 of the 3 nodes that are down to regain quorum. If you can’t recover at least 1 of the 3 failed nodes, consult the :ref:`manual Raft recovery section <recover-raft-procedure>`.
|
||||
* - 1DC
|
||||
- Cluster is not fully operational. The data is available for reads and writes, but schema changes are impossible.
|
||||
- Data is available for reads and writes, schema changes are impossible.
|
||||
- When the DCs come back online, restart the nodes. If the DC fails to come back online and the nodes are lost, consult the :ref:`manual Raft recovery section <recover-raft-procedure>`.
|
||||
|
||||
|
||||
@@ -315,7 +274,7 @@ Examples
|
||||
- Schema updates are possible and safe.
|
||||
- When the DC comes back online, try restarting the nodes in the cluster. If the nodes are dead, :doc:`add 3 new nodes in a new region </operating-scylla/procedures/cluster-management/add-dc-to-existing-dc/>`.
|
||||
* - 2 DCs
|
||||
- Cluster is not fully operational. The data is available for reads and writes, but schema changes are impossible.
|
||||
- Data is available for reads and writes, schema changes are impossible.
|
||||
- When the DCs come back online, restart the nodes. If at least one DC fails to come back online and the nodes are lost, consult the :ref:`manual Raft recovery section <recover-raft-procedure>`.
|
||||
|
||||
.. _recover-raft-procedure:
|
||||
@@ -323,26 +282,24 @@ Examples
|
||||
Raft manual recovery procedure
|
||||
==============================
|
||||
|
||||
.. versionadded:: 5.2
|
||||
|
||||
The manual Raft recovery procedure applies to the following situations:
|
||||
|
||||
* :ref:`The internal Raft upgrade procedure <verify-raft-procedure>` got stuck because one of your nodes failed in the middle of the procedure and is irrecoverable,
|
||||
* :ref:`The Raft upgrade procedure <verify-raft-procedure>` got stuck because one of your nodes failed in the middle of the procedure and is irrecoverable,
|
||||
* or the cluster was running Raft but a majority of nodes (e.g. 2 out of 3) failed and are irrecoverable. Raft cannot progress unless a majority of nodes is available.
|
||||
|
||||
.. warning::
|
||||
|
||||
Perform the manual recovery procedure **only** if you're dealing with **irrecoverable** nodes. If it is possible to restart your nodes, do that instead of manual recovery.
|
||||
|
||||
.. warning::
|
||||
.. note::
|
||||
|
||||
Before proceeding, make sure that the irrecoverable nodes are truly dead, and not, for example, temporarily partitioned away due to a network failure. If it is possible for the 'dead' nodes to come back to life, they might communicate and interfere with the recovery procedure and cause unpredictable problems.
|
||||
|
||||
If you have no means of ensuring that these irrecoverable nodes won't come back to life and communicate with the rest of the cluster, setup firewall rules or otherwise isolate your alive nodes to reject any communication attempts from these dead nodes.
|
||||
|
||||
During the manual recovery procedure you'll enter a special ``RECOVERY`` mode, remove all faulty nodes (using the standard :doc:`node removal procedure </operating-scylla/procedures/cluster-management/remove-node/>`), delete the internal Raft data, and restart the cluster. This will cause the cluster to perform the internal Raft upgrade procedure again, initializing the Raft algorithm from scratch. The manual recovery procedure is applicable both to clusters which were not running Raft in the past and then had Raft enabled, and to clusters which were bootstrapped using Raft.
|
||||
During the manual recovery procedure you'll enter a special ``RECOVERY`` mode, remove all faulty nodes (using the standard :doc:`node removal procedure </operating-scylla/procedures/cluster-management/remove-node/>`), delete the internal Raft data, and restart the cluster. This will cause the cluster to perform the Raft upgrade procedure again, initializing the Raft algorithm from scratch. The manual recovery procedure is applicable both to clusters which were not running Raft in the past and then had Raft enabled, and to clusters which were bootstrapped using Raft.
|
||||
|
||||
.. warning::
|
||||
.. note::
|
||||
|
||||
    Entering ``RECOVERY`` mode requires a node restart. Restarting an additional node while some nodes are already dead may lead to unavailability of data queries (assuming that you haven't lost it already). For example, if you're using the standard RF=3, CL=QUORUM setup, and you're recovering from a stuck upgrade procedure because one of your nodes is dead, restarting another node will cause temporary data query unavailability (until the node finishes restarting). Prepare your service for downtime before proceeding.
|
||||
|
||||
@@ -393,4 +350,3 @@ Learn More About Raft
|
||||
* `Making Schema Changes Safe with Raft <https://www.scylladb.com/presentations/making-schema-changes-safe-with-raft/>`_ - A Scylla Summit talk by Konstantin Osipov (register for access)
|
||||
* `The Future of Consensus in ScyllaDB 5.0 and Beyond <https://www.scylladb.com/presentations/the-future-of-consensus-in-scylladb-5-0-and-beyond/>`_ - A Scylla Summit talk by Tomasz Grabiec (register for access)
|
||||
|
||||
|
||||
|
||||
@@ -823,7 +823,8 @@ The ``tombstone_gc`` option allows you to prevent data resurrection. With the ``
|
||||
are only removed after :term:`repair` is performed. Unlike ``gc_grace_seconds``, ``tombstone_gc`` has no time constraints - when
|
||||
the ``repair`` mode is on, tombstones garbage collection will wait until repair is run.
|
||||
|
||||
The ``tombstone_gc`` option can be enabled using ``ALTER TABLE`` and ``CREATE TABLE``. For example:
|
||||
You can enable the after-repair tombstone GC by setting the ``repair`` mode using
|
||||
``ALTER TABLE`` or ``CREATE TABLE``. For example:
|
||||
|
||||
.. code-block:: cql
|
||||
|
||||
@@ -833,10 +834,6 @@ The ``tombstone_gc`` option can be enabled using ``ALTER TABLE`` and ``CREATE TA
|
||||
|
||||
ALTER TABLE ks.cf WITH tombstone_gc = {'mode':'repair'} ;
|
||||
|
||||
.. note::
|
||||
The ``tombstone_gc`` option was added in ScyllaDB 5.0 as an experimental feature, and it is disabled by default.
|
||||
You need to explicitly specify the ``repair`` mode table property to enable the feature.
|
||||
|
||||
The following modes are available:
|
||||
|
||||
.. list-table::
|
||||
@@ -846,7 +843,7 @@ The following modes are available:
|
||||
* - Mode
|
||||
- Description
|
||||
* - ``timeout``
|
||||
- Tombstone GC is performed after the wait time specified with ``gc_grace_seconds``. Default in ScyllaDB 5.0.
|
||||
- Tombstone GC is performed after the wait time specified with ``gc_grace_seconds`` (default).
|
||||
* - ``repair``
|
||||
- Tombstone GC is performed after repair is run.
|
||||
* - ``disabled``
|
||||
|
||||
@@ -25,7 +25,7 @@ Getting Started
|
||||
:id: "getting-started"
|
||||
:class: my-panel
|
||||
|
||||
* `Install ScyllaDB (Binary Packages, Docker, or EC2) <https://www.scylladb.com/download/>`_ - Links to the ScyllaDB Download Center
|
||||
* `Install ScyllaDB (Binary Packages, Docker, or EC2) <https://www.scylladb.com/download/#core>`_ - Links to the ScyllaDB Download Center
|
||||
|
||||
* :doc:`Configure ScyllaDB </getting-started/system-configuration/>`
|
||||
* :doc:`Run ScyllaDB in a Shared Environment </getting-started/scylla-in-a-shared-environment>`
|
||||
|
||||
@@ -20,7 +20,7 @@ Install ScyllaDB
|
||||
|
||||
Keep your versions up-to-date. The two latest versions are supported. Also always install the latest patches for your version.
|
||||
|
||||
* Download and install ScyllaDB Server, Drivers and Tools in `Scylla Download Center <https://www.scylladb.com/download/#server/>`_
|
||||
* Download and install ScyllaDB Server, Drivers and Tools in `ScyllaDB Download Center <https://www.scylladb.com/download/#core>`_
|
||||
* :doc:`ScyllaDB Web Installer for Linux <scylla-web-installer>`
|
||||
* :doc:`ScyllaDB Unified Installer (relocatable executable) <unified-installer>`
|
||||
* :doc:`Air-gapped Server Installation <air-gapped-install>`
|
||||
|
||||
@@ -4,7 +4,7 @@ ScyllaDB Web Installer for Linux
|
||||
|
||||
ScyllaDB Web Installer is a platform-agnostic installation script you can run with ``curl`` to install ScyllaDB on Linux.
|
||||
|
||||
See `ScyllaDB Download Center <https://www.scylladb.com/download/#server>`_ for information on manually installing ScyllaDB with platform-specific installation packages.
|
||||
See `ScyllaDB Download Center <https://www.scylladb.com/download/#core>`_ for information on manually installing ScyllaDB with platform-specific installation packages.
|
||||
|
||||
Prerequisites
|
||||
--------------
|
||||
|
||||
@@ -25,11 +25,7 @@ ScyllaDB Open Source
|
||||
|
||||
.. note::
|
||||
|
||||
Recommended OS and ScyllaDB AMI/Image OS for ScyllaDB Open Source:
|
||||
|
||||
- Ubuntu 20.04 for versions 4.6 and later.
|
||||
- CentOS 7 for versions earlier than 4.6.
|
||||
|
||||
The recommended OS for ScyllaDB Open Source is Ubuntu 22.04.
|
||||
|
||||
+----------------------------+----------------------------------+-----------------------------+---------+-------+
|
||||
| Linux Distributions | Ubuntu | Debian | CentOS /| Rocky/|
|
||||
@@ -37,6 +33,8 @@ ScyllaDB Open Source
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+
|
||||
| ScyllaDB Version / Version | 14.04| 16.04| 18.04|20.04 |22.04 | 8 | 9 | 10 | 11 | 7 | 8 |
|
||||
+============================+======+======+======+======+======+======+======+=======+=======+=========+=======+
|
||||
| 5.2 | |x| | |x| | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |v| | |v| |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+
|
||||
| 5.1 | |x| | |x| | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |v| | |v| |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+
|
||||
| 5.0 | |x| | |x| | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |v| | |v| |
|
||||
@@ -63,17 +61,18 @@ ScyllaDB Open Source
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+
|
||||
|
||||
|
||||
All releases are available as a Docker container, EC2 AMI, and a GCP image (GCP image from version 4.3).
|
||||
All releases are available as a Docker container, EC2 AMI, and a GCP image (GCP image from version 4.3). Since
|
||||
version 5.2, the ScyllaDB AMI/Image OS for ScyllaDB Open Source is based on Ubuntu 22.04.
|
||||
|
||||
|
||||
|
||||
ScyllaDB Enterprise
|
||||
--------------------
|
||||
|
||||
.. note::
|
||||
Recommended OS and ScyllaDB AMI/Image OS for ScyllaDB Enterprise:
|
||||
|
||||
- Ubuntu 20.04 for versions 2021.1 and later.
|
||||
- CentOS 7 for versions earlier than 2021.1.
|
||||
The recommended OS for ScyllaDB Enterprise is Ubuntu 22.04.
|
||||
|
||||
|
||||
+----------------------------+-----------------------------------+---------------------------+--------+-------+
|
||||
| Linux Distributions | Ubuntu | Debian | CentOS/| Rocky/|
|
||||
@@ -83,7 +82,7 @@ ScyllaDB Enterprise
|
||||
+============================+======+======+======+======+=======+======+======+======+======+========+=======+
|
||||
| 2022.2 | |x| | |x| | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |v| | |v| |
|
||||
+----------------------------+------+------+------+------+-------+------+------+------+------+--------+-------+
|
||||
| 2022.1 | |x| | |x| | |v| | |v| | |x| | |x| | |x| | |v| | |v| | |v| | |v| |
|
||||
| 2022.1 | |x| | |x| | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |v| | |v| |
|
||||
+----------------------------+------+------+------+------+-------+------+------+------+------+--------+-------+
|
||||
| 2021.1 | |x| | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |x| | |v| | |v| |
|
||||
+----------------------------+------+------+------+------+-------+------+------+------+------+--------+-------+
|
||||
@@ -95,4 +94,5 @@ ScyllaDB Enterprise
|
||||
+----------------------------+------+------+------+------+-------+------+------+------+------+--------+-------+
|
||||
|
||||
|
||||
All releases are available as a Docker container, EC2 AMI, and a GCP image (GCP image from version 2021.1).
|
||||
All releases are available as a Docker container, EC2 AMI, and a GCP image (GCP image from version 2021.1). Since
|
||||
version 2023.1, the ScyllaDB AMI/Image OS for ScyllaDB Enterprise is based on Ubuntu 22.04.
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
:image: /_static/img/mascots/scylla-docs.svg
|
||||
:search_box:
|
||||
|
||||
The most up-to-date documents for the fastest, best performing, high availability NoSQL database.
|
||||
New to ScyllaDB? Start `here <https://cloud.docs.scylladb.com/stable/scylladb-basics/>`_!
|
||||
|
||||
.. raw:: html
|
||||
|
||||
@@ -26,16 +26,7 @@
|
||||
<div class="grid-x grid-margin-x hs">
|
||||
|
||||
.. topic-box::
|
||||
:title: New to ScyllaDB? Start here!
|
||||
:link: https://cloud.docs.scylladb.com/stable/scylladb-basics/
|
||||
:class: large-4
|
||||
:anchor: ScyllaDB Basics
|
||||
|
||||
Learn the essentials of ScyllaDB.
|
||||
|
||||
|
||||
.. topic-box::
|
||||
:title: Let us manage your DB
|
||||
:title: ScyllaDB Cloud
|
||||
:link: https://cloud.docs.scylladb.com
|
||||
:class: large-4
|
||||
:anchor: ScyllaDB Cloud Documentation
|
||||
@@ -43,12 +34,20 @@
|
||||
Simplify application development with ScyllaDB Cloud - a fully managed database-as-a-service.
|
||||
|
||||
.. topic-box::
|
||||
:title: Manage your own DB
|
||||
:title: ScyllaDB Enterprise
|
||||
:link: https://enterprise.docs.scylladb.com
|
||||
:class: large-4
|
||||
:anchor: ScyllaDB Enterprise Documentation
|
||||
|
||||
Deploy and manage ScyllaDB's most stable enterprise-grade database with premium features and 24/7 support.
|
||||
|
||||
.. topic-box::
|
||||
:title: ScyllaDB Open Source
|
||||
:link: getting-started
|
||||
:class: large-4
|
||||
:anchor: ScyllaDB Open Source and Enterprise Documentation
|
||||
:anchor: ScyllaDB Open Source Documentation
|
||||
|
||||
Deploy and manage your database in your own environment.
|
||||
Deploy and manage your database in your environment.
|
||||
|
||||
|
||||
.. raw:: html
|
||||
@@ -59,40 +58,16 @@
|
||||
|
||||
<div class="topics-grid topics-grid--products">
|
||||
|
||||
<h2 class="topics-grid__title">Our Products</h2>
|
||||
<h2 class="topics-grid__title">Other Products</h2>
|
||||
|
||||
<div class="grid-container full">
|
||||
<div class="grid-x grid-margin-x">
|
||||
|
||||
.. topic-box::
|
||||
:title: ScyllaDB Enterprise
|
||||
:link: getting-started
|
||||
:image: /_static/img/mascots/scylla-enterprise.svg
|
||||
:class: topic-box--product,large-3,small-6
|
||||
|
||||
ScyllaDB’s most stable high-performance enterprise-grade NoSQL database.
|
||||
|
||||
.. topic-box::
|
||||
:title: ScyllaDB Open Source
|
||||
:link: getting-started
|
||||
:image: /_static/img/mascots/scylla-opensource.svg
|
||||
:class: topic-box--product,large-3,small-6
|
||||
|
||||
A high-performance NoSQL database with a close-to-the-hardware, shared-nothing approach.
|
||||
|
||||
.. topic-box::
|
||||
:title: ScyllaDB Cloud
|
||||
:link: https://cloud.docs.scylladb.com
|
||||
:image: /_static/img/mascots/scylla-cloud.svg
|
||||
:class: topic-box--product,large-3,small-6
|
||||
|
||||
A fully managed NoSQL database as a service powered by ScyllaDB Enterprise.
|
||||
|
||||
.. topic-box::
|
||||
:title: ScyllaDB Alternator
|
||||
:link: https://docs.scylladb.com/stable/alternator/alternator.html
|
||||
:image: /_static/img/mascots/scylla-alternator.svg
|
||||
:class: topic-box--product,large-3,small-6
|
||||
:class: topic-box--product,large-4,small-6
|
||||
|
||||
Open source Amazon DynamoDB-compatible API.
|
||||
|
||||
@@ -100,7 +75,7 @@
|
||||
:title: ScyllaDB Monitoring Stack
|
||||
:link: https://monitoring.docs.scylladb.com
|
||||
:image: /_static/img/mascots/scylla-monitor.svg
|
||||
:class: topic-box--product,large-3,small-6
|
||||
:class: topic-box--product,large-4,small-6
|
||||
|
||||
Complete open source monitoring solution for your ScyllaDB clusters.
|
||||
|
||||
@@ -108,7 +83,7 @@
|
||||
:title: ScyllaDB Manager
|
||||
:link: https://manager.docs.scylladb.com
|
||||
:image: /_static/img/mascots/scylla-manager.svg
|
||||
:class: topic-box--product,large-3,small-6
|
||||
:class: topic-box--product,large-4,small-6
|
||||
|
||||
Hassle-free ScyllaDB NoSQL database management for scale-out clusters.
|
||||
|
||||
@@ -116,7 +91,7 @@
|
||||
:title: ScyllaDB Drivers
|
||||
:link: https://docs.scylladb.com/stable/using-scylla/drivers/
|
||||
:image: /_static/img/mascots/scylla-drivers.svg
|
||||
:class: topic-box--product,large-3,small-6
|
||||
:class: topic-box--product,large-4,small-6
|
||||
|
||||
Shard-aware drivers for superior performance.
|
||||
|
||||
@@ -124,7 +99,7 @@
|
||||
:title: ScyllaDB Operator
|
||||
:link: https://operator.docs.scylladb.com
|
||||
:image: /_static/img/mascots/scylla-enterprise.svg
|
||||
:class: topic-box--product,large-3,small-6
|
||||
:class: topic-box--product,large-4,small-6
|
||||
|
||||
Easily run and manage your ScyllaDB cluster on Kubernetes.
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
* endpoint_snitch - ``grep endpoint_snitch /etc/scylla/scylla.yaml``
|
||||
* Scylla version - ``scylla --version``
|
||||
* Authenticator - ``grep authenticator /etc/scylla/scylla.yaml``
|
||||
* consistent_cluster_management - ``grep consistent_cluster_management /etc/scylla/scylla.yaml``
|
||||
|
||||
.. Note::
|
||||
|
||||
|
||||
@@ -119,6 +119,7 @@ Add New DC
|
||||
* **listen_address** - IP address that Scylla used to connect to the other Scylla nodes in the cluster.
|
||||
* **endpoint_snitch** - Set the selected snitch.
|
||||
* **rpc_address** - Address for client connections (Thrift, CQL).
|
||||
* **consistent_cluster_management** - set to the same value as used by your existing nodes.
|
||||
|
||||
The parameters ``seeds``, ``cluster_name`` and ``endpoint_snitch`` need to match the existing cluster.
|
||||
|
||||
@@ -200,6 +201,11 @@ Add New DC
|
||||
|
||||
#. If you are using Scylla Monitoring, update the `monitoring stack <https://monitoring.docs.scylladb.com/stable/install/monitoring_stack.html#configure-scylla-nodes-from-files>`_ to monitor it. If you are using Scylla Manager, make sure you install the `Manager Agent <https://manager.docs.scylladb.com/stable/install-scylla-manager-agent.html>`_ and Manager can access the new DC.
|
||||
|
||||
Handling Failures
|
||||
=================
|
||||
|
||||
If one of the new nodes starts bootstrapping but then fails in the middle e.g. due to a power loss, you can retry bootstrap (by restarting the node). If you don't want to retry, or the node refuses to boot on subsequent attempts, consult the :doc:`Handling Membership Change Failures document</operating-scylla/procedures/cluster-management/handling-membership-change-failures>`.
|
||||
|
||||
Configure the Client not to Connect to the New DC
|
||||
-------------------------------------------------
|
||||
|
||||
|
||||
@@ -54,6 +54,8 @@ Procedure
|
||||
|
||||
* **seeds** - Specifies the IP address of an existing node in the cluster. The new node will use this IP to connect to the cluster and learn the cluster topology and state.
|
||||
|
||||
* **consistent_cluster_management** - set to the same value as used by your existing nodes.
|
||||
|
||||
.. note::
|
||||
|
||||
In earlier versions of ScyllaDB, seed nodes assisted in gossip. Starting with Scylla Open Source 4.3 and Scylla Enterprise 2021.1, the seed concept in gossip has been removed. If you are using an earlier version of ScyllaDB, you need to configure the seeds parameter in the following way:
|
||||
@@ -117,3 +119,8 @@ Procedure
|
||||
You don't need to restart the Scylla service after modifying the seeds list in ``scylla.yaml``.
|
||||
|
||||
#. If you are using Scylla Monitoring, update the `monitoring stack <https://monitoring.docs.scylladb.com/stable/install/monitoring_stack.html#configure-scylla-nodes-from-files>`_ to monitor it. If you are using Scylla Manager, make sure you install the `Manager Agent <https://manager.docs.scylladb.com/stable/install-scylla-manager-agent.html>`_, and Manager can access it.
|
||||
|
||||
Handling Failures
|
||||
=================
|
||||
|
||||
If the node starts bootstrapping but then fails in the middle e.g. due to a power loss, you can retry bootstrap (by restarting the node). If you don't want to retry, or the node refuses to boot on subsequent attempts, consult the :doc:`Handling Membership Change Failures document</operating-scylla/procedures/cluster-management/handling-membership-change-failures>`.
|
||||
|
||||
@@ -70,6 +70,7 @@ the file can be found under ``/etc/scylla/``
|
||||
- **listen_address** - IP address that the Scylla use to connect to other Scylla nodes in the cluster
|
||||
- **endpoint_snitch** - Set the selected snitch
|
||||
- **rpc_address** - Address for client connection (Thrift, CQLSH)
|
||||
- **consistent_cluster_management** - ``true`` by default, can be set to ``false`` if you don't want to use Raft for consistent schema management in this cluster (will be mandatory in later versions). Check the :doc:`Raft in ScyllaDB document</architecture/raft/>` to learn more.
|
||||
|
||||
3. In the ``cassandra-rackdc.properties`` file, edit the rack and data center information.
|
||||
The file can be found under ``/etc/scylla/``.
|
||||
|
||||
@@ -26,6 +26,7 @@ The file can be found under ``/etc/scylla/``
|
||||
- **listen_address** - IP address that Scylla used to connect to other Scylla nodes in the cluster
|
||||
- **endpoint_snitch** - Set the selected snitch
|
||||
- **rpc_address** - Address for client connection (Thrift, CQL)
|
||||
- **consistent_cluster_management** - ``true`` by default, can be set to ``false`` if you don't want to use Raft for consistent schema management in this cluster (will be mandatory in later versions). Check the :doc:`Raft in ScyllaDB document</architecture/raft/>` to learn more.
|
||||
|
||||
3. This step needs to be done **only** if you are using the **GossipingPropertyFileSnitch**. If not, skip this step.
|
||||
In the ``cassandra-rackdc.properties`` file, edit the parameters listed below.
|
||||
|
||||
@@ -63,6 +63,7 @@ Perform the following steps for each node in the new cluster:
|
||||
* **rpc_address** - Address for client connection (Thrift, CQL).
|
||||
* **broadcast_address** - The IP address a node tells other nodes in the cluster to contact it by.
|
||||
* **broadcast_rpc_address** - Default: unset. The RPC address to broadcast to drivers and other Scylla nodes. It cannot be set to 0.0.0.0. If left blank, it will be set to the value of ``rpc_address``. If ``rpc_address`` is set to 0.0.0.0, ``broadcast_rpc_address`` must be explicitly configured.
|
||||
* **consistent_cluster_management** - ``true`` by default, can be set to ``false`` if you don't want to use Raft for consistent schema management in this cluster (will be mandatory in later versions). Check the :doc:`Raft in ScyllaDB document</architecture/raft/>` to learn more.
|
||||
|
||||
#. After you have installed and configured Scylla and edited ``scylla.yaml`` file on all the nodes, start the node specified with the ``seeds`` parameter. Then start the rest of the nodes in your cluster, one at a time, using
|
||||
``sudo systemctl start scylla-server``.
|
||||
|
||||
@@ -0,0 +1,204 @@
|
||||
Handling Cluster Membership Change Failures
|
||||
*******************************************
|
||||
|
||||
A failure may happen in the middle of a cluster membership change (that is bootstrap, decommission, removenode, or replace), such as loss of power. If that happens, you should ensure that the cluster is brought back to a consistent state as soon as possible. Further membership changes might be impossible until you do so.
|
||||
|
||||
For example, a node that crashed in the middle of decommission might leave the cluster in a state where the remaining nodes still consider the crashed node a member, but the node itself will refuse to restart and communicate with the cluster. This particular case is very unlikely - it requires a specifically timed crash to happen, after the data streaming phase of decommission finishes but before the node commits that it left. But if it happens, you won't be able to bootstrap other nodes (they will try to contact the partially-decommissioned node and fail) until you remove the remains of the node that crashed.
|
||||
|
||||
---------------------------
|
||||
Handling a Failed Bootstrap
|
||||
---------------------------
|
||||
|
||||
If a failure happens when trying to bootstrap a new node to the cluster, you can try bootstrapping the node again by restarting it.
|
||||
|
||||
If the failure persists or you decided that you don't want to bootstrap the node anymore, follow the instructions in the :ref:`cleaning up after a failed membership change <cleaning-up-after-change>` section to remove the remains of the bootstrapping node. You can then clear the node's data directories and attempt to bootstrap it again.
|
||||
|
||||
------------------------------
|
||||
Handling a Failed Decommission
|
||||
------------------------------
|
||||
|
||||
There are two cases.
|
||||
|
||||
Most likely the failure happened during the data repair/streaming phase - before the node tried to leave the token ring. Look for a log message containing "leaving token ring" in the logs of the node that you tried to decommission. For example:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
INFO 2023-03-14 13:08:38,323 [shard 0] storage_service - decommission[5b2e752e-964d-4f36-871f-254491f4e8cc]: leaving token ring
|
||||
|
||||
If the message is **not** present, the failure happened before the node tried to leave the token ring. In that case you can simply restart the node and attempt to decommission it again.
|
||||
|
||||
If the message is present, the node attempted to leave the token ring, but it might have left the cluster only partially before the failure. **Do not try to restart the node**. Instead, you must make sure that the node is dead and remove any leftovers using the :doc:`removenode operation </operating-scylla/nodetool-commands/removenode/>`. See :ref:`cleaning up after a failed membership change <cleaning-up-after-change>`. Trying to restart the node after such failure results in unpredictable behavior - it may restart normally, it may refuse to restart, or it may even try to rebootstrap.
|
||||
|
||||
If you don't have access to the node's logs anymore, assume the second case (the node might have attempted to leave the token ring): **do not try to restart the node**. Instead, follow the :ref:`cleaning up after a failed membership change <cleaning-up-after-change>` section.
|
||||
|
||||
----------------------------
|
||||
Handling a Failed Removenode
|
||||
----------------------------
|
||||
|
||||
Simply retry the removenode operation.
|
||||
|
||||
If you somehow lost the host ID of the node that you tried to remove, follow the instructions in :ref:`cleaning up after a failed membership change <cleaning-up-after-change>`.
|
||||
|
||||
--------------------------
|
||||
Handling a Failed Replace
|
||||
--------------------------
|
||||
|
||||
Replace is a special case of bootstrap, but the bootstrapping node tries to take the place of another dead node. You can retry a failed replace operation by restarting the replacing node.
|
||||
|
||||
If the failure persists or you decided that you don't want to perform the replace anymore, follow the instructions in :ref:`cleaning up after a failed membership change <cleaning-up-after-change>` section to remove the remains of the replacing node. You can then clear the node's data directories and attempt to replace again. Alternatively, you can remove the dead node which you initially tried to replace using :doc:`removenode </operating-scylla/nodetool-commands/removenode/>`, and perform a regular bootstrap.
|
||||
|
||||
.. _cleaning-up-after-change:
|
||||
|
||||
--------------------------------------------
|
||||
Cleaning up after a Failed Membership Change
|
||||
--------------------------------------------
|
||||
|
||||
After a failed membership change, the cluster may contain remains of a node that tried to leave or join - other nodes may consider the node a member, possibly in a transitioning state. It is important to remove any such "ghost" members. Their presence may reduce the cluster's availability, performance, or prevent further membership changes.
|
||||
|
||||
You need to determine the host IDs of any potential ghost members, then remove them using the :doc:`removenode operation </operating-scylla/nodetool-commands/removenode/>`. Note that after a failed replace, there may be two different host IDs that you'll want to find and run ``removenode`` on: the new replacing node and the old node that you tried to replace. (Or you can remove the new node only, then try to replace the old node again.)
|
||||
|
||||
Step One: Determining Host IDs of Ghost Members
|
||||
===============================================
|
||||
|
||||
* After a failed bootstrap, you need to determine the host ID of the node that tried to bootstrap, if it managed to generate a host ID (it might not have chosen the host ID yet if it failed very early in the procedure, in which case there's nothing to remove). Look for a message containing ``system_keyspace - Setting local host id to`` in the node's logs, which will contain the node's host ID. For example: ``system_keyspace - Setting local host id to f180b78b-6094-434d-8432-7327f4d4b38d``. If you don't have access to the node's logs, read the generic method below.
|
||||
* After a failed decommission, you need to determine the host ID of the node that tried to decommission. You can search the node's logs as in the failed bootstrap case (see above), or you can use the generic method below.
|
||||
* After a failed removenode, you need to determine the host ID of the node that you tried to remove. You should already have it, since executing a removenode requires the host ID in the first place. But if you lost it somehow, read the generic method below.
|
||||
* After a failed replace, you need to determine the host ID of the replacing node. Search the node's logs as in the failed bootstrap case (see above), or you can use the generic method below. You may also want to determine the host ID of the replaced node - either to attempt replacing it again after removing the remains of the previous replacing node, or to remove it using :doc:`nodetool removenode </operating-scylla/nodetool-commands/removenode/>`. You should already have the host ID of the replaced node if you used the ``replace_node_first_boot`` option to perform the replace.
|
||||
|
||||
If you cannot determine the ghost members' host ID using the suggestions above, use the method described below. The approach differs depending on whether Raft is enabled in your cluster.
|
||||
|
||||
.. tabs::
|
||||
|
||||
.. group-tab:: Raft enabled
|
||||
|
||||
#. Make sure there are no ongoing membership changes.
|
||||
|
||||
#. Execute the following CQL query on one of your nodes to retrieve the Raft group 0 ID:
|
||||
|
||||
.. code-block:: cql
|
||||
|
||||
select value from system.scylla_local where key = 'raft_group0_id'
|
||||
|
||||
For example:
|
||||
|
||||
.. code-block:: cql
|
||||
|
||||
cqlsh> select value from system.scylla_local where key = 'raft_group0_id';
|
||||
|
||||
value
|
||||
--------------------------------------
|
||||
607fef80-c276-11ed-a6f6-3075f294cc65
|
||||
|
||||
#. Use the obtained Raft group 0 ID to query the set of all cluster members' host IDs (which includes the ghost members), by executing the following query:
|
||||
|
||||
.. code-block:: cql
|
||||
|
||||
select server_id from system.raft_state where group_id = <group0_id>
|
||||
|
||||
replace ``<group0_id>`` with the group 0 ID that you obtained. For example:
|
||||
|
||||
.. code-block:: cql
|
||||
|
||||
cqlsh> select server_id from system.raft_state where group_id = 607fef80-c276-11ed-a6f6-3075f294cc65;
|
||||
|
||||
server_id
|
||||
--------------------------------------
|
||||
26a9badc-6e96-4b86-a8df-5173e5ab47fe
|
||||
7991e7f5-692e-45a0-8ae5-438be5bc7c4f
|
||||
aff11c6d-fbe7-4395-b7ca-3912d7dba2c6
|
||||
|
||||
#. Execute the following CQL query to obtain the host IDs of all token ring members:
|
||||
|
||||
.. code-block:: cql
|
||||
|
||||
                  select peer, host_id, up from system.cluster_status;
|
||||
|
||||
For example:
|
||||
|
||||
.. code-block:: cql
|
||||
|
||||
cqlsh> select peer, host_id, up from system.cluster_status;
|
||||
|
||||
peer | host_id | up
|
||||
-----------+--------------------------------------+-------
|
||||
127.0.0.3 | null | False
|
||||
127.0.0.1 | 26a9badc-6e96-4b86-a8df-5173e5ab47fe | True
|
||||
127.0.0.2 | 7991e7f5-692e-45a0-8ae5-438be5bc7c4f | True
|
||||
|
||||
The output of this query is similar to the output of ``nodetool status``.
|
||||
|
||||
We included the ``up`` column to see which nodes are down and the ``peer`` column to see their IP addresses.
|
||||
|
||||
In this example, one of the nodes tried to decommission and crashed as soon as it left the token ring but before it left the Raft group. Its entry will show up in ``system.cluster_status`` queries with ``host_id = null``, like above, until the cluster is restarted.
|
||||
|
||||
#. A host ID belongs to a ghost member if:
|
||||
|
||||
* It appears in the ``system.raft_state`` query but not in the ``system.cluster_status`` query,
|
||||
* Or it appears in the ``system.cluster_status`` query but does not correspond to any remaining node in your cluster.
|
||||
|
||||
In our example, the ghost member's host ID was ``aff11c6d-fbe7-4395-b7ca-3912d7dba2c6`` because it appeared in the ``system.raft_state`` query but not in the ``system.cluster_status`` query.
|
||||
|
||||
If you're unsure whether a given row in the ``system.cluster_status`` query corresponds to a node in your cluster, you can connect to each node in the cluster and execute ``select host_id from system.local`` (or search the node's logs) to obtain that node's host ID, collecting the host IDs of all nodes in your cluster. Then check if each host ID from the ``system.cluster_status`` query appears in your collected set; if not, it's a ghost member.
|
||||
|
||||
A good rule of thumb is to look at the members marked as down (``up = False`` in ``system.cluster_status``) - ghost members are eventually marked as down by the remaining members of the cluster. But remember that a real member might also be marked as down if it was shutdown or partitioned away from the rest of the cluster. If in doubt, connect to each node and collect their host IDs, as described in the previous paragraph.
|
||||
|
||||
.. group-tab:: Raft disabled
|
||||
|
||||
#. Make sure there are no ongoing membership changes.
|
||||
|
||||
#. Execute the following CQL query on one of your nodes to obtain the host IDs of all token ring members:
|
||||
|
||||
.. code-block:: cql
|
||||
|
||||
select peer, host_id, up from system.cluster_status;
|
||||
|
||||
For example:
|
||||
|
||||
.. code-block:: cql
|
||||
|
||||
cqlsh> select peer, host_id, up from system.cluster_status;
|
||||
|
||||
peer | host_id | up
|
||||
-----------+--------------------------------------+-------
|
||||
127.0.0.3 | 42405b3b-487e-4759-8590-ddb9bdcebdc5 | False
|
||||
127.0.0.1 | 4e3ee715-528f-4dc9-b10f-7cf294655a9e | True
|
||||
127.0.0.2 | 225a80d0-633d-45d2-afeb-a5fa422c9bd5 | True
|
||||
|
||||
The output of this query is similar to the output of ``nodetool status``.
|
||||
|
||||
We included the ``up`` column to see which nodes are down.
|
||||
|
||||
            In this example, one of the 3 nodes tried to decommission but crashed while it was leaving the token ring. The node is in a partially left state and will refuse to restart, but other nodes still consider it a normal member. We'll have to use ``removenode`` to clean up after it.
|
||||
|
||||
#. A host ID belongs to a ghost member if it appears in the ``system.cluster_status`` query but does not correspond to any remaining node in your cluster.
|
||||
|
||||
If you're unsure whether a given row in the ``system.cluster_status`` query corresponds to a node in your cluster, you can connect to each node in the cluster and execute ``select host_id from system.local`` (or search the node's logs) to obtain that node's host ID, collecting the host IDs of all nodes in your cluster. Then check if each host ID from the ``system.cluster_status`` query appears in your collected set; if not, it's a ghost member.
|
||||
|
||||
A good rule of thumb is to look at the members marked as down (``up = False`` in ``system.cluster_status``) - ghost members are eventually marked as down by the remaining members of the cluster. But remember that a real member might also be marked as down if it was shutdown or partitioned away from the rest of the cluster. If in doubt, connect to each node and collect their host IDs, as described in the previous paragraph.
|
||||
|
||||
In our example, the ghost member's host ID is ``42405b3b-487e-4759-8590-ddb9bdcebdc5`` because it is the only member marked as down and we can verify that the other two rows appearing in ``system.cluster_status`` belong to the remaining 2 nodes in the cluster.
|
||||
|
||||
In some cases, even after a failed topology change, there may be no ghost members left - for example, if a bootstrapping node crashed very early in the procedure or a decommissioning node crashed after it committed the membership change but before it finalized its own shutdown steps.
|
||||
|
||||
If any ghost members are present, proceed to the next step.
|
||||
|
||||
Step Two: Removing the Ghost Members
|
||||
====================================
|
||||
|
||||
Given the host IDs of ghost members, you can remove them using ``removenode``; follow the :doc:`documentation for removenode operation </operating-scylla/nodetool-commands/removenode/>`.
|
||||
|
||||
If you're executing ``removenode`` too quickly after a failed membership change, an error similar to the following might pop up:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
nodetool: Scylla API server HTTP POST to URL '/storage_service/remove_node' failed: seastar::rpc::remote_verb_error (node_ops_cmd_check: Node 127.0.0.2 rejected node_ops_cmd=removenode_abort from node=127.0.0.1 with ops_uuid=0ba0a5ab-efbd-4801-a31c-034b5f55487c, pending_node_ops={b47523f2-de6a-4c38-8490-39127dba6b6a}, pending node ops is in progress)
|
||||
|
||||
In that case simply wait for 2 minutes before trying ``removenode`` again.
|
||||
|
||||
If ``removenode`` returns an error like:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
nodetool: Scylla API server HTTP POST to URL '/storage_service/remove_node' failed: std::runtime_error (removenode[12e7e05b-d1ae-4978-b6a6-de0066aa80d8]: Host ID 42405b3b-487e-4759-8590-ddb9bdcebdc5 not found in the cluster)
|
||||
|
||||
and you're sure that you're providing the correct Host ID, it means that the member was already removed and you don't have to clean up after it.
|
||||
@@ -25,6 +25,7 @@ Cluster Management Procedures
|
||||
Safely Shutdown Your Cluster <safe-shutdown>
|
||||
Safely Restart Your Cluster <safe-start>
|
||||
Cluster Membership Change <membership-changes>
|
||||
Handling Membership Change Failures <handling-membership-change-failures>
|
||||
repair-based-node-operation
|
||||
|
||||
.. panel-box::
|
||||
@@ -80,6 +81,8 @@ Cluster Management Procedures
|
||||
|
||||
* :doc:`Cluster Membership Change Notes </operating-scylla/procedures/cluster-management/membership-changes/>`
|
||||
|
||||
* :doc:`Handling Membership Change Failures </operating-scylla/procedures/cluster-management/handling-membership-change-failures>`
|
||||
|
||||
* :ref:`Add Bigger Nodes to a Cluster <add-bigger-nodes-to-a-cluster>`
|
||||
|
||||
* :doc:`Repair Based Node Operations (RBNO) </operating-scylla/procedures/cluster-management/repair-based-node-operation>`
|
||||
|
||||
@@ -49,6 +49,11 @@ Removing a Running Node
|
||||
|
||||
.. include:: /rst_include/clean-data-code.rst
|
||||
|
||||
Handling Failures
|
||||
-----------------
|
||||
|
||||
If ``nodetool decommission`` starts executing but then fails in the middle (e.g., due to a power loss), consult the :doc:`Handling Membership Change Failures document </operating-scylla/procedures/cluster-management/handling-membership-change-failures>`.
|
||||
|
||||
----------------------------
|
||||
Removing an Unavailable Node
|
||||
----------------------------
|
||||
@@ -81,7 +86,6 @@ the ``nodetool removenode`` operation will fail. To ensure successful operation
|
||||
``nodetool removenode`` (not required when :doc:`Repair Based Node Operations (RBNO) <repair-based-node-operation>` for ``removenode``
|
||||
is enabled).
|
||||
|
||||
|
||||
Additional Information
|
||||
----------------------
|
||||
* :doc:`Nodetool Reference </operating-scylla/nodetool>`
|
||||
|
||||
@@ -25,6 +25,7 @@ Login to one of the nodes in the cluster with (UN) status, collect the following
|
||||
* seeds - ``cat /etc/scylla/scylla.yaml | grep seeds:``
|
||||
* endpoint_snitch - ``cat /etc/scylla/scylla.yaml | grep endpoint_snitch``
|
||||
* Scylla version - ``scylla --version``
|
||||
* consistent_cluster_management - ``grep consistent_cluster_management /etc/scylla/scylla.yaml``
|
||||
|
||||
Procedure
|
||||
---------
|
||||
|
||||
@@ -66,6 +66,8 @@ Procedure
|
||||
|
||||
- **rpc_address** - Address for client connection (Thrift, CQL)
|
||||
|
||||
- **consistent_cluster_management** - set to the same value as used by your existing nodes.
|
||||
|
||||
#. Add the ``replace_node_first_boot`` parameter to the ``scylla.yaml`` config file on the new node. This line can be added to any place in the config file. After a successful node replacement, there is no need to remove it from the ``scylla.yaml`` file. (Note: The obsolete parameters "replace_address" and "replace_address_first_boot" are not supported and should not be used). The value of the ``replace_node_first_boot`` parameter should be the Host ID of the node to be replaced.
|
||||
|
||||
For example (using the Host ID of the failed node from above):
|
||||
@@ -150,6 +152,12 @@ Procedure
|
||||
.. note::
|
||||
When :doc:`Repair Based Node Operations (RBNO) <repair-based-node-operation>` for **replace** is enabled, there is no need to rerun repair.
|
||||
|
||||
|
||||
Handling Failures
|
||||
-----------------
|
||||
|
||||
If the new node starts and begins the replace operation but then fails in the middle (e.g., due to a power loss), you can retry the replace by restarting the node. If you don't want to retry, or the node refuses to boot on subsequent attempts, consult the :doc:`Handling Membership Change Failures document </operating-scylla/procedures/cluster-management/handling-membership-change-failures>`.
|
||||
|
||||
------------------------------
|
||||
Setup RAID Following a Restart
|
||||
------------------------------
|
||||
|
||||
@@ -68,7 +68,7 @@ Gracefully stop the node
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-enterprise-server stop
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
@@ -92,13 +92,13 @@ Start the node
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-enterprise-server start
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check the ScyllaDB version.
|
||||
3. Check scylla-enterprise-server log (by ``journalctl _COMM=scylla``) and ``/var/log/syslog`` to validate there are no errors.
|
||||
3. Check scylla-server log (by ``journalctl _COMM=scylla``) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
@@ -130,7 +130,7 @@ Gracefully shutdown ScyllaDB
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-enterprise-server stop
|
||||
sudo service scylla-server stop
|
||||
|
||||
Downgrade to the previous release
|
||||
----------------------------------
|
||||
@@ -164,7 +164,7 @@ Start the node
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-enterprise-server start
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
|
||||
@@ -114,7 +114,7 @@ New io.conf format was introduced in ScyllaDB 2.3 and 2019.1. If your io.conf do
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-enterprise-server start
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
@@ -154,7 +154,7 @@ Gracefully shutdown ScyllaDB
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-enterprise-server stop
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
|
||||
@@ -66,7 +66,7 @@ Gracefully stop the node
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-enterprise-server stop
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
|
||||
@@ -16,13 +16,13 @@ Start the node
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-enterprise-server start
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
#. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
#. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check the ScyllaDB version.
|
||||
#. Check scylla-enterprise-server log (by ``journalctl _COMM=scylla``) and ``/var/log/syslog`` to validate there are no errors.
|
||||
#. Check scylla-server log (by ``journalctl _COMM=scylla``) and ``/var/log/syslog`` to validate there are no errors.
|
||||
#. Check again after 2 minutes to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
@@ -54,7 +54,7 @@ Gracefully shutdown ScyllaDB
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-enterprise-server stop
|
||||
sudo service scylla-server stop
|
||||
|
||||
Downgrade to the previous release
|
||||
----------------------------------
|
||||
@@ -88,7 +88,7 @@ Start the node
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-enterprise-server start
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
|
||||
@@ -7,7 +7,7 @@ This document is a step-by-step procedure for upgrading from ScyllaDB Enterprise
|
||||
|
||||
Applicable Versions
|
||||
===================
|
||||
This guide covers upgrading ScyllaDB Enterprise from version 2021.1.x to ScyllaDB Enterprise version 2022.1.y on |OS|. See :doc:`OS Support by Platform and Version </getting-started/os-support>` for information about supported versions.
|
||||
This guide covers upgrading ScyllaDB Enterprise from version **2021.1.8** or later to ScyllaDB Enterprise version 2022.1.y on |OS|. See :doc:`OS Support by Platform and Version </getting-started/os-support>` for information about supported versions.
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
@@ -69,7 +69,7 @@ Gracefully stop the node
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-enterprise-server stop
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
|
||||
@@ -36,13 +36,13 @@ A new io.conf format was introduced in Scylla 2.3 and 2019.1. If your io.conf do
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-enterprise-server start
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
#. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
#. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check the ScyllaDB version.
|
||||
#. Check scylla-enterprise-server log (by ``journalctl _COMM=scylla``) and ``/var/log/syslog`` to validate there are no errors.
|
||||
#. Check scylla-server log (by ``journalctl _COMM=scylla``) and ``/var/log/syslog`` to validate there are no errors.
|
||||
#. Check again after two minutes to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
@@ -75,7 +75,7 @@ Gracefully shutdown ScyllaDB
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-enterprise-server stop
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
@@ -120,7 +120,7 @@ Start the node
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-enterprise-server start
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
|
||||
@@ -8,8 +8,8 @@ Upgrading ScyllaDB images requires updating:
|
||||
* Underlying OS packages. Starting with ScyllaDB 4.6, each ScyllaDB version includes a list of 3rd party and
|
||||
OS packages tested with the ScyllaDB release. The list depends on the base OS:
|
||||
|
||||
* ScyllaDB Open Source **4.4** and ScyllaDB Enterprise **2020.1** or earlier are based on **CentOS 7**.
|
||||
* ScyllaDB Open Source **4.5** and ScyllaDB Enterprise **2021.1** or later are based on **Ubuntu 20.04**.
|
||||
* ScyllaDB Open Source **5.0 and 5.1** and ScyllaDB Enterprise **2021.1, 2022.1, and 2022.2** are based on **Ubuntu 20.04**.
|
||||
* ScyllaDB Open Source **5.2** and ScyllaDB Enterprise **2023.1** are based on **Ubuntu 22.04**.
|
||||
|
||||
If you're running ScyllaDB Open Source 5.0 or later or ScyllaDB Enterprise 2021.1.10 or later, you can
|
||||
automatically update 3rd party and OS packages together with the ScyllaDB packages - by running one command.
|
||||
|
||||
@@ -6,10 +6,10 @@ Upgrade ScyllaDB
|
||||
:titlesonly:
|
||||
:hidden:
|
||||
|
||||
ScyllaDB Enterprise <upgrade-enterprise/index>
|
||||
ScyllaDB Open Source <upgrade-opensource/index>
|
||||
ScyllaDB Open Source to ScyllaDB Enterprise <upgrade-to-enterprise/index>
|
||||
ScyllaDB AMI <ami-upgrade>
|
||||
ScyllaDB Enterprise <https://enterprise.docs.scylladb.com/enterprise/upgrade/upgrade-enterprise/index.html>
|
||||
|
||||
.. raw:: html
|
||||
|
||||
@@ -23,14 +23,14 @@ Upgrade ScyllaDB
|
||||
|
||||
Procedures for upgrading Scylla.
|
||||
|
||||
* :doc:`Upgrade ScyllaDB Enterprise <upgrade-enterprise/index>`
|
||||
|
||||
* :doc:`Upgrade ScyllaDB Open Source <upgrade-opensource/index>`
|
||||
|
||||
* :doc:`Upgrade from ScyllaDB Open Source to Scylla Enterprise <upgrade-to-enterprise/index>`
|
||||
|
||||
* :doc:`Upgrade ScyllaDB AMI <ami-upgrade>`
|
||||
|
||||
* `Upgrade ScyllaDB Enterprise <https://enterprise.docs.scylladb.com/enterprise/upgrade/upgrade-enterprise/index.html>`_
|
||||
|
||||
|
||||
.. raw:: html
|
||||
|
||||
|
||||
@@ -1,17 +0,0 @@
|
||||
.. include:: /upgrade/upgrade-enterprise/_common/gossip_generation_bug_warning.rst
|
||||
|
||||
.. note::
|
||||
|
||||
   Scylla Enterprise 2019.1.6 added a new configuration to restrict the memory usage of cartesian product IN queries.
|
||||
If you are using IN in SELECT operations and hitting a *"cartesian product size ... is greater than maximum"* error, you can either update the query (recommended) or bypass the warning temporarily by adding the following parameters to *scylla.yaml*:
|
||||
|
||||
* *max_clustering_key_restrictions_per_query: 1000*
|
||||
* *max_partition_key_restrictions_per_query: 1000*
|
||||
|
||||
The higher the values, the more likely you will hit an out of memory issue.
|
||||
|
||||
|
||||
.. note::
|
||||
|
||||
Scylla Enterprise 2019.1.8 added a new configuration to restrict the memory usage of reverse queries.
|
||||
If you are using reverse queries and hitting an error *"Aborting reverse partition read because partition ... is larger than the maximum safe size of ... for reversible partitions"* see the :doc:`reverse queries FAQ section </troubleshooting/reverse-queries>`.
|
||||
@@ -1,4 +0,0 @@
|
||||
.. include:: /upgrade/upgrade-enterprise/_common/gossip_generation_bug_warning.rst
|
||||
.. include:: /upgrade/upgrade-enterprise/_common/mv_si_rebuild_warning.rst
|
||||
|
||||
|
||||
@@ -1,10 +0,0 @@
|
||||
.. note:: The note is only useful when CDC is GA supported in the target Scylla. Execute the following commands one node at a time, moving to the next node only **after** the upgrade procedure completed successfully.
|
||||
|
||||
.. warning::
|
||||
|
||||
If you are using CDC and upgrading Scylla 2020.1 to 2021.1, please review the API updates in :doc:`querying CDC streams </using-scylla/cdc/cdc-querying-streams>` and :doc:`CDC stream generations </using-scylla/cdc/cdc-stream-generations>`.
|
||||
In particular, you should update applications that use CDC according to :ref:`CDC Upgrade notes <scylla-4-3-to-4-4-upgrade>` **before** upgrading the cluster to 2021.1.
|
||||
|
||||
If you are using CDC and upgrading from pre 2020.1 version to 2020.1, note the :doc:`upgrading from experimental CDC </kb/cdc-experimental-upgrade>`.
|
||||
|
||||
.. include:: /upgrade/upgrade-enterprise/_common/mv_si_rebuild_warning.rst
|
||||
@@ -1,6 +0,0 @@
|
||||
.. note:: The note is only useful when CDC is GA supported in the target ScyllaDB. Execute the following commands one node at a time, moving to the next node only **after** the upgrade procedure completed successfully.
|
||||
|
||||
.. warning::
|
||||
|
||||
If you are using CDC and upgrading ScyllaDB 2021.1 to 2022.1, please review the API updates in :doc:`querying CDC streams </using-scylla/cdc/cdc-querying-streams>` and :doc:`CDC stream generations </using-scylla/cdc/cdc-stream-generations>`.
|
||||
In particular, you should update applications that use CDC according to :ref:`CDC Upgrade notes <scylla-4-3-to-4-4-upgrade>` **before** upgrading the cluster to 2022.1.
|
||||
@@ -1,9 +0,0 @@
|
||||
|
||||
.. note::
|
||||
|
||||
If **any** of your instances are running Scylla Enterprise 2019.1.6 or earlier, **and** one of your Scylla nodes is up for more than a year, you might have been exposed to issue `#6063 <https://github.com/scylladb/scylla/pull/6083>`_.
|
||||
One way to check this is by comparing `Generation No` (from `nodetool gossipinfo` output) with the current time in Epoch format (`date +%s`), and check if the difference is higher than one year (31536000 seconds).
|
||||
See `scylla-check-gossiper-generation <https://github.com/scylladb/scylla-code-samples/tree/master/scylla-check-gossiper-generation>`_ for a script to do just that.
|
||||
|
||||
If this is the case, do **not** initiate the upgrade process before consulting with Scylla Support for further instructions.
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
.. warning::
|
||||
|
||||
If you are using materialized views or secondary indexes created in Scylla 2019.1.x and, **while** upgrading to 2020.1.x (2020.1.7 or lower), updated your schema, you might have MV inconsistency.
|
||||
To fix: rebuild the MV.
|
||||
|
||||
It is recommended to avoid schema and topology updates during the upgrade (mixed cluster).
|
||||
@@ -1,50 +0,0 @@
|
||||
=============================
|
||||
Upgrade Scylla Enterprise
|
||||
=============================
|
||||
|
||||
.. toctree::
|
||||
:hidden:
|
||||
:titlesonly:
|
||||
|
||||
ScyllaDB Enterprise 2022 <upgrade-guide-from-2022.x.y-to-2022.x.z/index>
|
||||
ScyllaDB Enterprise 2021 <upgrade-guide-from-2021.x.y-to-2021.x.z/index>
|
||||
ScyllaDB Enterprise 2020 <upgrade-guide-from-2020.x.y-to-2020.x.z/index>
|
||||
ScyllaDB Enterprise 2019 <upgrade-guide-from-2019.x.y-to-2019.x.z/index>
|
||||
ScyllaDB Enterprise 2018 <upgrade-guide-from-2018.x.y-to-2018.x.z/index>
|
||||
ScyllaDB Enterprise 2017 <upgrade-guide-from-2017.x.y-to-2017.x.z/index>
|
||||
ScyllaDB Enterprise 2022.1 to Scylla Enterprise 2022.2 <upgrade-guide-from-2022.1-to-2022.2/index>
|
||||
ScyllaDB Enterprise 2021.1 to Scylla Enterprise 2022.1 <upgrade-guide-from-2021.1-to-2022.1/index>
|
||||
ScyllaDB Enterprise 2020.1 to Scylla Enterprise 2021.1 <upgrade-guide-from-2020.1-to-2021.1/index>
|
||||
ScyllaDB Enterprise 2019.1 to Scylla Enterprise 2020.1 <upgrade-guide-from-2019.1-to-2020.1/index>
|
||||
ScyllaDB Enterprise 2018.1 to Scylla Enterprise 2019.1 <upgrade-guide-from-2018.1-to-2019.1/index>
|
||||
ScyllaDB Enterprise 2017.1 to Scylla Enterprise 2018.1 <upgrade-guide-from-2017.1-to-2018.1/index>
|
||||
Ubuntu 14.04 to 16.04 <upgrade-guide-from-ubuntu-14-to-16>
|
||||
|
||||
.. panel-box::
|
||||
:title: Upgrade ScyllaDB Enterprise
|
||||
:id: "getting-started"
|
||||
:class: my-panel
|
||||
|
||||
Procedures for upgrading to a new version of ScyllaDB Enterprise.
|
||||
|
||||
Patch Release Upgrade
|
||||
|
||||
* :doc:`Upgrade Guide - ScyllaDB Enterprise 2022.x </upgrade/upgrade-enterprise/upgrade-guide-from-2022.x.y-to-2022.x.z/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB Enterprise 2021.x <upgrade-guide-from-2021.x.y-to-2021.x.z/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB Enterprise 2020.x <upgrade-guide-from-2020.x.y-to-2020.x.z/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB Enterprise 2019.x <upgrade-guide-from-2019.x.y-to-2019.x.z/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB Enterprise 2018.x <upgrade-guide-from-2018.x.y-to-2018.x.z/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB Enterprise 2017.x <upgrade-guide-from-2017.x.y-to-2017.x.z/index>`
|
||||
|
||||
Major Release Upgrade
|
||||
|
||||
* :doc:`Upgrade Guide - From ScyllaDB Enterprise 2022.1 to Scylla Enterprise 2022.2 (minor release) <upgrade-guide-from-2022.1-to-2022.2/index>`
|
||||
* :doc:`Upgrade Guide - From ScyllaDB Enterprise 2021.1 to Scylla Enterprise 2022.1 <upgrade-guide-from-2021.1-to-2022.1/index>`
|
||||
* :doc:`Upgrade Guide - From ScyllaDB Enterprise 2020.1 to Scylla Enterprise 2021.1 <upgrade-guide-from-2020.1-to-2021.1/index>`
|
||||
* :doc:`Upgrade Guide - From ScyllaDB Enterprise 2019.1 to Scylla Enterprise 2020.1 <upgrade-guide-from-2019.1-to-2020.1/index>`
|
||||
* :doc:`Upgrade Guide - From ScyllaDB Enterprise 2018.1 to Scylla Enterprise 2019.1 <upgrade-guide-from-2018.1-to-2019.1/index>`
|
||||
* :doc:`Upgrade Guide - From ScyllaDB Enterprise 2017.1 to Scylla Enterprise 2018.1 <upgrade-guide-from-2017.1-to-2018.1/index>`
|
||||
* :doc:`Upgrade Guide - Ubuntu 14.04 to 16.04 <upgrade-guide-from-ubuntu-14-to-16>`
|
||||
|
||||
|
||||
* :ref:`Upgrade Unified Installer (relocatable executable) install <unified-installed-upgrade>`
|
||||
@@ -1,39 +0,0 @@
|
||||
==================================================
|
||||
Upgrade from Scylla Enterprise 2017.1 to 2018.1
|
||||
==================================================
|
||||
|
||||
.. toctree::
|
||||
:hidden:
|
||||
:titlesonly:
|
||||
|
||||
Red Hat Enterprise Linux and CentOS <upgrade-guide-from-2017.1-to-2018.1-rpm>
|
||||
Ubuntu <upgrade-guide-from-2017.1-to-2018.1-ubuntu>
|
||||
Debian <upgrade-guide-from-2017.1-to-2018.1-debian>
|
||||
Metrics <metric-update-2017.1-to-2018.1>
|
||||
|
||||
.. raw:: html
|
||||
|
||||
|
||||
<div class="panel callout radius animated">
|
||||
<div class="row">
|
||||
<div class="medium-3 columns">
|
||||
<h5 id="getting-started">Upgrade to Scylla Enterprise 2018.1</h5>
|
||||
</div>
|
||||
<div class="medium-9 columns">
|
||||
|
||||
Upgrade guides are available for:
|
||||
|
||||
* :doc:`Upgrade Scylla Enterprise from 2017.1.x to 2018.1.y on Red Hat Enterprise Linux and CentOS <upgrade-guide-from-2017.1-to-2018.1-rpm>`
|
||||
* :doc:`Upgrade Scylla Enterprise from 2017.1.x to 2018.1.y on Ubuntu <upgrade-guide-from-2017.1-to-2018.1-ubuntu>`
|
||||
* :doc:`Upgrade Scylla Enterprise from 2017.1.x to 2018.1.y on Debian <upgrade-guide-from-2017.1-to-2018.1-debian>`
|
||||
* :doc:`Scylla Enterprise Metrics Update - Scylla 2017.1 to 2018.1 <metric-update-2017.1-to-2018.1>`
|
||||
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
@@ -1,291 +0,0 @@
|
||||
====================================================================
|
||||
Scylla Enterprise Metric Update - Scylla Enterprise 2017.1 to 2018.1
|
||||
====================================================================
|
||||
|
||||
|
||||
|
||||
Updated Metrics
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
The following metric names have changed between Scylla Enterprise 2017.1 and 2018.1
|
||||
|
||||
=========================================================================== ===========================================================================
|
||||
2017.1 2018.1
|
||||
=========================================================================== ===========================================================================
|
||||
scylla_batchlog_manager_total_operations_total_write_replay_attempts scylla_batchlog_manager_total_write_replay_attempts
|
||||
scylla_cache_objects_partitions scylla_cache_partitions
|
||||
scylla_cache_total_operations_concurrent_misses_same_key scylla_cache_concurrent_misses_same_key
|
||||
scylla_cache_total_operations_evictions scylla_cache_partition_evictions
|
||||
scylla_cache_total_operations_hits scylla_cache_partition_hits
|
||||
scylla_cache_total_operations_insertions scylla_cache_partition_insertions
|
||||
scylla_cache_total_operations_merges scylla_cache_partition_merges
|
||||
scylla_cache_total_operations_misses scylla_cache_partition_misses
|
||||
scylla_cache_total_operations_removals scylla_cache_partition_removals
|
||||
scylla_commitlog_memory_buffer_list_bytes scylla_commitlog_memory_buffer_bytes
|
||||
scylla_commitlog_memory_total_size scylla_commitlog_disk_total_bytes
|
||||
scylla_commitlog_queue_length_allocating_segments scylla_commitlog_allocating_segments
|
||||
scylla_commitlog_queue_length_pending_allocations scylla_commitlog_pending_allocations
|
||||
scylla_commitlog_queue_length_pending_flushes scylla_commitlog_pending_flushes
|
||||
scylla_commitlog_queue_length_segments scylla_commitlog_segments
|
||||
scylla_commitlog_queue_length_unused_segments scylla_commitlog_unused_segments
|
||||
scylla_commitlog_total_bytes_slack scylla_commitlog_slack
|
||||
scylla_commitlog_total_bytes_written scylla_commitlog_bytes_written
|
||||
scylla_commitlog_total_operations_alloc scylla_commitlog_alloc
|
||||
scylla_commitlog_total_operations_cycle scylla_commitlog_cycle
|
||||
scylla_commitlog_total_operations_flush scylla_commitlog_flush
|
||||
scylla_commitlog_total_operations_flush_limit_exceeded scylla_commitlog_flush_limit_exceeded
|
||||
scylla_commitlog_total_operations_requests_blocked_memory scylla_commitlog_requests_blocked_memory
|
||||
scylla_compaction_manager_objects_compactions scylla_compaction_manager_compactions
|
||||
scylla_cql_total_operations_batches scylla_cql_batches
|
||||
scylla_cql_total_operations_deletes scylla_cql_deletes
|
||||
scylla_cql_total_operations_inserts scylla_cql_inserts
|
||||
scylla_cql_total_operations_reads scylla_cql_reads
|
||||
scylla_cql_total_operations_updates scylla_cql_updates
|
||||
scylla_database_bytes_total_result_memory scylla_database_total_result_bytes
|
||||
scylla_database_queue_length_active_reads scylla_database_active_reads
|
||||
scylla_database_queue_length_active_reads_streaming scylla_database_active_reads
|
||||
scylla_database_queue_length_active_reads_system_keyspace scylla_database_active_reads
|
||||
scylla_database_queue_length_queued_reads scylla_database_queued_reads
|
||||
scylla_database_queue_length_queued_reads_streaming scylla_database_queued_reads
|
||||
scylla_database_queue_length_queued_reads_system_keyspace scylla_database_queued_reads
|
||||
scylla_database_queue_length_requests_blocked_memory scylla_database_requests_blocked_memory_current
|
||||
scylla_database_total_operations_clustering_filter_count scylla_database_clustering_filter_count
|
||||
scylla_database_total_operations_clustering_filter_fast_path_count scylla_database_clustering_filter_fast_path_count
|
||||
scylla_database_total_operations_clustering_filter_sstables_checked scylla_database_clustering_filter_sstables_checked
|
||||
scylla_database_total_operations_clustering_filter_surviving_sstables scylla_database_clustering_filter_surviving_sstables
|
||||
scylla_database_total_operations_requests_blocked_memory scylla_database_requests_blocked_memory
|
||||
scylla_database_total_operations_short_data_queries scylla_database_short_data_queries
|
||||
scylla_database_total_operations_short_mutation_queries scylla_database_short_mutation_queries
|
||||
scylla_database_total_operations_sstable_read_queue_overloads scylla_database_sstable_read_queue_overloads
|
||||
scylla_database_total_operations_total_reads scylla_database_total_reads
|
||||
scylla_database_total_operations_total_reads_failed scylla_database_total_reads_failed
|
||||
scylla_database_total_operations_total_writes scylla_database_total_writes
|
||||
scylla_database_total_operations_total_writes_failed scylla_database_total_writes_failed
|
||||
scylla_database_total_operations_total_writes_timedout scylla_database_total_writes_timedout
|
||||
scylla_gossip_derive_heart_beat_version scylla_gossip_heart_beat
|
||||
scylla_http_0_connections_http_connections scylla_httpd_connections_total
|
||||
scylla_http_0_current_connections_current scylla_httpd_connections_current
|
||||
scylla_http_0_http_requests_served scylla_httpd_requests_served
|
||||
scylla_io_queue_delay_commitlog scylla_io_queue_commitlog_delay
|
||||
scylla_io_queue_delay_compaction scylla_io_queue_compaction_delay
|
||||
scylla_io_queue_delay_default scylla_io_queue_default_delay
|
||||
scylla_io_queue_delay_memtable_flush scylla_io_queue_memtable_flush_delay
|
||||
scylla_io_queue_derive_commitlog scylla_io_queue_commitlog_total_bytes
|
||||
scylla_io_queue_derive_compaction scylla_io_queue_compaction_total_bytes
|
||||
scylla_io_queue_derive_default scylla_io_queue_default_total_bytes
|
||||
scylla_io_queue_derive_memtable_flush scylla_io_queue_memtable_flush_total_bytes
|
||||
scylla_io_queue_queue_length_commitlog scylla_io_queue_commitlog_queue_length
|
||||
scylla_io_queue_queue_length_compaction scylla_io_queue_compaction_queue_length
|
||||
scylla_io_queue_queue_length_default scylla_io_queue_default_queue_length
|
||||
scylla_io_queue_queue_length_memtable_flush scylla_io_queue_memtable_flush_queue_length
|
||||
scylla_io_queue_total_operations_commitlog scylla_io_queue_commitlog_total_operations
|
||||
scylla_io_queue_total_operations_compaction scylla_io_queue_compaction_total_operations
|
||||
scylla_io_queue_total_operations_default scylla_io_queue_default_total_operations
|
||||
scylla_io_queue_total_operations_memtable_flush scylla_io_queue_memtable_flush_total_operations
|
||||
scylla_lsa_bytes_free_space_in_zones scylla_lsa_free_space_in_zones
|
||||
scylla_lsa_bytes_large_objects_total_space scylla_lsa_large_objects_total_space_bytes
|
||||
scylla_lsa_bytes_non_lsa_used_space scylla_lsa_non_lsa_used_space_bytes
|
||||
scylla_lsa_bytes_small_objects_total_space scylla_lsa_small_objects_total_space_bytes
|
||||
scylla_lsa_bytes_small_objects_used_space scylla_lsa_small_objects_used_space_bytes
|
||||
scylla_lsa_bytes_total_space scylla_lsa_total_space_bytes
|
||||
scylla_lsa_bytes_used_space scylla_lsa_used_space_bytes
|
||||
scylla_lsa_objects_zones scylla_lsa_zones
|
||||
scylla_lsa_operations_segments_compacted scylla_lsa_segments_compacted
|
||||
scylla_lsa_operations_segments_migrated scylla_lsa_segments_migrated
|
||||
scylla_lsa_percent_occupancy scylla_lsa_occupancy
|
||||
scylla_memory_bytes_dirty scylla_memory_dirty_bytes
|
||||
scylla_memory_bytes_regular_dirty scylla_memory_regular_dirty_bytes
|
||||
scylla_memory_bytes_regular_virtual_dirty scylla_memory_regular_virtual_dirty_bytes
|
||||
scylla_memory_bytes_streaming_dirty scylla_memory_streaming_dirty_bytes
|
||||
scylla_memory_bytes_streaming_virtual_dirty scylla_memory_streaming_virtual_dirty_bytes
|
||||
scylla_memory_bytes_system_dirty scylla_memory_system_dirty_bytes
|
||||
scylla_memory_bytes_system_virtual_dirty scylla_memory_system_virtual_dirty_bytes
|
||||
scylla_memory_bytes_virtual_dirty scylla_memory_virtual_dirty_bytes
|
||||
scylla_memory_memory_allocated_memory scylla_memory_allocated_memory
|
||||
scylla_memory_memory_free_memory scylla_memory_free_memory
|
||||
scylla_memory_memory_total_memory scylla_memory_total_memory
|
||||
scylla_memory_objects_malloc scylla_memory_malloc_live_objects
|
||||
scylla_memory_total_operations_cross_cpu_free scylla_memory_cross_cpu_free_operations
|
||||
scylla_memory_total_operations_free scylla_memory_free_operations
|
||||
scylla_memory_total_operations_malloc scylla_memory_malloc_operations
|
||||
scylla_memory_total_operations_reclaims scylla_memory_reclaims_operations
|
||||
scylla_memtables_bytes_pending_flushes                                      scylla_memtables_pending_flushes_bytes
|
||||
scylla_memtables_queue_length_pending_flushes                               scylla_memtables_pending_flushes
|
||||
scylla_query_processor_total_operations_statements_prepared scylla_query_processor_statements_prepared
|
||||
scylla_reactor_derive_aio_read_bytes scylla_reactor_aio_bytes_read
|
||||
scylla_reactor_derive_aio_write_bytes scylla_reactor_aio_bytes_write
|
||||
scylla_reactor_derive_busy_ns scylla_reactor_cpu_busy_ns
|
||||
scylla_reactor_derive_polls scylla_reactor_polls
|
||||
scylla_reactor_gauge_load scylla_reactor_utilization
|
||||
scylla_reactor_gauge_queued_io_requests scylla_reactor_io_queue_requests
|
||||
scylla_reactor_queue_length_tasks_pending scylla_reactor_tasks_pending
|
||||
scylla_reactor_queue_length_timers_pending scylla_reactor_timers_pending
|
||||
scylla_reactor_total_operations_aio_reads scylla_reactor_aio_reads
|
||||
scylla_reactor_total_operations_aio_writes scylla_reactor_aio_writes
|
||||
scylla_reactor_total_operations_cexceptions scylla_reactor_cpp_exceptions
|
||||
scylla_reactor_total_operations_fsyncs scylla_reactor_fsyncs
|
||||
scylla_reactor_total_operations_io_threaded_fallbacks scylla_reactor_io_threaded_fallbacks
|
||||
scylla_reactor_total_operations_logging_failures scylla_reactor_logging_failures
|
||||
scylla_reactor_total_operations_tasks_processed scylla_reactor_tasks_processed
|
||||
scylla_storage_proxy_coordinator_background_reads scylla_storage_proxy_coordinator_background_read_repairs
|
||||
scylla_storage_proxy_coordinator_completed_data_reads_local_node scylla_storage_proxy_coordinator_completed_reads_local_node
|
||||
scylla_storage_proxy_coordinator_data_read_errors_local_node scylla_storage_proxy_coordinator_read_errors_local_node
|
||||
scylla_storage_proxy_coordinator_data_reads_local_node scylla_storage_proxy_coordinator_reads_local_node
|
||||
scylla_streaming_derive_total_incoming_bytes scylla_streaming_total_incoming_bytes
|
||||
scylla_streaming_derive_total_outgoing_bytes scylla_streaming_total_outgoing_bytes
|
||||
scylla_thrift_connections_thrift_connections                                scylla_thrift_thrift_connections
|
||||
scylla_thrift_current_connections_current                                   scylla_thrift_current_connections
|
||||
scylla_thrift_total_requests_served scylla_thrift_served
|
||||
scylla_tracing_keyspace_helper_total_operations_bad_column_family_errors scylla_tracing_keyspace_helper_bad_column_family_errors
|
||||
scylla_tracing_keyspace_helper_total_operations_tracing_errors scylla_tracing_keyspace_helper_tracing_errors
|
||||
scylla_tracing_queue_length_active_sessions scylla_tracing_active_sessions
|
||||
scylla_tracing_queue_length_cached_records scylla_tracing_cached_records
|
||||
scylla_tracing_queue_length_flushing_records scylla_tracing_flushing_records
|
||||
scylla_tracing_queue_length_pending_for_write_records scylla_tracing_pending_for_write_records
|
||||
scylla_tracing_total_operations_dropped_records scylla_tracing_dropped_records
|
||||
scylla_tracing_total_operations_dropped_sessions scylla_tracing_dropped_sessions
|
||||
scylla_tracing_total_operations_trace_errors scylla_tracing_trace_errors
|
||||
scylla_tracing_total_operations_trace_records_count scylla_tracing_trace_records_count
|
||||
scylla_transport_connections_cql_connections scylla_transport_cql_connections
|
||||
scylla_transport_current_connections_current scylla_transport_current_connections
|
||||
scylla_transport_queue_length_requests_blocked_memory scylla_transport_requests_blocked_memory
|
||||
scylla_transport_queue_length_requests_serving scylla_transport_requests_serving
|
||||
scylla_transport_total_requests_requests_served scylla_transport_requests_served
|
||||
=========================================================================== ===========================================================================
|
||||
|
||||
|
||||
New Metrics
|
||||
~~~~~~~~~~~
|
||||
|
||||
The following metrics are new in 2018.1
|
||||
|
||||
+--------------------------------------------------------------------------+
|
||||
| New Metric Name |
|
||||
+==========================================================================+
|
||||
| scylla_cache_active_reads |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_cache_garbage_partitions |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_cache_mispopulations |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_cache_evictions_from_garbage |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_cache_pinned_dirty_memory_overload |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_cache_reads |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_cache_reads_with_misses |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_cache_row_hits |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_cache_row_insertions |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_cache_row_misses |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_cache_sstable_partition_skips |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_cache_sstable_reader_recreations |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_cache_sstable_row_skips |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_cql_batches_pure_logged |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_cql_batches_pure_unlogged |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_cql_batches_unlogged_from_logged |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_cql_prepared_cache_evictions |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_cql_prepared_cache_memory_footprint |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_cql_prepared_cache_size |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_cql_statements_in_batches |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_database_active_reads_memory_consumption |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_database_counter_cell_lock_acquisition |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_database_counter_cell_lock_pending |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_database_cpu_flush_quota |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_execution_stages_function_calls_enqueued |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_execution_stages_function_calls_executed |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_execution_stages_tasks_preempted |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_execution_stages_tasks_scheduled |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_httpd_read_errors |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_httpd_reply_errors |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_scheduler_queue_length |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_scheduler_runtime_ms |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_scheduler_shares |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_scheduler_tasks_processed |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_scylladb_current_version |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_sstables_index_page_blocks |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_sstables_index_page_hits |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_sstables_index_page_misses |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_storage_proxy_coordinator_background_reads |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_storage_proxy_coordinator_foreground_read_repair |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_storage_proxy_coordinator_foreground_reads |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_storage_proxy_coordinator_read_latency |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_storage_proxy_coordinator_write_latency |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_storage_proxy_replica_reads |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_storage_proxy_replica_received_counter_updates |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_transport_unpaged_queries |
|
||||
+--------------------------------------------------------------------------+
|
||||
|
||||
|
||||
Deprecated Metrics
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The following metrics are deprecated in 2018.1
|
||||
|
||||
+--------------------------------------------------------------------------+
|
||||
| Deprecated Metric Name |
|
||||
+==========================================================================+
|
||||
| scylla_cache_total_operations_uncached_wide_partitions |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_cache_total_operations_wide_partition_evictions |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_io_queue_delay_query |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_io_queue_derive_query |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_io_queue_queue_length_query |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_io_queue_total_operations_query |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_storage_proxy_coordinator_digest_read_errors_local_node |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_storage_proxy_coordinator_digest_reads_local_node |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_storage_proxy_coordinator_mutation_data_read_errors_local_node |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_storage_proxy_coordinator_mutation_data_reads_local_node |
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_storage_proxy_coordinator_completed_mutation_data_reads_local_node|
|
||||
+--------------------------------------------------------------------------+
|
||||
| scylla_storage_proxy_coordinator_reads |
|
||||
+--------------------------------------------------------------------------+
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
.. |OS| replace:: Debian 8
|
||||
.. |ROLLBACK| replace:: rollback
|
||||
.. _ROLLBACK: /upgrade/upgrade-enterprise/upgrade-guide-from-2017.1-to-2018.1/upgrade-guide-from-2017.1-to-2018.1-debian/#rollback-procedure
|
||||
.. |APT| replace:: Scylla Enterprise Deb repo
|
||||
.. _APT: http://www.scylladb.com/enterprise-download/debian8/
|
||||
.. |ENABLE_APT_REPO| replace:: echo 'deb http://http.debian.net/debian jessie-backports main' > /etc/apt/sources.list.d/jessie-backports.list
|
||||
.. |JESSIE_BACKPORTS| replace:: -t jessie-backports openjdk-8-jre-headless
|
||||
.. include:: /upgrade/_common/upgrade-guide-from-2017.1-to-2018.1-ubuntu-and-debian.rst
|
||||
@@ -1,180 +0,0 @@
|
||||
=============================================================================================
|
||||
Upgrade Guide - Scylla Enterprise 2017.1 to 2018.1 for Red Hat Enterprise Linux 7 or CentOS 7
|
||||
=============================================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla Enterprise 2017.1 to Scylla Enterprise 2018.1, and rollback to 2017.1 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 2017.1.x to Scylla Enterprise version 2018.1.y, on the following platforms:
|
||||
|
||||
* Red Hat Enterprise Linux, version 7 and later
|
||||
* CentOS, version 7 and later
|
||||
* Fedora is no longer supported (packages are no longer provided)
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure that does not require a full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
* Check cluster schema
|
||||
* Drain node and backup the data
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade, it is highly recommended:
|
||||
|
||||
* Not to use new 2018.1 features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check cluster schema
|
||||
--------------------
|
||||
Make sure that all nodes have the schema synched prior to the upgrade. The upgrade will fail if there is a schema disagreement between nodes.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-2017.1
|
||||
|
||||
Stop Scylla
|
||||
-----------
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``rpm -qa | grep scylla-server``. You should use the same version in case you want to :ref:`rollback <upgrade-2017.1-2018.1-rpm-rollback-procedure>` the upgrade. If you are not running a 2017.1.x version, stop right here! This guide only covers 2017.1.x to 2018.1.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the `Scylla RPM Enterprise repo <http://www.scylladb.com/enterprise-download/centos_rpm/>`_ to **2018.1**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum update scylla\* -y
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check the Scylla version.
|
||||
3. Use ``journalctl _COMM=scylla`` to check there are no new errors in the log.
|
||||
4. Check again after two minutes to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
* More on :doc:`Scylla Metrics Update - Scylla Enterprise 2017.1 to 2018.1 <metric-update-2017.1-to-2018.1>`
|
||||
|
||||
.. _upgrade-2017.1-2018.1-rpm-rollback-procedure:
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla Enterprise release 2018.1.x to 2017.1.y. Apply this procedure if an upgrade from 2017.1 to 2018.1 failed before completing on all nodes. Use this procedure only for nodes you upgraded to 2018.1.
|
||||
|
||||
|
||||
Scylla rollback is a rolling procedure that does **not** require a full cluster shutdown.
|
||||
For each of the nodes rollback to 2017.1, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old Scylla packages
|
||||
* Restore the configuration file
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/yum.repos.d/scylla.repo
|
||||
|
||||
2. Update the `Scylla RPM Enterprise repo <http://www.scylladb.com/enterprise-download/centos_rpm/>`_ to **2017.1**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum clean all
|
||||
sudo rm -rf /var/cache/yum
|
||||
sudo yum remove scylla\*tools-core
|
||||
sudo yum downgrade scylla\* -y
|
||||
sudo yum install scylla-enterprise
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-2017.1 /etc/scylla/scylla.yaml
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from the previous snapshot, as 2018.1 uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check the upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,8 +0,0 @@
|
||||
.. |OS| replace:: Ubuntu 14.04 or 16.04
|
||||
.. |ROLLBACK| replace:: rollback
|
||||
.. _ROLLBACK: /upgrade/upgrade-enterprise/upgrade-guide-from-2017.1-to-2018.1/upgrade-guide-from-2017.1-to-2018.1-ubuntu/#rollback-procedure
|
||||
.. |APT| replace:: Scylla Enterprise Deb repo
|
||||
.. _APT: http://www.scylladb.com/enterprise-download/
|
||||
.. |ENABLE_APT_REPO| replace:: sudo add-apt-repository -y ppa:openjdk-r/ppa
|
||||
.. |JESSIE_BACKPORTS| replace:: openjdk-8-jre-headless
|
||||
.. include:: /upgrade/_common/upgrade-guide-from-2017.1-to-2018.1-ubuntu-and-debian.rst
|
||||
@@ -1,38 +0,0 @@
|
||||
=====================================================
|
||||
Upgrade Scylla Enterprise 2017
|
||||
=====================================================
|
||||
|
||||
.. toctree::
|
||||
:titlesonly:
|
||||
:hidden:
|
||||
|
||||
Red Hat Enterprise Linux and CentOS <upgrade-guide-from-2017.x.y-to-2017.x.z-rpm>
|
||||
Ubuntu <upgrade-guide-from-2017.x.y-to-2017.x.z-ubuntu>
|
||||
Debian <upgrade-guide-from-2017.x.y-to-2017.x.z-debian>
|
||||
|
||||
|
||||
.. raw:: html
|
||||
|
||||
|
||||
<div class="panel callout radius animated">
|
||||
<div class="row">
|
||||
<div class="medium-3 columns">
|
||||
<h5 id="getting-started">Upgrade to Scylla Enterprise 2017.x.z</h5>
|
||||
</div>
|
||||
<div class="medium-9 columns">
|
||||
|
||||
|
||||
Upgrade guides are available for:
|
||||
|
||||
* :doc:`Upgrade Scylla Enterprise from 2017.x.y to 2017.x.z on Red Hat Enterprise Linux and CentOS <upgrade-guide-from-2017.x.y-to-2017.x.z-rpm>`
|
||||
* :doc:`Upgrade Scylla Enterprise from 2017.x.y to 2017.x.z on Ubuntu <upgrade-guide-from-2017.x.y-to-2017.x.z-ubuntu>`
|
||||
* :doc:`Upgrade Scylla Enterprise from 2017.x.y to 2017.x.z on Debian <upgrade-guide-from-2017.x.y-to-2017.x.z-debian>`
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
.. |OS| replace:: Debian 8
|
||||
.. |ROLLBACK| replace:: rollback
|
||||
.. _ROLLBACK: /upgrade/upgrade-enterprise/upgrade-guide-from-2017.x.y-to-2017.x.z/upgrade-guide-from-2017.x.y-to-2017.x.z-debian/#rollback-procedure
|
||||
.. |APT| replace:: Scylla Enterprise deb repo
|
||||
.. _APT: http://www.scylladb.com/enterprise-download/debian8/
|
||||
.. include:: /upgrade/_common/upgrade-guide-from-2017.x.y-to-2017.x.z-ubuntu-and-debian.rst
|
||||
@@ -1,153 +0,0 @@
|
||||
===========================================================================================
|
||||
Upgrade Guide - Scylla Enterprise 2017.x.y to 2017.x.z for Red Hat Enterprise 7 or CentOS 7
|
||||
===========================================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla Enterprise 2017.x.y to 2017.x.z.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla Enterprise from the following versions: 2017.x.y to 2017.x.z, on the following platforms:
|
||||
|
||||
* Red Hat Enterprise Linux, version 7 and later
|
||||
* CentOS, version 7 and later
|
||||
* Packages are no longer provided for Fedora
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure that does not require a full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
* Drain node and backup the data
|
||||
* Check your current release
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade, it is highly recommended:
|
||||
|
||||
* Not to use new 2017.x.z features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-2017.x.z
|
||||
|
||||
Stop Scylla
|
||||
-----------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``rpm -qa | grep scylla-server``. You should use the same version in case you want to :ref:`rollback <upgrade-2017.x.y-to-2017.x.z-rpm-rollback-procedure>` the upgrade. If you are not running a 2017.x.y version, stop right here! This guide only covers 2017.x.y to 2017.x.z upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the `Scylla Enterprise RPM repo <http://www.scylladb.com/enterprise-download/centos_rpm>`_ to **2017.x**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum update scylla\* -y
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check the Scylla version.
|
||||
3. Use ``journalctl _COMM=scylla`` to check there are no new errors in the log.
|
||||
4. Check again after two minutes to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
.. _upgrade-2017.x.y-to-2017.x.z-rpm-rollback-procedure:
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla Enterprise release 2017.x.z to 2017.x.y. Apply this procedure if an upgrade from 2017.x.y to 2017.x.z failed before completing on all nodes. Use this procedure only for nodes you upgraded to 2017.x.z
|
||||
|
||||
Scylla rollback is a rolling procedure that does **not** require a full cluster shutdown.
|
||||
For each of the nodes rollback to 2017.x.y, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Downgrade to previous release
|
||||
* Restore the configuration file
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Downgrade to previous release
|
||||
-----------------------------
|
||||
1. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum downgrade scylla\*-2017.x.y
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-2017.x.z /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check the upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,6 +0,0 @@
|
||||
.. |OS| replace:: Ubuntu 14.04 or 16.04
|
||||
.. |ROLLBACK| replace:: rollback
|
||||
.. _ROLLBACK: /upgrade/upgrade-enterprise/upgrade-guide-from-2017.x.y-to-2017.x.z/upgrade-guide-from-2017.x.y-to-2017.x.z-ubuntu/#rollback-procedure
|
||||
.. |APT| replace:: Scylla Enterprise deb repo
|
||||
.. _APT: http://www.scylladb.com/enterprise-download/
|
||||
.. include:: /upgrade/_common/upgrade-guide-from-2017.x.y-to-2017.x.z-ubuntu-and-debian.rst
|
||||
@@ -1,37 +0,0 @@
|
||||
==================================================
|
||||
Upgrade from Scylla Enterprise 2018.1 to 2019.1
|
||||
==================================================
|
||||
|
||||
.. toctree::
|
||||
:hidden:
|
||||
:titlesonly:
|
||||
|
||||
Red Hat Enterprise Linux and CentOS <upgrade-guide-from-2018.1-to-2019.1-rpm>
|
||||
Ubuntu 16.04 <upgrade-guide-from-2018.1-to-2019.1-ubuntu-16-04>
|
||||
Metrics <metric-update-2018.1-to-2019.1>
|
||||
|
||||
.. raw:: html
|
||||
|
||||
|
||||
<div class="panel callout radius animated">
|
||||
<div class="row">
|
||||
<div class="medium-3 columns">
|
||||
<h5 id="getting-started">Upgrade to Scylla Enterprise 2019.1</h5>
|
||||
</div>
|
||||
<div class="medium-9 columns">
|
||||
|
||||
Upgrade guides are available for:
|
||||
|
||||
* :doc:`Upgrade Scylla Enterprise from 2018.1.x to 2019.1.y on Red Hat Enterprise Linux and CentOS <upgrade-guide-from-2018.1-to-2019.1-rpm>`
|
||||
* :doc:`Upgrade Scylla Enterprise from 2018.1.x to 2019.1.y on Ubuntu 16.04 <upgrade-guide-from-2018.1-to-2019.1-ubuntu-16-04>`
|
||||
* :doc:`Scylla Enterprise Metrics Update - Scylla 2018.1 to 2019.1<metric-update-2018.1-to-2019.1>`
|
||||
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
@@ -1,141 +0,0 @@
|
||||
====================================================================
|
||||
Scylla Enterprise Metric Update - Scylla Enterprise 2018.1 to 2019.1
|
||||
====================================================================
|
||||
|
||||
|
||||
New Metrics
|
||||
~~~~~~~~~~~
|
||||
|
||||
The following metrics are new in 2019.1 compared to 2018.1
|
||||
|
||||
* scylla_alien_receive_batch_queue_length
|
||||
* scylla_alien_total_received_messages
|
||||
* scylla_alien_total_sent_messages
|
||||
* scylla_cql_authorized_prepared_statements_cache_evictions
|
||||
* scylla_cql_authorized_prepared_statements_cache_size
|
||||
* scylla_cql_filtered_read_requests
|
||||
* scylla_cql_filtered_rows_dropped_total
|
||||
* scylla_cql_filtered_rows_matched_total
|
||||
* scylla_cql_filtered_rows_read_total
|
||||
* scylla_cql_rows_read
|
||||
* scylla_cql_secondary_index_creates
|
||||
* scylla_cql_secondary_index_drops
|
||||
* scylla_cql_secondary_index_reads
|
||||
* scylla_cql_secondary_index_rows_read
|
||||
* scylla_cql_unpaged_select_queries
|
||||
* scylla_cql_user_prepared_auth_cache_footprint
|
||||
* scylla_database_dropped_view_updates
|
||||
* scylla_database_large_partition_exceeding_threshold
|
||||
* scylla_database_multishard_query_failed_reader_saves
|
||||
* scylla_database_multishard_query_failed_reader_stops
|
||||
* scylla_database_multishard_query_unpopped_bytes
|
||||
* scylla_database_multishard_query_unpopped_fragments
|
||||
* scylla_database_paused_reads
|
||||
* scylla_database_paused_reads_permit_based_evictions
|
||||
* scylla_database_total_view_updates_failed_local
|
||||
* scylla_database_total_view_updates_failed_remote
|
||||
* scylla_database_total_view_updates_pushed_local
|
||||
* scylla_database_total_view_updates_pushed_remote
|
||||
* scylla_database_view_building_paused
|
||||
* scylla_database_view_update_backlog
|
||||
* scylla_hints_for_views_manager_corrupted_files
|
||||
* scylla_hints_for_views_manager_discarded
|
||||
* scylla_hints_for_views_manager_dropped
|
||||
* scylla_hints_for_views_manager_errors
|
||||
* scylla_hints_for_views_manager_sent
|
||||
* scylla_hints_for_views_manager_size_of_hints_in_progress
|
||||
* scylla_hints_for_views_manager_written
|
||||
* scylla_hints_manager_corrupted_files
|
||||
* scylla_hints_manager_discarded
|
||||
* scylla_hints_manager_dropped
|
||||
* scylla_hints_manager_errors
|
||||
* scylla_hints_manager_sent
|
||||
* scylla_hints_manager_size_of_hints_in_progress
|
||||
* scylla_hints_manager_written
|
||||
* scylla_node_operation_mode
|
||||
* scylla_query_processor_queries
|
||||
* scylla_reactor_aio_errors
|
||||
* scylla_reactor_cpu_steal_time_ms
|
||||
* scylla_scheduler_time_spent_on_task_quota_violations_ms
|
||||
* scylla_sstables_capped_local_deletion_time
|
||||
* scylla_sstables_capped_tombstone_deletion_time
|
||||
* scylla_sstables_cell_tombstone_writes
|
||||
* scylla_sstables_cell_writes
|
||||
* scylla_sstables_partition_reads
|
||||
* scylla_sstables_partition_seeks
|
||||
* scylla_sstables_partition_writes
|
||||
* scylla_sstables_range_partition_reads
|
||||
* scylla_sstables_range_tombstone_writes
|
||||
* scylla_sstables_row_reads
|
||||
* scylla_sstables_row_writes
|
||||
* scylla_sstables_single_partition_reads
|
||||
* scylla_sstables_sstable_partition_reads
|
||||
* scylla_sstables_static_row_writes
|
||||
* scylla_sstables_tombstone_writes
|
||||
* scylla_storage_proxy_coordinator_background_replica_writes_failed_local_node
|
||||
* scylla_storage_proxy_coordinator_background_writes_failed
|
||||
* scylla_storage_proxy_coordinator_last_mv_flow_control_delay
|
||||
* scylla_storage_proxy_replica_cross_shard_ops
|
||||
* scylla_transport_requests_blocked_memory_current
|
||||
* scylla_io_queue_shares
|
||||
|
||||
Updated Metrics
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
The following metric names have changed between Scylla Enterprise 2018.1 and 2019.1
|
||||
|
||||
.. list-table::
|
||||
:widths: 30 30
|
||||
:header-rows: 1
|
||||
|
||||
* - Scylla 2018.1 Name
|
||||
- Scylla 2019.1 Name
|
||||
* - scylla_io_queue_compaction_queue_length
|
||||
- scylla_io_queue_queue_length
|
||||
* - scylla_io_queue_compaction_total_bytes
|
||||
- scylla_io_queue_total_bytes
|
||||
* - scylla_io_queue_compaction_total_operations
|
||||
- scylla_io_queue_total_operations
|
||||
* - scylla_io_queue_default_delay
|
||||
- scylla_io_queue_delay
|
||||
* - scylla_io_queue_default_queue_length
|
||||
- scylla_io_queue_queue_length
|
||||
* - scylla_io_queue_default_total_bytes
|
||||
- scylla_io_queue_total_bytes
|
||||
* - scylla_io_queue_default_total_operations
|
||||
- scylla_io_queue_total_operations
|
||||
* - scylla_io_queue_memtable_flush_delay
|
||||
- scylla_io_queue_delay
|
||||
* - scylla_io_queue_memtable_flush_queue_length
|
||||
- scylla_io_queue_queue_length
|
||||
* - scylla_io_queue_memtable_flush_total_bytes
|
||||
- scylla_io_queue_total_bytes
|
||||
* - scylla_io_queue_memtable_flush_total_operations
|
||||
- scylla_io_queue_total_operations
|
||||
* - scylla_io_queue_commitlog_delay
|
||||
- scylla_io_queue_delay
|
||||
* - scylla_io_queue_commitlog_queue_length
|
||||
- scylla_io_queue_queue_length
|
||||
* - scylla_io_queue_commitlog_total_bytes
|
||||
- scylla_io_queue_total_bytes
|
||||
* - scylla_io_queue_commitlog_total_operations
|
||||
- scylla_io_queue_total_operations
|
||||
* - scylla_io_queue_compaction_delay
|
||||
- scylla_io_queue_delay
|
||||
* - scylla_reactor_cpu_busy_ns
|
||||
- scylla_reactor_cpu_busy_ms
|
||||
* - scylla_storage_proxy_coordinator_current_throttled_writes
|
||||
- scylla_storage_proxy_coordinator_current_throttled_base_writes
|
||||
|
||||
Deprecated Metrics
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
* scylla_database_cpu_flush_quota
|
||||
* scylla_scollectd_latency
|
||||
* scylla_scollectd_records
|
||||
* scylla_scollectd_total_bytes_sent
|
||||
* scylla_scollectd_total_requests
|
||||
* scylla_scollectd_total_time_in_ms
|
||||
* scylla_scollectd_total_values
|
||||
* scylla_transport_unpaged_queries
|
||||
|
||||
@@ -1,190 +0,0 @@
|
||||
=============================================================================================
|
||||
Upgrade Guide - Scylla Enterprise 2018.1 to 2019.1 for Red Hat Enterprise Linux 7 or CentOS 7
|
||||
=============================================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla Enterprise 2018.1 to Scylla Enterprise 2019.1, and rollback to 2018.1 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 2018.1.7 or later to Scylla Enterprise version 2019.1.y, on the following platforms:
|
||||
|
||||
.. note::
|
||||
|
||||
This upgrade procedure only works from **2018.1.7** or later. If you have an older Scylla Enterprise 2018.1.x version, please contact the Scylla Support team for advice.
|
||||
|
||||
* Red Hat Enterprise Linux, version 7 and later
|
||||
* CentOS, version 7 and later
|
||||
* Packages are no longer provided for Fedora
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
.. include:: /upgrade/upgrade-enterprise/_common/enterprise_2019.1_warnings.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure that does not require a full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
* Check cluster schema
|
||||
* Drain node and backup the data
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new 2019.1 features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes. See `sctool <https://manager.docs.scylladb.com/stable/sctool/index.html>`_ for suspending Scylla Manager scheduled or running repairs.
|
||||
* Not to apply schema changes
|
||||
|
||||
.. include:: /upgrade/_common/upgrade_to_2019_warning.rst
|
||||
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check cluster schema
|
||||
--------------------
|
||||
Make sure that all nodes have their schema synced prior to the upgrade. The upgrade will fail if there is schema disagreement between nodes.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-2018.1
|
||||
|
||||
Stop Scylla
|
||||
-----------
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``rpm -qa | grep scylla-server``. You should use the same version in case you want to :ref:`rollback <upgrade-2018.1-2019.1-rpm-rollback-procedure>` the upgrade. If you are not running a 2018.1.x version, stop right here! This guide only covers 2018.1.x to 2019.1.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the `Scylla RPM Enterprise repo <http://www.scylladb.com/enterprise-download/centos_rpm/>`_ to **2019.1**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum update scylla\* -y
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check the Scylla version.
|
||||
3. Use ``journalctl _COMM=scylla`` to check there are no new errors in the log.
|
||||
4. Check again after two minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
* More on :doc:`Scylla Metrics Update - Scylla Enterprise 2018.1 to 2019.1<metric-update-2018.1-to-2019.1>`
|
||||
|
||||
.. _upgrade-2018.1-2019.1-rpm-rollback-procedure:
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla Enterprise release 2019.1.x to 2018.1.y. Apply this procedure if an upgrade from 2018.1 to 2019.1 failed before completing on all nodes. Use this procedure only for nodes you upgraded to 2019.1
|
||||
|
||||
|
||||
Scylla rollback is a rolling procedure that does **not** require a full cluster shutdown.
|
||||
For each of the nodes rollback to 2018.1, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old Scylla packages
|
||||
* Restore the configuration file
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/yum.repos.d/scylla.repo
|
||||
|
||||
2. Update the `Scylla RPM Enterprise repo <http://www.scylladb.com/enterprise-download/centos_rpm/>`_ to **2018.1**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum clean all
|
||||
sudo rm -rf /var/cache/yum
|
||||
sudo yum remove scylla\*tools-core
|
||||
sudo yum downgrade scylla\* -y
|
||||
sudo yum install scylla-enterprise
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-2018.1 /etc/scylla/scylla.yaml
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from the previous snapshot, as 2019.1 uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
sudo chown -R scylla:scylla /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check the upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,7 +0,0 @@
|
||||
.. |OS| replace:: 16.04
|
||||
.. |ROLLBACK| replace:: rollback
|
||||
.. _ROLLBACK: /upgrade/upgrade-enterprise/upgrade-guide-from-2018.1-to-2019.1/upgrade-guide-from-2018.1-to-2019.1-ubuntu-16-04/#rollback-procedure
|
||||
.. |APT| replace:: Scylla Enterprise Deb repo
|
||||
.. _APT: https://www.scylladb.com/download/enterprise/scylla-ubuntu-16-04/
|
||||
.. |OPENJDK| replace:: openjdk-8-jre-headless
|
||||
.. include:: /upgrade/_common/upgrade-guide-from-2018.1-to-2019.1-ubuntu-and-debian.rst
|
||||
@@ -1,35 +0,0 @@
|
||||
=====================================================
|
||||
Upgrade Scylla Enterprise 2018
|
||||
=====================================================
|
||||
|
||||
.. toctree::
|
||||
:titlesonly:
|
||||
:hidden:
|
||||
|
||||
Red Hat Enterprise Linux and CentOS <upgrade-guide-from-2018.x.y-to-2018.x.z-rpm>
|
||||
Ubuntu <upgrade-guide-from-2018.x.y-to-2018.x.z-ubuntu>
|
||||
Debian <upgrade-guide-from-2018.x.y-to-2018.x.z-debian>
|
||||
|
||||
.. raw:: html
|
||||
|
||||
|
||||
<div class="panel callout radius animated">
|
||||
<div class="row">
|
||||
<div class="medium-3 columns">
|
||||
<h5 id="getting-started">Upgrade Scylla Enterprise</h5>
|
||||
</div>
|
||||
<div class="medium-9 columns">
|
||||
|
||||
Upgrade guides are available for:
|
||||
|
||||
* :doc:`Upgrade Scylla Enterprise from 2018.x.y to 2018.x.z on Red Hat Enterprise Linux and CentOS <upgrade-guide-from-2018.x.y-to-2018.x.z-rpm>`
|
||||
* :doc:`Upgrade Scylla Enterprise from 2018.x.y to 2018.x.z on Ubuntu <upgrade-guide-from-2018.x.y-to-2018.x.z-ubuntu>`
|
||||
* :doc:`Upgrade Scylla Enterprise from 2018.x.y to 2018.x.z on Debian <upgrade-guide-from-2018.x.y-to-2018.x.z-debian>`
|
||||
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
.. |OS| replace:: Debian 8
|
||||
.. |ROLLBACK| replace:: rollback
|
||||
.. _ROLLBACK: /upgrade/upgrade-enterprise/upgrade-guide-from-2018.x.y-to-2018.x.z/upgrade-guide-from-2018.x.y-to-2018.x.z-debian/#rollback-procedure
|
||||
.. |APT| replace:: Scylla Enterprise deb repo
|
||||
.. _APT: http://www.scylladb.com/enterprise-download/debian8/
|
||||
.. include:: /upgrade/_common/upgrade-guide-from-2018.x.y-to-2018.x.z-ubuntu-and-debian.rst
|
||||
@@ -1,166 +0,0 @@
|
||||
===========================================================================================
|
||||
Upgrade Guide - Scylla Enterprise 2018.x.y to 2018.x.z for Red Hat Enterprise 7 or CentOS 7
|
||||
===========================================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla Enterprise 2018.x.y to 2018.x.z.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla Enterprise from the following versions: 2018.x.y to 2018.x.z, on the following platforms:
|
||||
|
||||
* Red Hat Enterprise Linux, version 7 and later
|
||||
* CentOS, version 7 and later
|
||||
* Packages are no longer provided for Fedora
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
.. include:: /upgrade/upgrade-enterprise/_common/gossip_generation_bug_warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure that does not require a full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
* Drain node and backup the data
|
||||
* Check your current release
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade, it is highly recommended:
|
||||
|
||||
* Not to use new 2018.x.z features.
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes.
|
||||
* Not to apply schema changes.
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-2018.x.z
|
||||
|
||||
Stop Scylla
|
||||
-----------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``rpm -qa | grep scylla-server``. You should use the same version in case you want to :ref:`rollback <upgrade-2018.x.y-to-2018.x.z-rpm-rollback-procedure>` the upgrade. If you are not running a 2018.x.y version, stop right here! This guide only covers 2018.x.y to 2018.x.z upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the `Scylla Enterprise RPM repo <http://www.scylladb.com/enterprise-download/centos_rpm>`_ to **2018.x**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum update scylla\* -y
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check the Scylla version.
|
||||
3. Use ``journalctl _COMM=scylla`` to check there are no new errors in the log.
|
||||
4. Check again after two minutes to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
.. _upgrade-2018.x.y-to-2018.x.z-rpm-rollback-procedure:
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla Enterprise release 2018.x.z to 2018.x.y. Apply this procedure if an upgrade from 2018.x.y to 2018.x.z failed before completing on all nodes. Use this procedure only for nodes you upgraded to 2018.x.z
|
||||
|
||||
Scylla rollback is a rolling procedure that does **not** require a full cluster shutdown.
|
||||
For each of the nodes rollback to 2018.x.y, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Downgrade to previous release
|
||||
* Restore the configuration file
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Downgrade to previous release
|
||||
--------------------------------------------------
|
||||
1. Install
|
||||
|
||||
|
||||
Scylla Enterprise 2018.1.5 started using new gcc packages. These gcc packages should be removed before the downgrade if you upgraded from 2018.1.x (x<5) to 2018.1.y (y>=5).
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum remove scylla-libgcc73 scylla-libstdc++73 -y
|
||||
sudo yum downgrade scylla\*-2018.x.y -y
|
||||
sudo yum install scylla-enterprise
|
||||
|
||||
If you did not upgrade from 2018.1.x (x<5) to 2018.1.y (y>=5), you can downgrade the packages directly.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum downgrade scylla\*-2018.x.y
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-2018.x.z /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check the upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,6 +0,0 @@
|
||||
.. |OS| replace:: Ubuntu 14.04 or 16.04
|
||||
.. |ROLLBACK| replace:: rollback
|
||||
.. _ROLLBACK: /upgrade/upgrade-enterprise/upgrade-guide-from-2018.x.y-to-2018.x.z/upgrade-guide-from-2018.x.y-to-2018.x.z-ubuntu/#rollback-procedure
|
||||
.. |APT| replace:: Scylla Enterprise deb repo
|
||||
.. _APT: http://www.scylladb.com/enterprise-download/
|
||||
.. include:: /upgrade/_common/upgrade-guide-from-2018.x.y-to-2018.x.z-ubuntu-and-debian.rst
|
||||
@@ -1,41 +0,0 @@
|
||||
==================================================
|
||||
Upgrade from Scylla Enterprise 2019.1 to 2020.1
|
||||
==================================================
|
||||
|
||||
.. toctree::
|
||||
:hidden:
|
||||
:titlesonly:
|
||||
|
||||
Red Hat Enterprise Linux and CentOS <upgrade-guide-from-2019.1-to-2020.1-rpm>
|
||||
Ubuntu 16.04 <upgrade-guide-from-2019.1-to-2020.1-ubuntu-16-04>
|
||||
Ubuntu 18.04 <upgrade-guide-from-2019.1-to-2020.1-ubuntu-18-04>
|
||||
Debian <upgrade-guide-from-2019.1-to-2020.1-debian>
|
||||
Metrics <metric-update-2019.1-to-2020.1>
|
||||
|
||||
.. raw:: html
|
||||
|
||||
|
||||
<div class="panel callout radius animated">
|
||||
<div class="row">
|
||||
<div class="medium-3 columns">
|
||||
<h5 id="getting-started">Upgrade to Scylla Enterprise 2020.1</h5>
|
||||
</div>
|
||||
<div class="medium-9 columns">
|
||||
|
||||
Upgrade guides are available for:
|
||||
|
||||
* :doc:`Upgrade Scylla Enterprise from 2019.1.x to 2020.1.y on Red Hat Enterprise Linux and CentOS <upgrade-guide-from-2019.1-to-2020.1-rpm>`
|
||||
* :doc:`Upgrade Scylla Enterprise from 2019.1.x to 2020.1.y on Ubuntu 16.04 <upgrade-guide-from-2019.1-to-2020.1-ubuntu-16-04>`
|
||||
* :doc:`Upgrade Scylla Enterprise from 2019.1.x to 2020.1.y on Ubuntu 18.04 <upgrade-guide-from-2019.1-to-2020.1-ubuntu-18-04>`
|
||||
* :doc:`Upgrade Scylla Enterprise from 2019.1.x to 2020.1.y on Debian <upgrade-guide-from-2019.1-to-2020.1-debian>`
|
||||
* :doc:`Scylla Enterprise Metrics Update - Scylla 2019.1 to 2020.1<metric-update-2019.1-to-2020.1>`
|
||||
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
@@ -1,109 +0,0 @@
|
||||
====================================================================
|
||||
Scylla Enterprise Metric Update - Scylla Enterprise 2019.1 to 2020.1
|
||||
====================================================================
|
||||
|
||||
|
||||
The following metrics are new in 2020.1 compared to 2019.1
|
||||
|
||||
CQL metrics
|
||||
~~~~~~~~~~~
|
||||
|
||||
* *scylla_cql_deletes_per_ks* : Counts the number of CQL DELETE requests executed on particular keyspaces. The label 'who' indicates where the reqs come from (clients or DB internals)
|
||||
* *scylla_cql_inserts_per_ks* : Counts the number of CQL INSERT requests executed on particular keyspaces. The label 'who' indicates where the reqs come from (clients or DB internals).
|
||||
* *scylla_cql_reads_per_ks* : Counts the number of CQL SELECT requests executed on particular keyspaces. The label 'who' indicates where the reqs come from (clients or DB internals)
|
||||
* *scylla_cql_select_allow_filtering* : Counts the number of SELECT query executions with ALLOW FILTERING option.
|
||||
* *scylla_cql_select_bypass_caches* : Counts the number of SELECT query executions with BYPASS CACHE option.
|
||||
* *scylla_cql_select_partition_range_scan* : Counts the number of SELECT query executions requiring partition range scan.
|
||||
* *scylla_cql_select_partition_range_scan_no_bypass_cache* : Counts the number of SELECT query executions requiring partition range scan without BYPASS CACHE option.
|
||||
* *scylla_cql_unpaged_select_queries_per_ks* : Counts the number of unpaged CQL SELECT requests against particular keyspaces.
|
||||
* *scylla_cql_updates_per_ks* : Counts the number of CQL UPDATE requests executed on particular keyspaces. The label 'who' indicates where the reqs come from (clients or DB internals)
|
||||
|
||||
SSTable metrics
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
* *scylla_sstables_capped_local_deletion_time* : Was local deletion time capped at maximum allowed value in Statistics
|
||||
* *scylla_sstables_capped_tombstone_deletion_time* : Was partition tombstone deletion time capped at maximum allowed value
|
||||
* *scylla_sstables_cell_tombstone_writes* : Number of cell tombstones written
|
||||
* *scylla_sstables_cell_writes* : Number of cells written
|
||||
* *scylla_sstables_partition_reads* : Number of partitions read
|
||||
* *scylla_sstables_partition_seeks* : Number of partitions seeked
|
||||
* *scylla_sstables_partition_writes* : Number of partitions written
|
||||
* *scylla_sstables_range_partition_reads* : Number of partition range flat mutation reads
|
||||
* *scylla_sstables_range_tombstone_writes* : Number of range tombstones written
|
||||
* *scylla_sstables_row_reads* : Number of rows read
|
||||
* *scylla_sstables_row_writes* : Number of clustering rows written
|
||||
* *scylla_sstables_single_partition_reads* : Number of single partition flat mutation reads
|
||||
* *scylla_sstables_sstable_partition_reads* : Number of whole sstable flat mutation reads
|
||||
* *scylla_sstables_static_row_writes* : Number of static rows written
|
||||
* *scylla_sstables_tombstone_writes* : Number of tombstones written
|
||||
|
||||
Storage Proxy Metrics
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
* *scylla_storage_proxy_coordinator_cas_dropped_prune* : How many times a coordinator did not perform prune after cas
|
||||
* *scylla_storage_proxy_coordinator_cas_failed_read_round_optimization* : Cas read rounds issued only if previous value is missing on some replica
|
||||
* *scylla_storage_proxy_coordinator_cas_prune* : How many times paxos prune was done after successful cas operation
|
||||
* *scylla_storage_proxy_coordinator_cas_read_contention* : How many contended reads were encountered
|
||||
* *scylla_storage_proxy_coordinator_cas_read_latency* : Transactional read latency histogram
|
||||
* *scylla_storage_proxy_coordinator_cas_read_timeouts* : Number of transactional read request failed due to a timeout
|
||||
* *scylla_storage_proxy_coordinator_cas_read_unavailable* : Number of transactional read requests failed due to an "unavailable" error
|
||||
* *scylla_storage_proxy_coordinator_cas_read_unfinished_commit* : Number of transaction commit attempts that occurred on read
|
||||
* *scylla_storage_proxy_coordinator_cas_write_condition_not_met* : Number of transaction preconditions that did not match current values
|
||||
* *scylla_storage_proxy_coordinator_cas_write_contention* : How many contended writes were encountered
|
||||
* *scylla_storage_proxy_coordinator_cas_write_latency* : Transactional write latency histogram
|
||||
* *scylla_storage_proxy_coordinator_cas_write_timeout_due_to_uncertainty* : How many times write timeout was reported because of uncertainty in the result
|
||||
* *scylla_storage_proxy_coordinator_cas_write_timeouts* : Number of transactional write request failed due to a timeout
|
||||
* *scylla_storage_proxy_coordinator_cas_write_unavailable* : Number of transactional write requests failed due to an "unavailable" error
|
||||
* *scylla_storage_proxy_coordinator_cas_write_unfinished_commit* : Number of transaction commit attempts that occurred on write
|
||||
* *scylla_storage_proxy_coordinator_foreground_read_repairs* : Number of foreground read repairs
|
||||
* *scylla_storage_proxy_coordinator_reads_coordinator_outside_replica_set* : Number of CQL read requests which arrived to a non-replica and had to be forwarded to a replica
|
||||
* *scylla_storage_proxy_coordinator_writes_coordinator_outside_replica_set* : Number of CQL write requests which arrived to a non-replica and had to be forwarded to a replica
|
||||
* *scylla_storage_proxy_replica_cas_dropped_prune* : How many times a coordinator did not perform prune after cas
|
||||
* *scylla_tracing_keyspace_helper_bad_column_family_errors*
|
||||
* *scylla_tracing_keyspace_helper_tracing_errors*
|
||||
|
||||
Other metrics
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
* *scylla_stall_detector_reported* : Total number of reported stalls. Look in the traces for the exact reason
|
||||
* *scylla_database_paused_reads* : The number of currently active reads that are temporarily paused.
|
||||
* *scylla_database_paused_reads_permit_based_evictions* : The number of paused reads evicted to free up permits. Permits are required for new reads to start, and the database will evict paused reads (if any) to be able to admit new ones if there is a shortage of permits.
|
||||
* *scylla_database_schema_changed* : The number of times the schema changed
|
||||
* *scylla_memtables_failed_flushes* : Holds the number of failed memtable flushes. A high value in this metric may indicate a permanent failure to flush a memtable.
|
||||
* *scylla_query_processor_queries* : Counts queries by consistency level.
|
||||
* *scylla_reactor_abandoned_failed_futures* : Total number of abandoned failed futures, futures destroyed while still containing an exception
|
||||
* *scylla_reactor_aio_errors* : Total aio errors
|
||||
|
||||
|
||||
CDC Metrics (disabled in 2020.1.0)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
* *scylla_cdc_operations_failed* : Number of failed CDC operations
|
||||
* *scylla_cdc_operations_on_clustering_row_performed_failed* : Number of failed CDC operations that processed a clustering_row
|
||||
* *scylla_cdc_operations_on_clustering_row_performed_total* : Number of total CDC operations that processed a clustering_row
|
||||
* *scylla_cdc_operations_on_list_performed_failed* : Number of failed CDC operations that processed a list
|
||||
* *scylla_cdc_operations_on_list_performed_total* : Number of total CDC operations that processed a list
|
||||
* *scylla_cdc_operations_on_map_performed_failed* : Number of failed CDC operations that processed a map
|
||||
* *scylla_cdc_operations_on_map_performed_total* : Number of total CDC operations that processed a map
|
||||
* *scylla_cdc_operations_on_partition_delete_performed_failed* : Number of failed CDC operations that processed a partition_delete
|
||||
* *scylla_cdc_operations_on_partition_delete_performed_total* : Number of total CDC operations that processed a partition_delete
|
||||
* *scylla_cdc_operations_on_range_tombstone_performed_failed* : Number of failed CDC operations that processed a range_tombstone
|
||||
* *scylla_cdc_operations_on_range_tombstone_performed_total* : Number of total CDC operations that processed a range_tombstone
|
||||
* *scylla_cdc_operations_on_row_delete_performed_failed* : Number of failed CDC operations that processed a row_delete
|
||||
* *scylla_cdc_operations_on_row_delete_performed_total* : Number of total CDC operations that processed a row_delete
|
||||
* *scylla_cdc_operations_on_set_performed_failed* : Number of failed CDC operations that processed a set
|
||||
* *scylla_cdc_operations_on_set_performed_total* : Number of total CDC operations that processed a set
|
||||
* *scylla_cdc_operations_on_static_row_performed_failed* : Number of failed CDC operations that processed a static_row
|
||||
* *scylla_cdc_operations_on_static_row_performed_total* : Number of total CDC operations that processed a static_row
|
||||
* *scylla_cdc_operations_on_udt_performed_failed* : Number of failed CDC operations that processed a udt
|
||||
* *scylla_cdc_operations_on_udt_performed_total* : Number of total CDC operations that processed a udt
|
||||
* *scylla_cdc_operations_total* : Number of total CDC operations
|
||||
* *scylla_cdc_operations_with_postimage_failed* : Number of failed operations that included postimage
|
||||
* *scylla_cdc_operations_with_postimage_total* : Number of total operations that included postimage
|
||||
* *scylla_cdc_operations_with_preimage_failed* : Number of failed operations that included preimage
|
||||
* *scylla_cdc_operations_with_preimage_total* : Number of total operations that included preimage
|
||||
* *scylla_cdc_preimage_selects_failed* : Number of failed preimage queries performed
|
||||
* *scylla_cdc_preimage_selects_total* : Number of total preimage queries performed
|
||||
* *scylla_compaction_manager_pending_compactions* : Holds the number of compaction tasks waiting for an opportunity to run.
|
||||
|
||||
|
||||
@@ -1,7 +0,0 @@
|
||||
.. |OS| replace:: Debian 9
|
||||
.. |ROLLBACK| replace:: rollback
|
||||
.. _ROLLBACK: /upgrade/upgrade-enterprise/upgrade-guide-from-2019.1-to-2020.1/upgrade-guide-from-2019.1-to-2020.1-debian/#rollback-procedure
|
||||
.. |APT| replace:: Scylla Enterprise Deb repo
|
||||
.. _APT: https://www.scylladb.com/customer-portal/?product=ent&platform=debian-9&version=stable-release-2020.1
|
||||
.. |OPENJDK| replace:: openjdk-8-jre-headless
|
||||
.. include:: /upgrade/_common/upgrade-guide-from-2019.1-to-2020.1-ubuntu-and-debian.rst
|
||||
@@ -1,190 +0,0 @@
|
||||
=============================================================================================
|
||||
Upgrade Guide - Scylla Enterprise 2019.1 to 2020.1 for Red Hat Enterprise Linux 7 or CentOS 7
|
||||
=============================================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla Enterprise 2019.1 to Scylla Enterprise 2020.1, and rollback to 2019.1 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 2019.1.7 or later to Scylla Enterprise version 2020.1.y, on the following platforms:
|
||||
|
||||
.. note::
|
||||
|
||||
This upgrade procedure only works from **2019.1.7** or later. If you have an older Scylla Enterprise 2019.1.x version, please contact the Scylla Support team for advice.
|
||||
|
||||
* Red Hat Enterprise Linux, version 7 and later
|
||||
* CentOS, version 7 and later
|
||||
* Packages are no longer provided for Fedora
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
.. include:: /upgrade/upgrade-enterprise/_common/enterprise_2020.1_warnings.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure that does not require a full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
* Check cluster schema
|
||||
* Drain node and backup the data
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade, it is highly recommended:
|
||||
|
||||
* Not to use new 2020.1 features.
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes. See `sctool <https://manager.docs.scylladb.com/stable/sctool/index.html>`_ for suspending Scylla Manager scheduled or running repairs.
|
||||
* Not to apply schema changes.
|
||||
|
||||
.. include:: /upgrade/_common/upgrade_to_2020_warning.rst
|
||||
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check cluster schema
|
||||
--------------------
|
||||
Make sure that all nodes have the schema synched prior to the upgrade. The upgrade will fail if there is a schema disagreement between nodes.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-2019.1
|
||||
|
||||
Stop Scylla
|
||||
-----------
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``rpm -qa | grep scylla-server``. You should use the same version in case you want to :ref:`rollback <upgrade-2019.1-2020.1-rpm-rollback-procedure>` the upgrade. If you are not running a 2019.1.x version, stop right here! This guide only covers 2019.1.x to 2020.1.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the `Scylla RPM Enterprise repo <https://www.scylladb.com/customer-portal/?product=ent&platform=centos7&version=stable-release-2020.1>`_ to **2020.1**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum update scylla\* -y
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Use ``journalctl _COMM=scylla`` to check there are no new errors in the log.
|
||||
4. Check again after 2 minutes to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
* More on :doc:`Scylla Metrics Update - Scylla Enterprise 2019.1 to 2020.1<metric-update-2019.1-to-2020.1>`
|
||||
|
||||
.. _upgrade-2019.1-2020.1-rpm-rollback-procedure:
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla Enterprise release 2020.1.x to 2019.1.y. Apply this procedure if an upgrade from 2019.1 to 2020.1 failed before completing on all nodes. Use this procedure only for nodes you upgraded to 2020.1
|
||||
|
||||
|
||||
Scylla rollback is a rolling procedure that does **not** require a full cluster shutdown.
|
||||
For each of the nodes you roll back to 2019.1, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old Scylla packages
|
||||
* Restore the configuration file
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the previous release
|
||||
-----------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/yum.repos.d/scylla.repo
|
||||
|
||||
2. Update the `Scylla RPM Enterprise 2019.1 repo <https://www.scylladb.com/customer-portal/?product=ent&platform=centos7&version=stable-release-2019.1>`_ to **2019.1**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum clean all
|
||||
sudo rm -rf /var/cache/yum
|
||||
sudo yum remove scylla\*tools-core
|
||||
sudo yum downgrade scylla\* -y
|
||||
sudo yum install scylla-enterprise
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-2019.1 /etc/scylla/scylla.yaml
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from the previous snapshot, as 2020.1 uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
sudo chown -R scylla:scylla /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check the upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,7 +0,0 @@
|
||||
.. |OS| replace:: 16.04
|
||||
.. |ROLLBACK| replace:: rollback
|
||||
.. _ROLLBACK: /upgrade/upgrade-enterprise/upgrade-guide-from-2019.1-to-2020.1/upgrade-guide-from-2019.1-to-2020.1-ubuntu-16-04/#rollback-procedure
|
||||
.. |APT| replace:: Scylla Enterprise Deb repo
|
||||
.. _APT: https://www.scylladb.com/customer-portal/?product=ent&platform=ubuntu-16.04&version=stable-release-2020.1
|
||||
.. |OPENJDK| replace:: openjdk-8-jre-headless
|
||||
.. include:: /upgrade/_common/upgrade-guide-from-2019.1-to-2020.1-ubuntu-and-debian.rst
|
||||
@@ -1,7 +0,0 @@
|
||||
.. |OS| replace:: 18.04
|
||||
.. |ROLLBACK| replace:: rollback
|
||||
.. _ROLLBACK: /upgrade/upgrade-enterprise/upgrade-guide-from-2019.1-to-2020.1/upgrade-guide-from-2019.1-to-2020.1-ubuntu-18-04/#rollback-procedure
|
||||
.. |APT| replace:: Scylla Enterprise Deb repo
|
||||
.. _APT: https://www.scylladb.com/customer-portal/?product=ent&platform=ubuntu-18.04&version=stable-release-2020.1
|
||||
.. |OPENJDK| replace:: openjdk-8-jre-headless
|
||||
.. include:: /upgrade/_common/upgrade-guide-from-2019.1-to-2020.1-ubuntu-and-debian.rst
|
||||
@@ -1,35 +0,0 @@
|
||||
=====================================================
|
||||
Upgrade Scylla Enterprise 2019
|
||||
=====================================================
|
||||
|
||||
.. toctree::
|
||||
:titlesonly:
|
||||
:hidden:
|
||||
|
||||
Red Hat Enterprise Linux and CentOS <upgrade-guide-from-2019.x.y-to-2019.x.z-rpm>
|
||||
Ubuntu <upgrade-guide-from-2019.x.y-to-2019.x.z-ubuntu>
|
||||
Debian <upgrade-guide-from-2019.x.y-to-2019.x.z-debian>
|
||||
|
||||
.. raw:: html
|
||||
|
||||
|
||||
<div class="panel callout radius animated">
|
||||
<div class="row">
|
||||
<div class="medium-3 columns">
|
||||
<h5 id="getting-started">Upgrade Scylla Enterprise</h5>
|
||||
</div>
|
||||
<div class="medium-9 columns">
|
||||
|
||||
Upgrade guides are available for:
|
||||
|
||||
* :doc:`Upgrade Scylla Enterprise from 2019.x.y to 2019.x.z on Red Hat Enterprise Linux and CentOS <upgrade-guide-from-2019.x.y-to-2019.x.z-rpm>`
|
||||
* :doc:`Upgrade Scylla Enterprise from 2019.x.y to 2019.x.z on Ubuntu <upgrade-guide-from-2019.x.y-to-2019.x.z-ubuntu>`
|
||||
* :doc:`Upgrade Scylla Enterprise from 2019.x.y to 2019.x.z on Debian <upgrade-guide-from-2019.x.y-to-2019.x.z-debian>`
|
||||
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
.. |OS| replace:: Debian 9
|
||||
.. |ROLLBACK| replace:: rollback
|
||||
.. _ROLLBACK: /upgrade/upgrade-enterprise/upgrade-guide-from-2019.x.y-to-2019.x.z/upgrade-guide-from-2019.x.y-to-2019.x.z-debian/#rollback-procedure
|
||||
.. |APT| replace:: Scylla Enterprise deb repo
|
||||
.. _APT: http://www.scylladb.com/enterprise-download/debian9/
|
||||
.. include:: /upgrade/_common/upgrade-guide-from-2019.x.y-to-2019.x.z-ubuntu-and-debian.rst
|
||||
@@ -1,155 +0,0 @@
|
||||
===========================================================================================
|
||||
Upgrade Guide - Scylla Enterprise 2019.x.y to 2019.x.z for Red Hat Enterprise 7 or CentOS 7
|
||||
===========================================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla Enterprise 2019.x.y to 2019.x.z.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla Enterprise from the following versions: 2019.x.y to 2019.x.z, on the following platforms:
|
||||
|
||||
* Red Hat Enterprise Linux, version 7 and later
|
||||
* CentOS, version 7 and later
|
||||
* Packages are no longer provided for Fedora
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
.. include:: /upgrade/upgrade-enterprise/_common/enterprise_2019.1_warnings.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure that does not require a full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
* Drain node and backup the data
|
||||
* Check your current release
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade, it is highly recommended:
|
||||
|
||||
* Not to use new 2019.x.z features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-2019.x.z
|
||||
|
||||
Stop Scylla
|
||||
-----------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``rpm -qa | grep scylla-server``. You should use the same version in case you want to :ref:`rollback <upgrade-2019.x.y-to-2019.x.z-rpm-rollback-procedure>` the upgrade. If you are not running a 2019.x.y version, stop right here! This guide only covers 2019.x.y to 2019.x.z upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the `Scylla Enterprise RPM repo <http://www.scylladb.com/enterprise-download/centos_rpm>`_ to **2019.x**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum update scylla\* -y
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Use ``journalctl _COMM=scylla`` to check there are no new errors in the log.
|
||||
4. Check again after two minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
.. _upgrade-2019.x.y-to-2019.x.z-rpm-rollback-procedure:
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla Enterprise release 2019.x.z to 2019.x.y. Apply this procedure if an upgrade from 2019.x.y to 2019.x.z failed before completing on all nodes. Use this procedure only for nodes you upgraded to 2019.x.z
|
||||
|
||||
Scylla rollback is a rolling procedure that does **not** require a full cluster shutdown.
|
||||
For each of the nodes you roll back to 2019.x.y, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Downgrade to previous release
|
||||
* Restore the configuration file
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Downgrade to previous release
|
||||
--------------------------------------------------
|
||||
1. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum downgrade scylla\*-2019.x.y -y
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-2019.x.z /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check the upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,6 +0,0 @@
|
||||
.. |OS| replace:: Ubuntu 16.04 or 18.04
|
||||
.. |ROLLBACK| replace:: rollback
|
||||
.. _ROLLBACK: /upgrade/upgrade-enterprise/upgrade-guide-from-2019.x.y-to-2019.x.z/upgrade-guide-from-2019.x.y-to-2019.x.z-ubuntu/#rollback-procedure
|
||||
.. |APT| replace:: Scylla Enterprise deb repo
|
||||
.. _APT: http://www.scylladb.com/enterprise-download/
|
||||
.. include:: /upgrade/_common/upgrade-guide-from-2019.x.y-to-2019.x.z-ubuntu-and-debian.rst
|
||||
@@ -1,41 +0,0 @@
|
||||
==================================================
|
||||
Upgrade from Scylla Enterprise 2020.1 to 2021.1
|
||||
==================================================
|
||||
|
||||
.. toctree::
|
||||
:hidden:
|
||||
:titlesonly:
|
||||
|
||||
Red Hat Enterprise Linux and CentOS <upgrade-guide-from-2020.1-to-2021.1-rpm>
|
||||
Ubuntu 16.04 <upgrade-guide-from-2020.1-to-2021.1-ubuntu-16-04>
|
||||
Ubuntu 18.04 <upgrade-guide-from-2020.1-to-2021.1-ubuntu-18-04>
|
||||
Debian <upgrade-guide-from-2020.1-to-2021.1-debian>
|
||||
Metrics <metric-update-2020.1-to-2021.1>
|
||||
|
||||
.. raw:: html
|
||||
|
||||
|
||||
<div class="panel callout radius animated">
|
||||
<div class="row">
|
||||
<div class="medium-3 columns">
|
||||
<h5 id="getting-started">Upgrade to Scylla Enterprise 2021.1</h5>
|
||||
</div>
|
||||
<div class="medium-9 columns">
|
||||
|
||||
Upgrade guides are available for:
|
||||
|
||||
* :doc:`Upgrade Scylla Enterprise from 2020.1.x to 2021.1.y on Red Hat Enterprise Linux and CentOS <upgrade-guide-from-2020.1-to-2021.1-rpm>`
|
||||
* :doc:`Upgrade Scylla Enterprise from 2020.1.x to 2021.1.y on Ubuntu 16.04 <upgrade-guide-from-2020.1-to-2021.1-ubuntu-16-04>`
|
||||
* :doc:`Upgrade Scylla Enterprise from 2020.1.x to 2021.1.y on Ubuntu 18.04 <upgrade-guide-from-2020.1-to-2021.1-ubuntu-18-04>`
|
||||
* :doc:`Upgrade Scylla Enterprise from 2020.1.x to 2021.1.y on Debian <upgrade-guide-from-2020.1-to-2021.1-debian>`
|
||||
* :doc:`Scylla Enterprise Metrics Update - Scylla 2020.1 to 2021.1<metric-update-2020.1-to-2021.1>`
|
||||
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
====================================================================
|
||||
Scylla Enterprise Metric Update - Scylla Enterprise 2020.1 to 2021.1
|
||||
====================================================================
|
||||
|
||||
|
||||
The following metrics are new in 2021.1 compared to 2020.1:
|
||||
|
||||
* scylla_commitlog_disk_active_bytes
|
||||
* scylla_compaction_manager_backlog
|
||||
* scylla_hints_for_views_manager_pending_drains
|
||||
* scylla_hints_for_views_manager_pending_sends
|
||||
* scylla_hints_manager_pending_drains
|
||||
* scylla_hints_manager_pending_sends
|
||||
* scylla_sstables_index_page_cache_bytes
|
||||
* scylla_sstables_index_page_cache_evictions
|
||||
* scylla_sstables_index_page_cache_hits
|
||||
* scylla_sstables_index_page_cache_misses
|
||||
* scylla_sstables_index_page_cache_populations
|
||||
* scylla_sstables_pi_cache_block_count
|
||||
* scylla_sstables_pi_cache_bytes
|
||||
* scylla_sstables_pi_cache_evictions
|
||||
* scylla_sstables_pi_cache_hits_l0
|
||||
* scylla_sstables_pi_cache_hits_l1
|
||||
* scylla_sstables_pi_cache_hits_l2
|
||||
* scylla_sstables_pi_cache_misses_l0
|
||||
* scylla_sstables_pi_cache_misses_l1
|
||||
* scylla_sstables_pi_cache_misses_l2
|
||||
* scylla_sstables_pi_cache_populations
|
||||
* scylla_storage_proxy_coordinator_background_replica_writes_failed_remote_node
|
||||
* scylla_storage_proxy_coordinator_cas_background
|
||||
* scylla_storage_proxy_coordinator_cas_foreground
|
||||
* scylla_storage_proxy_coordinator_cas_total_operations
|
||||
* scylla_storage_proxy_coordinator_total_write_attempts_remote_node
|
||||
* scylla_transport_requests_shed
|
||||
* scylla_view_builder_builds_in_progress
|
||||
* scylla_view_builder_pending_bookkeeping_ops
|
||||
* scylla_view_builder_steps_failed
|
||||
* scylla_view_builder_steps_performed
|
||||
* scylla_view_update_generator_pending_registrations
|
||||
* scylla_view_update_generator_queued_batches_count
|
||||
* scylla_view_update_generator_sstables_to_move_count
|
||||
|
||||
The following metrics are no longer available in 2021.1 compared to 2020.1:
|
||||
|
||||
* scylla_lsa_segments_migrated
|
||||
* scylla_reactor_io_queue_requests
|
||||
@@ -1,7 +0,0 @@
|
||||
.. |OS| replace:: Debian 9
|
||||
.. |ROLLBACK| replace:: rollback
|
||||
.. _ROLLBACK: /upgrade/upgrade-enterprise/upgrade-guide-from-2020.1-to-2021.1/upgrade-guide-from-2020.1-to-2021.1-debian/#rollback-procedure
|
||||
.. |APT| replace:: Scylla Enterprise Deb repo
|
||||
.. _APT: https://www.scylladb.com/customer-portal/?product=ent&platform=debian-9&version=stable-release-2021.1
|
||||
.. |OPENJDK| replace:: openjdk-8-jre-headless
|
||||
.. include:: /upgrade/_common/upgrade-guide-from-2020.1-to-2021.1-ubuntu-and-debian.rst
|
||||
@@ -1,190 +0,0 @@
|
||||
=============================================================================================
|
||||
Upgrade Guide - Scylla Enterprise 2020.1 to 2021.1 for Red Hat Enterprise Linux 7 or CentOS 7
|
||||
=============================================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla Enterprise 2020.1 to Scylla Enterprise 2021.1, and rollback to 2020.1 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: **2020.1.8** or later to Scylla Enterprise version 2021.1.y, on the following platforms:
|
||||
|
||||
* Red Hat Enterprise Linux, version 7 and later
|
||||
* CentOS, version 7 and later
|
||||
* Fedora — packages are no longer provided for Fedora
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
A Scylla upgrade is a rolling procedure that does not require a full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
* Check cluster schema
|
||||
* Drain node and backup the data
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade, it is highly recommended:
|
||||
|
||||
* Not to use new 2021.1 features.
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes. See `sctool <https://manager.docs.scylladb.com/stable/sctool/index.html>`_ for suspending Scylla Manager scheduled or running repairs.
|
||||
* Not to apply schema changes.
|
||||
|
||||
.. include:: /upgrade/_common/upgrade_to_2021_warning.rst
|
||||
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check cluster schema
|
||||
--------------------
|
||||
Make sure that all nodes have the schema synched prior to the upgrade. The upgrade will fail if there is a schema disagreement between nodes.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-2020.1
|
||||
|
||||
Stop Scylla
|
||||
-----------
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``rpm -qa scylla\*server``. You should use the same version in case you want to :ref:`rollback <upgrade-2020.1-2021.1-rpm-rollback-procedure>` the upgrade. If you are not running a 2020.1.x version, stop right here! This guide only covers 2020.1.x to 2021.1.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the `Scylla RPM Enterprise repo <https://www.scylladb.com/customer-portal/?product=ent&platform=centos7&version=stable-release-2021.1>`_ to **2021.1**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum clean all
|
||||
sudo yum update scylla\* -y
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
A new io.conf format was introduced in Scylla 2.3 and 2019.1. If your io.conf does not contain the `--io-properties-file` option, it is still in the old format, and you need to re-run the I/O setup to generate a new io.conf.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo scylla_io_setup
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check the Scylla version.
|
||||
3. Use ``journalctl _COMM=scylla`` to check there are no new errors in the log.
|
||||
4. Check again after 2 minutes to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
* More on :doc:`Scylla Metrics Update - Scylla Enterprise 2020.1 to 2021.1<metric-update-2020.1-to-2021.1>`
|
||||
|
||||
.. _upgrade-2020.1-2021.1-rpm-rollback-procedure:
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla Enterprise release 2021.1.x to 2020.1.y. Apply this procedure if an upgrade from 2020.1 to 2021.1 failed before completing on all nodes. Use this procedure only for nodes you upgraded to 2021.1.
|
||||
|
||||
|
||||
Scylla rollback is a rolling procedure that does **not** require a full cluster shutdown.
|
||||
For each of the nodes rollback to 2020.1, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old Scylla packages
|
||||
* Restore the configuration file
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the previous release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/yum.repos.d/scylla.repo
|
||||
|
||||
2. Update the `Scylla RPM Enterprise 2020.1 repo <https://www.scylladb.com/customer-portal/?product=ent&platform=centos7&version=stable-release-2020.1>`_ to **2020.1**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum clean all
|
||||
sudo rm -rf /var/cache/yum
|
||||
sudo yum remove scylla\*tools-core
|
||||
sudo yum downgrade scylla\* -y
|
||||
sudo yum install scylla-enterprise
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-2020.1 /etc/scylla/scylla.yaml
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from the previous snapshot, as 2021.1 uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
sudo chown -R scylla:scylla /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check the upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,7 +0,0 @@
|
||||
.. |OS| replace:: 16.04
|
||||
.. |ROLLBACK| replace:: rollback
|
||||
.. _ROLLBACK: /upgrade/upgrade-enterprise/upgrade-guide-from-2020.1-to-2021.1/upgrade-guide-from-2020.1-to-2021.1-ubuntu-16-04/#rollback-procedure
|
||||
.. |APT| replace:: Scylla Enterprise Deb repo
|
||||
.. _APT: https://www.scylladb.com/customer-portal/?product=ent&platform=ubuntu-16.04&version=stable-release-2021.1
|
||||
.. |OPENJDK| replace:: openjdk-8-jre-headless
|
||||
.. include:: /upgrade/_common/upgrade-guide-from-2020.1-to-2021.1-ubuntu-and-debian.rst
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user