Compare commits
123 Commits
next
...
scylla-5.4
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
cf42ca0c2a | ||
|
|
62d8c7274a | ||
|
|
8080c15d7a | ||
|
|
8398f361cd | ||
|
|
dba6070794 | ||
|
|
0a6a52e08c | ||
|
|
25c0510015 | ||
|
|
311e31b36f | ||
|
|
6a6a4fde79 | ||
|
|
390414c99e | ||
|
|
26b812067b | ||
|
|
e83c4cc75c | ||
|
|
df1843311a | ||
|
|
fcaae2ea78 | ||
|
|
a1b6edd5d3 | ||
|
|
6c625e8cd3 | ||
|
|
10df72ed04 | ||
|
|
d4788406d4 | ||
|
|
081a36e34f | ||
|
|
c0c7de8fd1 | ||
|
|
aee9947f6c | ||
|
|
6fdfec5282 | ||
|
|
50a5c5379a | ||
|
|
938b993331 | ||
|
|
7971abb8e3 | ||
|
|
65fb562ae3 | ||
|
|
97a9f1dc7b | ||
|
|
7f629df6fd | ||
|
|
3ff8051532 | ||
|
|
e5dcef32ef | ||
|
|
199cfd0784 | ||
|
|
5d88e997ef | ||
|
|
7bb6386c14 | ||
|
|
993e6997c0 | ||
|
|
8b487be054 | ||
|
|
346e883dfc | ||
|
|
ceffbdf832 | ||
|
|
da6a87057f | ||
|
|
ffb580df71 | ||
|
|
a983c009cb | ||
|
|
00f04e0f94 | ||
|
|
0ebcc21193 | ||
|
|
ff596f9d9d | ||
|
|
e3153dd5b0 | ||
|
|
f126ccb2e9 | ||
|
|
d8586fd101 | ||
|
|
a228d09017 | ||
|
|
c5f2095f6e | ||
|
|
3d22f42cf9 | ||
|
|
8ca5794756 | ||
|
|
abeeefb427 | ||
|
|
9c482ff262 | ||
|
|
bfc98d1909 | ||
|
|
2cef52aeaa | ||
|
|
a55561fc64 | ||
|
|
7288bdfe09 | ||
|
|
ac7ed6857a | ||
|
|
bc8ff68cf6 | ||
|
|
0974ef893e | ||
|
|
9fc4c265a5 | ||
|
|
0518e47daf | ||
|
|
1e8eb6172a | ||
|
|
14814c972e | ||
|
|
7a67db594a | ||
|
|
5434fcb5a8 | ||
|
|
b4ef2248cc | ||
|
|
21996e12ae | ||
|
|
df7b96a092 | ||
|
|
5df85094d9 | ||
|
|
a0ca8900e1 | ||
|
|
98bd287177 | ||
|
|
c4a249022f | ||
|
|
58a89e7a42 | ||
|
|
1a0424db01 | ||
|
|
6d7919041b | ||
|
|
17c15f6222 | ||
|
|
91d1c9153b | ||
|
|
95364e2454 | ||
|
|
6d779f58a9 | ||
|
|
b956646ba2 | ||
|
|
62b93018ac | ||
|
|
b0410c9391 | ||
|
|
6f073dfa54 | ||
|
|
a24b53e6bb | ||
|
|
219adcea71 | ||
|
|
6d01d01deb | ||
|
|
b1f54efc2d | ||
|
|
bc1202aab2 | ||
|
|
2cb709461c | ||
|
|
44c72f6e56 | ||
|
|
6943447c6a | ||
|
|
b259bb43c6 | ||
|
|
88e96def63 | ||
|
|
187e275147 | ||
|
|
7926e4e7eb | ||
|
|
23e4762baa | ||
|
|
1dad9cdfdf | ||
|
|
1bee785734 | ||
|
|
df61c2c2ce | ||
|
|
20b5896b7a | ||
|
|
eff8157cea | ||
|
|
043dd5cc12 | ||
|
|
3f66f18f85 | ||
|
|
9c7454993f | ||
|
|
7c38cd9359 | ||
|
|
66898b2144 | ||
|
|
9e33771e1b | ||
|
|
b25859d6de | ||
|
|
38a3fd4708 | ||
|
|
66be0fc1eb | ||
|
|
4345b26eb2 | ||
|
|
c4e8557afa | ||
|
|
6d91d560ec | ||
|
|
9c37f5e02f | ||
|
|
ed7b3e2325 | ||
|
|
1100a0b176 | ||
|
|
2aa29763af | ||
|
|
24efacf90d | ||
|
|
1639a468df | ||
|
|
a0766ac236 | ||
|
|
fa0f382a82 | ||
|
|
37fd8a4c36 | ||
|
|
83f7d0073a |
2
.gitmodules
vendored
2
.gitmodules
vendored
@@ -1,6 +1,6 @@
|
||||
[submodule "seastar"]
|
||||
path = seastar
|
||||
url = ../seastar
|
||||
url = ../scylla-seastar
|
||||
ignore = dirty
|
||||
[submodule "swagger-ui"]
|
||||
path = swagger-ui
|
||||
|
||||
@@ -78,7 +78,7 @@ fi
|
||||
|
||||
# Default scylla product/version tags
|
||||
PRODUCT=scylla
|
||||
VERSION=5.4.0-dev
|
||||
VERSION=5.4.3
|
||||
|
||||
if test -f version
|
||||
then
|
||||
|
||||
@@ -84,6 +84,14 @@
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
},
|
||||
{
|
||||
"name":"flush_memtables",
|
||||
"description":"Controls flushing of memtables before compaction (true by default). Set to \"false\" to skip automatic flushing of memtables before compaction, e.g. when the table is flushed explicitly before invoking the compaction api.",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"boolean",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"split_output",
|
||||
"description":"true if the output of the major compaction should be split in several sstables",
|
||||
|
||||
43
api/api-doc/raft.json
Normal file
43
api/api-doc/raft.json
Normal file
@@ -0,0 +1,43 @@
|
||||
{
|
||||
"apiVersion":"0.0.1",
|
||||
"swaggerVersion":"1.2",
|
||||
"basePath":"{{Protocol}}://{{Host}}",
|
||||
"resourcePath":"/raft",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"apis":[
|
||||
{
|
||||
"path":"/raft/trigger_snapshot/{group_id}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Triggers snapshot creation and log truncation for the given Raft group",
|
||||
"type":"string",
|
||||
"nickname":"trigger_snapshot",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"group_id",
|
||||
"description":"The ID of the group which should get snapshotted",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
},
|
||||
{
|
||||
"name":"timeout",
|
||||
"description":"Timeout in seconds after which the endpoint returns a failure. If not provided, 60s is used.",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"long",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -701,6 +701,30 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/storage_service/compact",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Forces major compaction in all keyspaces",
|
||||
"type":"void",
|
||||
"nickname":"force_compaction",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"flush_memtables",
|
||||
"description":"Controls flushing of memtables before compaction (true by default). Set to \"false\" to skip automatic flushing of memtables before compaction, e.g. when tables were flushed explicitly before invoking the compaction api.",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"boolean",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/storage_service/keyspace_compaction/{keyspace}",
|
||||
"operations":[
|
||||
@@ -728,6 +752,14 @@
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"flush_memtables",
|
||||
"description":"Controls flushing of memtables before compaction (true by default). Set to \"false\" to skip automatic flushing of memtables before compaction, e.g. when tables were flushed explicitly before invoking the compaction api.",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"boolean",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -912,6 +944,21 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/storage_service/flush",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Flush all memtables in all keyspaces.",
|
||||
"type":"void",
|
||||
"nickname":"force_flush",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/storage_service/keyspace_flush/{keyspace}",
|
||||
"operations":[
|
||||
|
||||
13
api/api.cc
13
api/api.cc
@@ -31,6 +31,7 @@
|
||||
#include "api/config.hh"
|
||||
#include "task_manager.hh"
|
||||
#include "task_manager_test.hh"
|
||||
#include "raft.hh"
|
||||
|
||||
logging::logger apilog("api");
|
||||
|
||||
@@ -294,6 +295,18 @@ future<> set_server_task_manager_test(http_context& ctx) {
|
||||
|
||||
#endif
|
||||
|
||||
future<> set_server_raft(http_context& ctx, sharded<service::raft_group_registry>& raft_gr) {
|
||||
auto rb = std::make_shared<api_registry_builder>(ctx.api_doc);
|
||||
return ctx.http_server.set_routes([rb, &ctx, &raft_gr] (routes& r) {
|
||||
rb->register_function(r, "raft", "The Raft API");
|
||||
set_raft(ctx, r, raft_gr);
|
||||
});
|
||||
}
|
||||
|
||||
future<> unset_server_raft(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_raft(ctx, r); });
|
||||
}
|
||||
|
||||
void req_params::process(const request& req) {
|
||||
// Process mandatory parameters
|
||||
for (auto& [name, ent] : params) {
|
||||
|
||||
@@ -23,6 +23,7 @@ class load_meter;
|
||||
class storage_proxy;
|
||||
class storage_service;
|
||||
class raft_group0_client;
|
||||
class raft_group_registry;
|
||||
|
||||
} // namespace service
|
||||
|
||||
@@ -117,5 +118,7 @@ future<> set_server_compaction_manager(http_context& ctx);
|
||||
future<> set_server_done(http_context& ctx);
|
||||
future<> set_server_task_manager(http_context& ctx, lw_shared_ptr<db::config> cfg);
|
||||
future<> set_server_task_manager_test(http_context& ctx);
|
||||
future<> set_server_raft(http_context&, sharded<service::raft_group_registry>&);
|
||||
future<> unset_server_raft(http_context&);
|
||||
|
||||
}
|
||||
|
||||
@@ -1047,12 +1047,19 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::force_major_compaction.set(r, [&ctx](std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
if (req->get_query_param("split_output") != "") {
|
||||
auto params = req_params({
|
||||
std::pair("name", mandatory::yes),
|
||||
std::pair("flush_memtables", mandatory::no),
|
||||
std::pair("split_output", mandatory::no),
|
||||
});
|
||||
params.process(*req);
|
||||
if (params.get("split_output")) {
|
||||
fail(unimplemented::cause::API);
|
||||
}
|
||||
auto [ks, cf] = parse_fully_qualified_cf_name(*params.get("name"));
|
||||
auto flush = params.get_as<bool>("flush_memtables").value_or(true);
|
||||
apilog.info("column_family/force_major_compaction: name={} flush={}", req->param["name"], flush);
|
||||
|
||||
apilog.info("column_family/force_major_compaction: name={}", req->param["name"]);
|
||||
auto [ks, cf] = parse_fully_qualified_cf_name(req->param["name"]);
|
||||
auto keyspace = validate_keyspace(ctx, ks);
|
||||
std::vector<table_info> table_infos = {table_info{
|
||||
.name = cf,
|
||||
@@ -1060,7 +1067,11 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
}};
|
||||
|
||||
auto& compaction_module = ctx.db.local().get_compaction_manager().get_task_manager_module();
|
||||
auto task = co_await compaction_module.make_and_start_task<major_keyspace_compaction_task_impl>({}, std::move(keyspace), ctx.db, std::move(table_infos));
|
||||
std::optional<major_compaction_task_impl::flush_mode> fmopt;
|
||||
if (!flush) {
|
||||
fmopt = major_compaction_task_impl::flush_mode::skip;
|
||||
}
|
||||
auto task = co_await compaction_module.make_and_start_task<major_keyspace_compaction_task_impl>({}, std::move(keyspace), tasks::task_id::create_null_id(), ctx.db, std::move(table_infos), fmopt);
|
||||
co_await task->done();
|
||||
co_return json_void();
|
||||
});
|
||||
|
||||
@@ -18,37 +18,43 @@ namespace fd = httpd::failure_detector_json;
|
||||
|
||||
void set_failure_detector(http_context& ctx, routes& r, gms::gossiper& g) {
|
||||
fd::get_all_endpoint_states.set(r, [&g](std::unique_ptr<request> req) {
|
||||
std::vector<fd::endpoint_state> res;
|
||||
res.reserve(g.num_endpoints());
|
||||
g.for_each_endpoint_state([&] (const gms::inet_address& addr, const gms::endpoint_state& eps) {
|
||||
fd::endpoint_state val;
|
||||
val.addrs = fmt::to_string(addr);
|
||||
val.is_alive = g.is_alive(addr);
|
||||
val.generation = eps.get_heart_beat_state().get_generation().value();
|
||||
val.version = eps.get_heart_beat_state().get_heart_beat_version().value();
|
||||
val.update_time = eps.get_update_timestamp().time_since_epoch().count();
|
||||
for (const auto& [as_type, app_state] : eps.get_application_state_map()) {
|
||||
fd::version_value version_val;
|
||||
// We return the enum index and not it's name to stay compatible to origin
|
||||
// method that the state index are static but the name can be changed.
|
||||
version_val.application_state = static_cast<std::underlying_type<gms::application_state>::type>(as_type);
|
||||
version_val.value = app_state.value();
|
||||
version_val.version = app_state.version().value();
|
||||
val.application_state.push(version_val);
|
||||
}
|
||||
res.emplace_back(std::move(val));
|
||||
return g.container().invoke_on(0, [] (gms::gossiper& g) {
|
||||
std::vector<fd::endpoint_state> res;
|
||||
res.reserve(g.num_endpoints());
|
||||
g.for_each_endpoint_state([&] (const gms::inet_address& addr, const gms::endpoint_state& eps) {
|
||||
fd::endpoint_state val;
|
||||
val.addrs = fmt::to_string(addr);
|
||||
val.is_alive = g.is_alive(addr);
|
||||
val.generation = eps.get_heart_beat_state().get_generation().value();
|
||||
val.version = eps.get_heart_beat_state().get_heart_beat_version().value();
|
||||
val.update_time = eps.get_update_timestamp().time_since_epoch().count();
|
||||
for (const auto& [as_type, app_state] : eps.get_application_state_map()) {
|
||||
fd::version_value version_val;
|
||||
// We return the enum index and not it's name to stay compatible to origin
|
||||
// method that the state index are static but the name can be changed.
|
||||
version_val.application_state = static_cast<std::underlying_type<gms::application_state>::type>(as_type);
|
||||
version_val.value = app_state.value();
|
||||
version_val.version = app_state.version().value();
|
||||
val.application_state.push(version_val);
|
||||
}
|
||||
res.emplace_back(std::move(val));
|
||||
});
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
|
||||
fd::get_up_endpoint_count.set(r, [&g](std::unique_ptr<request> req) {
|
||||
int res = g.get_up_endpoint_count();
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
return g.container().invoke_on(0, [] (gms::gossiper& g) {
|
||||
int res = g.get_up_endpoint_count();
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
});
|
||||
|
||||
fd::get_down_endpoint_count.set(r, [&g](std::unique_ptr<request> req) {
|
||||
int res = g.get_down_endpoint_count();
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
return g.container().invoke_on(0, [] (gms::gossiper& g) {
|
||||
int res = g.get_down_endpoint_count();
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
});
|
||||
|
||||
fd::get_phi_convict_threshold.set(r, [] (std::unique_ptr<request> req) {
|
||||
@@ -56,11 +62,13 @@ void set_failure_detector(http_context& ctx, routes& r, gms::gossiper& g) {
|
||||
});
|
||||
|
||||
fd::get_simple_states.set(r, [&g] (std::unique_ptr<request> req) {
|
||||
std::map<sstring, sstring> nodes_status;
|
||||
g.for_each_endpoint_state([&] (const gms::inet_address& node, const gms::endpoint_state&) {
|
||||
nodes_status.emplace(node.to_sstring(), g.is_alive(node) ? "UP" : "DOWN");
|
||||
return g.container().invoke_on(0, [] (gms::gossiper& g) {
|
||||
std::map<sstring, sstring> nodes_status;
|
||||
g.for_each_endpoint_state([&] (const gms::inet_address& node, const gms::endpoint_state&) {
|
||||
nodes_status.emplace(node.to_sstring(), g.is_alive(node) ? "UP" : "DOWN");
|
||||
});
|
||||
return make_ready_future<json::json_return_type>(map_to_key_value<fd::mapper>(nodes_status));
|
||||
});
|
||||
return make_ready_future<json::json_return_type>(map_to_key_value<fd::mapper>(nodes_status));
|
||||
});
|
||||
|
||||
fd::set_phi_convict_threshold.set(r, [](std::unique_ptr<request> req) {
|
||||
@@ -71,13 +79,15 @@ void set_failure_detector(http_context& ctx, routes& r, gms::gossiper& g) {
|
||||
});
|
||||
|
||||
fd::get_endpoint_state.set(r, [&g] (std::unique_ptr<request> req) {
|
||||
auto state = g.get_endpoint_state_ptr(gms::inet_address(req->param["addr"]));
|
||||
if (!state) {
|
||||
return make_ready_future<json::json_return_type>(format("unknown endpoint {}", req->param["addr"]));
|
||||
}
|
||||
std::stringstream ss;
|
||||
g.append_endpoint_state(ss, *state);
|
||||
return make_ready_future<json::json_return_type>(sstring(ss.str()));
|
||||
return g.container().invoke_on(0, [req = std::move(req)] (gms::gossiper& g) {
|
||||
auto state = g.get_endpoint_state_ptr(gms::inet_address(req->param["addr"]));
|
||||
if (!state) {
|
||||
return make_ready_future<json::json_return_type>(format("unknown endpoint {}", req->param["addr"]));
|
||||
}
|
||||
std::stringstream ss;
|
||||
g.append_endpoint_state(ss, *state);
|
||||
return make_ready_future<json::json_return_type>(sstring(ss.str()));
|
||||
});
|
||||
});
|
||||
|
||||
fd::get_endpoint_phi_values.set(r, [](std::unique_ptr<request> req) {
|
||||
|
||||
70
api/raft.cc
Normal file
70
api/raft.cc
Normal file
@@ -0,0 +1,70 @@
|
||||
/*
|
||||
* Copyright (C) 2024-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include <seastar/core/coroutine.hh>
|
||||
|
||||
#include "api/api.hh"
|
||||
#include "api/api-doc/raft.json.hh"
|
||||
|
||||
#include "service/raft/raft_group_registry.hh"
|
||||
|
||||
using namespace seastar::httpd;
|
||||
|
||||
extern logging::logger apilog;
|
||||
|
||||
namespace api {
|
||||
|
||||
namespace r = httpd::raft_json;
|
||||
using namespace json;
|
||||
|
||||
void set_raft(http_context&, httpd::routes& r, sharded<service::raft_group_registry>& raft_gr) {
|
||||
r::trigger_snapshot.set(r, [&raft_gr] (std::unique_ptr<http::request> req) -> future<json_return_type> {
|
||||
raft::group_id gid{utils::UUID{req->param["group_id"]}};
|
||||
auto timeout_dur = std::invoke([timeout_str = req->get_query_param("timeout")] {
|
||||
if (timeout_str.empty()) {
|
||||
return std::chrono::seconds{60};
|
||||
}
|
||||
auto dur = std::stoll(timeout_str);
|
||||
if (dur <= 0) {
|
||||
throw std::runtime_error{"Timeout must be a positive number."};
|
||||
}
|
||||
return std::chrono::seconds{dur};
|
||||
});
|
||||
|
||||
std::atomic<bool> found_srv{false};
|
||||
co_await raft_gr.invoke_on_all([gid, timeout_dur, &found_srv] (service::raft_group_registry& raft_gr) -> future<> {
|
||||
auto* srv = raft_gr.find_server(gid);
|
||||
if (!srv) {
|
||||
co_return;
|
||||
}
|
||||
|
||||
found_srv = true;
|
||||
abort_on_expiry aoe(lowres_clock::now() + timeout_dur);
|
||||
apilog.info("Triggering Raft group {} snapshot", gid);
|
||||
auto result = co_await srv->trigger_snapshot(&aoe.abort_source());
|
||||
if (result) {
|
||||
apilog.info("New snapshot for Raft group {} created", gid);
|
||||
} else {
|
||||
apilog.info("Could not create new snapshot for Raft group {}, no new entries applied", gid);
|
||||
}
|
||||
});
|
||||
|
||||
if (!found_srv) {
|
||||
throw std::runtime_error{fmt::format("Server for group ID {} not found", gid)};
|
||||
}
|
||||
|
||||
co_return json_void{};
|
||||
});
|
||||
}
|
||||
|
||||
void unset_raft(http_context&, httpd::routes& r) {
|
||||
r::trigger_snapshot.unset(r);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
18
api/raft.hh
Normal file
18
api/raft.hh
Normal file
@@ -0,0 +1,18 @@
|
||||
/*
|
||||
* Copyright (C) 2023-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "api_init.hh"
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_raft(http_context& ctx, httpd::routes& r, sharded<service::raft_group_registry>& raft_gr);
|
||||
void unset_raft(http_context& ctx, httpd::routes& r);
|
||||
|
||||
}
|
||||
@@ -250,17 +250,21 @@ future<json::json_return_type> set_tables_tombstone_gc(http_context& ctx, const
|
||||
}
|
||||
|
||||
void set_transport_controller(http_context& ctx, routes& r, cql_transport::controller& ctl) {
|
||||
ss::start_native_transport.set(r, [&ctl](std::unique_ptr<http::request> req) {
|
||||
ss::start_native_transport.set(r, [&ctx, &ctl](std::unique_ptr<http::request> req) {
|
||||
return smp::submit_to(0, [&] {
|
||||
return ctl.start_server();
|
||||
return with_scheduling_group(ctx.db.local().get_statement_scheduling_group(), [&ctl] {
|
||||
return ctl.start_server();
|
||||
});
|
||||
}).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
ss::stop_native_transport.set(r, [&ctl](std::unique_ptr<http::request> req) {
|
||||
ss::stop_native_transport.set(r, [&ctx, &ctl](std::unique_ptr<http::request> req) {
|
||||
return smp::submit_to(0, [&] {
|
||||
return ctl.request_stop_server();
|
||||
return with_scheduling_group(ctx.db.local().get_statement_scheduling_group(), [&ctl] {
|
||||
return ctl.request_stop_server();
|
||||
});
|
||||
}).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
@@ -282,17 +286,21 @@ void unset_transport_controller(http_context& ctx, routes& r) {
|
||||
}
|
||||
|
||||
void set_rpc_controller(http_context& ctx, routes& r, thrift_controller& ctl) {
|
||||
ss::stop_rpc_server.set(r, [&ctl](std::unique_ptr<http::request> req) {
|
||||
ss::stop_rpc_server.set(r, [&ctx, &ctl](std::unique_ptr<http::request> req) {
|
||||
return smp::submit_to(0, [&] {
|
||||
return ctl.request_stop_server();
|
||||
return with_scheduling_group(ctx.db.local().get_statement_scheduling_group(), [&ctl] {
|
||||
return ctl.request_stop_server();
|
||||
});
|
||||
}).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
ss::start_rpc_server.set(r, [&ctl](std::unique_ptr<http::request> req) {
|
||||
ss::start_rpc_server.set(r, [&ctx, &ctl](std::unique_ptr<http::request> req) {
|
||||
return smp::submit_to(0, [&] {
|
||||
return ctl.start_server();
|
||||
return with_scheduling_group(ctx.db.local().get_statement_scheduling_group(), [&ctl] {
|
||||
return ctl.start_server();
|
||||
});
|
||||
}).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
@@ -669,14 +677,50 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
});
|
||||
});
|
||||
|
||||
ss::force_keyspace_compaction.set(r, [&ctx](std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
ss::force_compaction.set(r, [&ctx](std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto& db = ctx.db;
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto table_infos = parse_table_infos(keyspace, ctx, req->query_parameters, "cf");
|
||||
apilog.debug("force_keyspace_compaction: keyspace={} tables={}", keyspace, table_infos);
|
||||
auto params = req_params({
|
||||
std::pair("flush_memtables", mandatory::no),
|
||||
});
|
||||
params.process(*req);
|
||||
auto flush = params.get_as<bool>("flush_memtables").value_or(true);
|
||||
apilog.info("force_compaction: flush={}", flush);
|
||||
|
||||
auto& compaction_module = db.local().get_compaction_manager().get_task_manager_module();
|
||||
auto task = co_await compaction_module.make_and_start_task<major_keyspace_compaction_task_impl>({}, std::move(keyspace), db, table_infos);
|
||||
std::optional<major_compaction_task_impl::flush_mode> fmopt;
|
||||
if (!flush) {
|
||||
fmopt = major_compaction_task_impl::flush_mode::skip;
|
||||
}
|
||||
auto task = co_await compaction_module.make_and_start_task<global_major_compaction_task_impl>({}, db, fmopt);
|
||||
try {
|
||||
co_await task->done();
|
||||
} catch (...) {
|
||||
apilog.error("force_compaction failed: {}", std::current_exception());
|
||||
throw;
|
||||
}
|
||||
|
||||
co_return json_void();
|
||||
});
|
||||
|
||||
ss::force_keyspace_compaction.set(r, [&ctx](std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto& db = ctx.db;
|
||||
auto params = req_params({
|
||||
std::pair("keyspace", mandatory::yes),
|
||||
std::pair("cf", mandatory::no),
|
||||
std::pair("flush_memtables", mandatory::no),
|
||||
});
|
||||
params.process(*req);
|
||||
auto keyspace = validate_keyspace(ctx, *params.get("keyspace"));
|
||||
auto table_infos = parse_table_infos(keyspace, ctx, params.get("cf").value_or(""));
|
||||
auto flush = params.get_as<bool>("flush_memtables").value_or(true);
|
||||
apilog.debug("force_keyspace_compaction: keyspace={} tables={}, flush={}", keyspace, table_infos, flush);
|
||||
|
||||
auto& compaction_module = db.local().get_compaction_manager().get_task_manager_module();
|
||||
std::optional<major_compaction_task_impl::flush_mode> fmopt;
|
||||
if (!flush) {
|
||||
fmopt = major_compaction_task_impl::flush_mode::skip;
|
||||
}
|
||||
auto task = co_await compaction_module.make_and_start_task<major_keyspace_compaction_task_impl>({}, std::move(keyspace), tasks::task_id::create_null_id(), db, table_infos, fmopt);
|
||||
try {
|
||||
co_await task->done();
|
||||
} catch (...) {
|
||||
@@ -743,6 +787,14 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
co_return json::json_return_type(0);
|
||||
}));
|
||||
|
||||
ss::force_flush.set(r, [&ctx](std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
apilog.info("flush all tables");
|
||||
co_await ctx.db.invoke_on_all([] (replica::database& db) {
|
||||
return db.flush_all_tables();
|
||||
});
|
||||
co_return json_void();
|
||||
});
|
||||
|
||||
ss::force_keyspace_flush.set(r, [&ctx](std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto column_families = parse_tables(keyspace, ctx, req->query_parameters, "cf");
|
||||
@@ -1387,10 +1439,12 @@ void unset_storage_service(http_context& ctx, routes& r) {
|
||||
ss::get_current_generation_number.unset(r);
|
||||
ss::get_natural_endpoints.unset(r);
|
||||
ss::cdc_streams_check_and_repair.unset(r);
|
||||
ss::force_compaction.unset(r);
|
||||
ss::force_keyspace_compaction.unset(r);
|
||||
ss::force_keyspace_cleanup.unset(r);
|
||||
ss::perform_keyspace_offstrategy_compaction.unset(r);
|
||||
ss::upgrade_sstables.unset(r);
|
||||
ss::force_flush.unset(r);
|
||||
ss::force_keyspace_flush.unset(r);
|
||||
ss::decommission.unset(r);
|
||||
ss::move.unset(r);
|
||||
|
||||
@@ -232,8 +232,8 @@ void set_task_manager(http_context& ctx, routes& r, db::config& cfg) {
|
||||
while (!q.empty()) {
|
||||
auto& current = q.front();
|
||||
res.push_back(co_await retrieve_status(current));
|
||||
for (size_t i = 0; i < current->get_children().size(); ++i) {
|
||||
q.push(co_await current->get_children()[i].copy());
|
||||
for (auto& child: current->get_children()) {
|
||||
q.push(co_await child.copy());
|
||||
}
|
||||
q.pop();
|
||||
}
|
||||
|
||||
@@ -245,6 +245,8 @@ future<authenticated_user> password_authenticator::authenticate(
|
||||
std::throw_with_nested(exceptions::authentication_exception(e.what()));
|
||||
} catch (exceptions::authentication_exception& e) {
|
||||
std::throw_with_nested(e);
|
||||
} catch (exceptions::unavailable_exception& e) {
|
||||
std::throw_with_nested(exceptions::authentication_exception(e.get_message()));
|
||||
} catch (...) {
|
||||
std::throw_with_nested(exceptions::authentication_exception("authentication failed"));
|
||||
}
|
||||
|
||||
@@ -98,7 +98,16 @@ class cache_flat_mutation_reader final : public flat_mutation_reader_v2::impl {
|
||||
bool _next_row_in_range = false;
|
||||
bool _has_rt = false;
|
||||
|
||||
// True iff current population interval, since the previous clustering row, starts before all clustered rows.
|
||||
// True iff current population interval starts at before_all_clustered_rows
|
||||
// and _last_row is unset. (And the read isn't reverse).
|
||||
//
|
||||
// Rationale: in the "most general" step of cache population,
|
||||
// we mark the `(_last_row, ...] `range as continuous, which can involve doing something to `_last_row`.
|
||||
// But when populating the range `(before_all_clustered_rows, ...)`,
|
||||
// a rows_entry at `before_all_clustered_rows` needn't exist.
|
||||
// Thus this case needs a special treatment which doesn't involve `_last_row`.
|
||||
// And for that, this case it has to be recognized (via this flag).
|
||||
//
|
||||
// We cannot just look at _lower_bound, because emission of range tombstones changes _lower_bound and
|
||||
// because we mark clustering intervals as continuous when consuming a clustering_row, it would prevent
|
||||
// us from marking the interval as continuous.
|
||||
@@ -147,6 +156,8 @@ class cache_flat_mutation_reader final : public flat_mutation_reader_v2::impl {
|
||||
bool maybe_add_to_cache(const range_tombstone_change& rtc);
|
||||
void maybe_add_to_cache(const static_row& sr);
|
||||
void maybe_set_static_row_continuous();
|
||||
void set_rows_entry_continuous(rows_entry& e);
|
||||
void restore_continuity_after_insertion(const mutation_partition::rows_type::iterator&);
|
||||
void finish_reader() {
|
||||
push_mutation_fragment(*_schema, _permit, partition_end());
|
||||
_end_of_stream = true;
|
||||
@@ -341,7 +352,7 @@ future<> cache_flat_mutation_reader::do_fill_buffer() {
|
||||
});
|
||||
}
|
||||
_state = state::reading_from_underlying;
|
||||
_population_range_starts_before_all_rows = _lower_bound.is_before_all_clustered_rows(*_schema) && !_read_context.is_reversed();
|
||||
_population_range_starts_before_all_rows = _lower_bound.is_before_all_clustered_rows(*_schema) && !_read_context.is_reversed() && !_last_row;
|
||||
_underlying_upper_bound = _next_row_in_range ? position_in_partition::before_key(_next_row.position())
|
||||
: position_in_partition(_upper_bound);
|
||||
if (!_read_context.partition_exists()) {
|
||||
@@ -463,14 +474,15 @@ future<> cache_flat_mutation_reader::read_from_underlying() {
|
||||
if (insert_result.second) {
|
||||
clogger.trace("csm {}: L{}: inserted dummy at {}", fmt::ptr(this), __LINE__, _upper_bound);
|
||||
_snp->tracker()->insert(*insert_result.first);
|
||||
restore_continuity_after_insertion(insert_result.first);
|
||||
}
|
||||
if (_read_context.is_reversed()) [[unlikely]] {
|
||||
clogger.trace("csm {}: set_continuous({}), prev={}, rt={}", fmt::ptr(this), _last_row.position(), insert_result.first->position(), _current_tombstone);
|
||||
_last_row->set_continuous(true);
|
||||
set_rows_entry_continuous(*_last_row);
|
||||
_last_row->set_range_tombstone(_current_tombstone);
|
||||
} else {
|
||||
clogger.trace("csm {}: set_continuous({}), prev={}, rt={}", fmt::ptr(this), insert_result.first->position(), _last_row.position(), _current_tombstone);
|
||||
insert_result.first->set_continuous(true);
|
||||
set_rows_entry_continuous(*insert_result.first);
|
||||
insert_result.first->set_range_tombstone(_current_tombstone);
|
||||
}
|
||||
maybe_drop_last_entry(_current_tombstone);
|
||||
@@ -505,11 +517,11 @@ bool cache_flat_mutation_reader::ensure_population_lower_bound() {
|
||||
rows_entry::tri_compare cmp(*_schema);
|
||||
partition_snapshot_row_cursor cur(*_schema, *_snp, false, _read_context.is_reversed());
|
||||
|
||||
if (!cur.advance_to(_last_row.position())) {
|
||||
if (!cur.advance_to(to_query_domain(_last_row.position()))) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (cmp(cur.position(), _last_row.position()) != 0) {
|
||||
if (cmp(cur.table_position(), _last_row.position()) != 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -531,7 +543,7 @@ void cache_flat_mutation_reader::maybe_update_continuity() {
|
||||
position_in_partition::equal_compare eq(*_schema);
|
||||
if (can_populate()
|
||||
&& ensure_population_lower_bound()
|
||||
&& !eq(_last_row.position(), _next_row.position())) {
|
||||
&& !eq(_last_row.position(), _next_row.table_position())) {
|
||||
with_allocator(_snp->region().allocator(), [&] {
|
||||
rows_entry& e = _next_row.ensure_entry_in_latest().row;
|
||||
auto& rows = _snp->version()->partition().mutable_clustered_rows();
|
||||
@@ -553,14 +565,14 @@ void cache_flat_mutation_reader::maybe_update_continuity() {
|
||||
}
|
||||
clogger.trace("csm {}: set_continuous({}), prev={}, rt={}", fmt::ptr(this), insert_result.first->position(),
|
||||
_last_row.position(), _current_tombstone);
|
||||
insert_result.first->set_continuous(true);
|
||||
set_rows_entry_continuous(*insert_result.first);
|
||||
insert_result.first->set_range_tombstone(_current_tombstone);
|
||||
clogger.trace("csm {}: set_continuous({})", fmt::ptr(this), _last_row.position());
|
||||
_last_row->set_continuous(true);
|
||||
set_rows_entry_continuous(*_last_row);
|
||||
});
|
||||
} else {
|
||||
clogger.trace("csm {}: set_continuous({}), rt={}", fmt::ptr(this), _last_row.position(), _current_tombstone);
|
||||
_last_row->set_continuous(true);
|
||||
set_rows_entry_continuous(*_last_row);
|
||||
_last_row->set_range_tombstone(_current_tombstone);
|
||||
}
|
||||
} else {
|
||||
@@ -578,18 +590,18 @@ void cache_flat_mutation_reader::maybe_update_continuity() {
|
||||
if (insert_result.second) {
|
||||
clogger.trace("csm {}: L{}: inserted dummy at {}", fmt::ptr(this), __LINE__, insert_result.first->position());
|
||||
_snp->tracker()->insert(*insert_result.first);
|
||||
clogger.trace("csm {}: set_continuous({}), prev={}, rt={}", fmt::ptr(this), insert_result.first->position(),
|
||||
_last_row.position(), _current_tombstone);
|
||||
set_rows_entry_continuous(*insert_result.first);
|
||||
insert_result.first->set_range_tombstone(_current_tombstone);
|
||||
}
|
||||
clogger.trace("csm {}: set_continuous({}), prev={}, rt={}", fmt::ptr(this), insert_result.first->position(),
|
||||
_last_row.position(), _current_tombstone);
|
||||
insert_result.first->set_continuous(true);
|
||||
insert_result.first->set_range_tombstone(_current_tombstone);
|
||||
clogger.trace("csm {}: set_continuous({})", fmt::ptr(this), e.position());
|
||||
e.set_continuous(true);
|
||||
set_rows_entry_continuous(e);
|
||||
});
|
||||
} else {
|
||||
clogger.trace("csm {}: set_continuous({}), rt={}", fmt::ptr(this), e.position(), _current_tombstone);
|
||||
e.set_range_tombstone(_current_tombstone);
|
||||
e.set_continuous(true);
|
||||
set_rows_entry_continuous(e);
|
||||
}
|
||||
}
|
||||
maybe_drop_last_entry(_current_tombstone);
|
||||
@@ -625,20 +637,21 @@ void cache_flat_mutation_reader::maybe_add_to_cache(const clustering_row& cr) {
|
||||
it = insert_result.first;
|
||||
if (insert_result.second) {
|
||||
_snp->tracker()->insert(*it);
|
||||
restore_continuity_after_insertion(it);
|
||||
}
|
||||
|
||||
rows_entry& e = *it;
|
||||
if (ensure_population_lower_bound()) {
|
||||
if (_read_context.is_reversed()) [[unlikely]] {
|
||||
clogger.trace("csm {}: set_continuous({})", fmt::ptr(this), _last_row.position());
|
||||
_last_row->set_continuous(true);
|
||||
set_rows_entry_continuous(*_last_row);
|
||||
// _current_tombstone must also apply to _last_row itself (if it's non-dummy)
|
||||
// because otherwise there would be a rtc after it, either creating a different entry,
|
||||
// or clearing _last_row if population did not happen.
|
||||
_last_row->set_range_tombstone(_current_tombstone);
|
||||
} else {
|
||||
clogger.trace("csm {}: set_continuous({})", fmt::ptr(this), e.position());
|
||||
e.set_continuous(true);
|
||||
set_rows_entry_continuous(e);
|
||||
e.set_range_tombstone(_current_tombstone);
|
||||
}
|
||||
} else {
|
||||
@@ -689,20 +702,25 @@ bool cache_flat_mutation_reader::maybe_add_to_cache(const range_tombstone_change
|
||||
it = insert_result.first;
|
||||
if (insert_result.second) {
|
||||
_snp->tracker()->insert(*it);
|
||||
restore_continuity_after_insertion(it);
|
||||
}
|
||||
|
||||
rows_entry& e = *it;
|
||||
if (ensure_population_lower_bound()) {
|
||||
// underlying may emit range_tombstone_change fragments with the same position.
|
||||
// In such case, the range to which the tombstone from the first fragment applies is empty and should be ignored.
|
||||
if (q_cmp(_last_row.position(), it->position()) < 0) {
|
||||
//
|
||||
// Note: we are using a query schema comparator to compare table schema positions here,
|
||||
// but this is okay because we are only checking for equality,
|
||||
// which is preserved by schema reversals.
|
||||
if (q_cmp(_last_row.position(), it->position()) != 0) {
|
||||
if (_read_context.is_reversed()) [[unlikely]] {
|
||||
clogger.trace("csm {}: set_continuous({}), rt={}", fmt::ptr(this), _last_row.position(), prev);
|
||||
_last_row->set_continuous(true);
|
||||
set_rows_entry_continuous(*_last_row);
|
||||
_last_row->set_range_tombstone(prev);
|
||||
} else {
|
||||
clogger.trace("csm {}: set_continuous({}), rt={}", fmt::ptr(this), e.position(), prev);
|
||||
e.set_continuous(true);
|
||||
set_rows_entry_continuous(e);
|
||||
e.set_range_tombstone(prev);
|
||||
}
|
||||
}
|
||||
@@ -1041,6 +1059,28 @@ void cache_flat_mutation_reader::maybe_set_static_row_continuous() {
|
||||
}
|
||||
}
|
||||
|
||||
// Last dummies can exist in a quasi-evicted state, where they are unlinked from LRU,
|
||||
// but still alive.
|
||||
// But while in this state, they mustn't carry any information (i.e. continuity),
|
||||
// due to the "older versions are evicted first" rule of MVCC.
|
||||
// Thus, when we make an entry continuous, we must ensure that it isn't an
|
||||
// unlinked last dummy.
|
||||
inline
|
||||
void cache_flat_mutation_reader::set_rows_entry_continuous(rows_entry& e) {
|
||||
e.set_continuous(true);
|
||||
if (!e.is_linked()) [[unlikely]] {
|
||||
_snp->tracker()->touch(e);
|
||||
}
|
||||
}
|
||||
|
||||
inline
|
||||
void cache_flat_mutation_reader::restore_continuity_after_insertion(const mutation_partition::rows_type::iterator& it) {
|
||||
if (auto x = std::next(it); x->continuous()) {
|
||||
it->set_continuous(true);
|
||||
it->set_range_tombstone(x->range_tombstone());
|
||||
}
|
||||
}
|
||||
|
||||
inline
|
||||
bool cache_flat_mutation_reader::can_populate() const {
|
||||
return _snp->at_latest_version() && _read_context.cache().phase_of(_read_context.key()) == _read_context.phase();
|
||||
|
||||
@@ -505,7 +505,7 @@ protected:
|
||||
auto max_sstable_size = std::max<uint64_t>(_max_sstable_size, 1);
|
||||
uint64_t estimated_sstables = std::max(1UL, uint64_t(ceil(double(_compacting_data_file_size) / max_sstable_size)));
|
||||
return std::min(uint64_t(ceil(double(_estimated_partitions) / estimated_sstables)),
|
||||
_table_s.get_compaction_strategy().adjust_partition_estimate(_ms_metadata, _estimated_partitions));
|
||||
_table_s.get_compaction_strategy().adjust_partition_estimate(_ms_metadata, _estimated_partitions, _schema));
|
||||
}
|
||||
|
||||
void setup_new_sstable(shared_sstable& sst) {
|
||||
@@ -1595,7 +1595,7 @@ private:
|
||||
uint64_t partitions_per_sstable(shard_id s) const {
|
||||
uint64_t estimated_sstables = std::max(uint64_t(1), uint64_t(ceil(double(_estimation_per_shard[s].estimated_size) / _max_sstable_size)));
|
||||
return std::min(uint64_t(ceil(double(_estimation_per_shard[s].estimated_partitions) / estimated_sstables)),
|
||||
_table_s.get_compaction_strategy().adjust_partition_estimate(_ms_metadata, _estimation_per_shard[s].estimated_partitions));
|
||||
_table_s.get_compaction_strategy().adjust_partition_estimate(_ms_metadata, _estimation_per_shard[s].estimated_partitions, _schema));
|
||||
}
|
||||
public:
|
||||
resharding_compaction(table_state& table_s, sstables::compaction_descriptor descriptor, compaction_data& cdata)
|
||||
@@ -1800,7 +1800,7 @@ get_fully_expired_sstables(const table_state& table_s, const std::vector<sstable
|
||||
int64_t min_timestamp = std::numeric_limits<int64_t>::max();
|
||||
|
||||
for (auto& sstable : overlapping) {
|
||||
auto gc_before = sstable->get_gc_before_for_fully_expire(compaction_time, table_s.get_tombstone_gc_state());
|
||||
auto gc_before = sstable->get_gc_before_for_fully_expire(compaction_time, table_s.get_tombstone_gc_state(), table_s.schema());
|
||||
if (sstable->get_max_local_deletion_time() >= gc_before) {
|
||||
min_timestamp = std::min(min_timestamp, sstable->get_stats_metadata().min_timestamp);
|
||||
}
|
||||
@@ -1819,7 +1819,7 @@ get_fully_expired_sstables(const table_state& table_s, const std::vector<sstable
|
||||
|
||||
// SStables that do not contain live data is added to list of possibly expired sstables.
|
||||
for (auto& candidate : compacting) {
|
||||
auto gc_before = candidate->get_gc_before_for_fully_expire(compaction_time, table_s.get_tombstone_gc_state());
|
||||
auto gc_before = candidate->get_gc_before_for_fully_expire(compaction_time, table_s.get_tombstone_gc_state(), table_s.schema());
|
||||
clogger.debug("Checking if candidate of generation {} and max_deletion_time {} is expired, gc_before is {}",
|
||||
candidate->generation(), candidate->get_stats_metadata().max_local_deletion_time, gc_before);
|
||||
// A fully expired sstable which has an ancestor undeleted shouldn't be compacted because
|
||||
|
||||
@@ -22,6 +22,7 @@
|
||||
#include "sstables/exceptions.hh"
|
||||
#include "sstables/sstable_directory.hh"
|
||||
#include "locator/abstract_replication_strategy.hh"
|
||||
#include "utils/error_injection.hh"
|
||||
#include "utils/fb_utilities.hh"
|
||||
#include "utils/UUID_gen.hh"
|
||||
#include "db/system_keyspace.hh"
|
||||
@@ -1147,6 +1148,11 @@ protected:
|
||||
}
|
||||
|
||||
virtual future<compaction_manager::compaction_stats_opt> do_run() override {
|
||||
if (!is_system_keyspace(_status.keyspace)) {
|
||||
co_await utils::get_local_injector().inject_with_handler("compaction_regular_compaction_task_executor_do_run",
|
||||
[] (auto& handler) { return handler.wait_for_message(db::timeout_clock::now() + 10s); });
|
||||
}
|
||||
|
||||
co_await coroutine::switch_to(_cm.compaction_sg());
|
||||
|
||||
for (;;) {
|
||||
@@ -1789,7 +1795,11 @@ future<> compaction_manager::perform_cleanup(owned_ranges_ptr sorted_owned_range
|
||||
};
|
||||
|
||||
cmlog.debug("perform_cleanup: waiting for sstables to become eligible for cleanup");
|
||||
co_await t.get_staging_done_condition().when(sleep_duration, [&] { return has_sstables_eligible_for_compaction(); });
|
||||
try {
|
||||
co_await t.get_staging_done_condition().when(sleep_duration, [&] { return has_sstables_eligible_for_compaction(); });
|
||||
} catch (const seastar::condition_variable_timed_out&) {
|
||||
// Ignored. Keep retrying for max_idle_duration
|
||||
}
|
||||
|
||||
if (!has_sstables_eligible_for_compaction()) {
|
||||
continue;
|
||||
|
||||
@@ -51,7 +51,7 @@ std::vector<compaction_descriptor> compaction_strategy_impl::get_cleanup_compact
|
||||
}));
|
||||
}
|
||||
|
||||
bool compaction_strategy_impl::worth_dropping_tombstones(const shared_sstable& sst, gc_clock::time_point compaction_time, const tombstone_gc_state& gc_state) {
|
||||
bool compaction_strategy_impl::worth_dropping_tombstones(const shared_sstable& sst, gc_clock::time_point compaction_time, const table_state& t) {
|
||||
if (_disable_tombstone_compaction) {
|
||||
return false;
|
||||
}
|
||||
@@ -62,11 +62,11 @@ bool compaction_strategy_impl::worth_dropping_tombstones(const shared_sstable& s
|
||||
if (db_clock::now()-_tombstone_compaction_interval < sst->data_file_write_time()) {
|
||||
return false;
|
||||
}
|
||||
auto gc_before = sst->get_gc_before_for_drop_estimation(compaction_time, gc_state);
|
||||
auto gc_before = sst->get_gc_before_for_drop_estimation(compaction_time, t.get_tombstone_gc_state(), t.schema());
|
||||
return sst->estimate_droppable_tombstone_ratio(gc_before) >= _tombstone_threshold;
|
||||
}
|
||||
|
||||
uint64_t compaction_strategy_impl::adjust_partition_estimate(const mutation_source_metadata& ms_meta, uint64_t partition_estimate) const {
|
||||
uint64_t compaction_strategy_impl::adjust_partition_estimate(const mutation_source_metadata& ms_meta, uint64_t partition_estimate, schema_ptr schema) const {
|
||||
return partition_estimate;
|
||||
}
|
||||
|
||||
@@ -704,8 +704,8 @@ compaction_strategy::get_reshaping_job(std::vector<shared_sstable> input, schema
|
||||
return _compaction_strategy_impl->get_reshaping_job(std::move(input), schema, mode);
|
||||
}
|
||||
|
||||
uint64_t compaction_strategy::adjust_partition_estimate(const mutation_source_metadata& ms_meta, uint64_t partition_estimate) const {
|
||||
return _compaction_strategy_impl->adjust_partition_estimate(ms_meta, partition_estimate);
|
||||
uint64_t compaction_strategy::adjust_partition_estimate(const mutation_source_metadata& ms_meta, uint64_t partition_estimate, schema_ptr schema) const {
|
||||
return _compaction_strategy_impl->adjust_partition_estimate(ms_meta, partition_estimate, std::move(schema));
|
||||
}
|
||||
|
||||
reader_consumer_v2 compaction_strategy::make_interposer_consumer(const mutation_source_metadata& ms_meta, reader_consumer_v2 end_consumer) const {
|
||||
|
||||
@@ -104,7 +104,7 @@ public:
|
||||
|
||||
compaction_backlog_tracker make_backlog_tracker() const;
|
||||
|
||||
uint64_t adjust_partition_estimate(const mutation_source_metadata& ms_meta, uint64_t partition_estimate) const;
|
||||
uint64_t adjust_partition_estimate(const mutation_source_metadata& ms_meta, uint64_t partition_estimate, schema_ptr) const;
|
||||
|
||||
reader_consumer_v2 make_interposer_consumer(const mutation_source_metadata& ms_meta, reader_consumer_v2 end_consumer) const;
|
||||
|
||||
|
||||
@@ -64,11 +64,11 @@ public:
|
||||
|
||||
// Check if a given sstable is entitled for tombstone compaction based on its
|
||||
// droppable tombstone histogram and gc_before.
|
||||
bool worth_dropping_tombstones(const shared_sstable& sst, gc_clock::time_point compaction_time, const tombstone_gc_state& gc_state);
|
||||
bool worth_dropping_tombstones(const shared_sstable& sst, gc_clock::time_point compaction_time, const table_state& t);
|
||||
|
||||
virtual std::unique_ptr<compaction_backlog_tracker::impl> make_backlog_tracker() const = 0;
|
||||
|
||||
virtual uint64_t adjust_partition_estimate(const mutation_source_metadata& ms_meta, uint64_t partition_estimate) const;
|
||||
virtual uint64_t adjust_partition_estimate(const mutation_source_metadata& ms_meta, uint64_t partition_estimate, schema_ptr schema) const;
|
||||
|
||||
virtual reader_consumer_v2 make_interposer_consumer(const mutation_source_metadata& ms_meta, reader_consumer_v2 end_consumer) const;
|
||||
|
||||
|
||||
@@ -51,15 +51,15 @@ compaction_descriptor leveled_compaction_strategy::get_sstables_for_compaction(t
|
||||
auto& sstables = manifest.get_level(level);
|
||||
// filter out sstables which droppable tombstone ratio isn't greater than the defined threshold.
|
||||
auto e = boost::range::remove_if(sstables, [this, compaction_time, &table_s] (const sstables::shared_sstable& sst) -> bool {
|
||||
return !worth_dropping_tombstones(sst, compaction_time, table_s.get_tombstone_gc_state());
|
||||
return !worth_dropping_tombstones(sst, compaction_time, table_s);
|
||||
});
|
||||
sstables.erase(e, sstables.end());
|
||||
if (sstables.empty()) {
|
||||
continue;
|
||||
}
|
||||
auto& sst = *std::max_element(sstables.begin(), sstables.end(), [&] (auto& i, auto& j) {
|
||||
auto gc_before1 = i->get_gc_before_for_drop_estimation(compaction_time, table_s.get_tombstone_gc_state());
|
||||
auto gc_before2 = j->get_gc_before_for_drop_estimation(compaction_time, table_s.get_tombstone_gc_state());
|
||||
auto gc_before1 = i->get_gc_before_for_drop_estimation(compaction_time, table_s.get_tombstone_gc_state(), table_s.schema());
|
||||
auto gc_before2 = j->get_gc_before_for_drop_estimation(compaction_time, table_s.get_tombstone_gc_state(), table_s.schema());
|
||||
return i->estimate_droppable_tombstone_ratio(gc_before1) < j->estimate_droppable_tombstone_ratio(gc_before2);
|
||||
});
|
||||
return sstables::compaction_descriptor({ sst }, sst->get_sstable_level());
|
||||
|
||||
@@ -243,7 +243,7 @@ size_tiered_compaction_strategy::get_sstables_for_compaction(table_state& table_
|
||||
for (auto&& sstables : buckets | boost::adaptors::reversed) {
|
||||
// filter out sstables which droppable tombstone ratio isn't greater than the defined threshold.
|
||||
auto e = boost::range::remove_if(sstables, [this, compaction_time, &table_s] (const sstables::shared_sstable& sst) -> bool {
|
||||
return !worth_dropping_tombstones(sst, compaction_time, table_s.get_tombstone_gc_state());
|
||||
return !worth_dropping_tombstones(sst, compaction_time, table_s);
|
||||
});
|
||||
sstables.erase(e, sstables.end());
|
||||
if (sstables.empty()) {
|
||||
|
||||
@@ -14,6 +14,9 @@
|
||||
#include "sstables/sstables.hh"
|
||||
#include "sstables/sstable_directory.hh"
|
||||
#include "utils/pretty_printers.hh"
|
||||
#include "db/config.hh"
|
||||
|
||||
using namespace std::chrono_literals;
|
||||
|
||||
namespace replica {
|
||||
|
||||
@@ -254,11 +257,129 @@ future<> run_table_tasks(replica::database& db, std::vector<table_tasks_info> ta
|
||||
}
|
||||
}
|
||||
|
||||
struct keyspace_tasks_info {
|
||||
tasks::task_manager::task_ptr task;
|
||||
sstring keyspace;
|
||||
std::vector<table_info> table_infos;
|
||||
|
||||
keyspace_tasks_info(tasks::task_manager::task_ptr t, sstring ks_name, std::vector<table_info> t_infos)
|
||||
: task(t)
|
||||
, keyspace(std::move(ks_name))
|
||||
, table_infos(std::move(t_infos))
|
||||
{}
|
||||
};
|
||||
|
||||
future<> run_keyspace_tasks(replica::database& db, std::vector<keyspace_tasks_info> keyspace_tasks, seastar::condition_variable& cv, tasks::task_manager::task_ptr& current_task, bool sort) {
|
||||
std::exception_ptr ex;
|
||||
|
||||
// While compaction is run on one table, the size of tables may significantly change.
|
||||
// Thus, they are sorted before each invidual compaction and the smallest keyspace is chosen.
|
||||
while (!keyspace_tasks.empty()) {
|
||||
try {
|
||||
if (sort) {
|
||||
// Major compact smaller tables first, to increase chances of success if low on space.
|
||||
// Tables will be kept in descending order.
|
||||
std::ranges::sort(keyspace_tasks, std::greater<>(), [&] (const keyspace_tasks_info& kti) {
|
||||
try {
|
||||
return std::accumulate(kti.table_infos.begin(), kti.table_infos.end(), int64_t(0), [&] (int64_t sum, const table_info& t) {
|
||||
try {
|
||||
sum += db.find_column_family(t.id).get_stats().live_disk_space_used;
|
||||
} catch (const replica::no_such_column_family&) {
|
||||
// ignore
|
||||
}
|
||||
return sum;
|
||||
});
|
||||
} catch (const replica::no_such_keyspace&) {
|
||||
return int64_t(-1);
|
||||
}
|
||||
});
|
||||
}
|
||||
// Task responsible for the smallest keyspace.
|
||||
current_task = keyspace_tasks.back().task;
|
||||
keyspace_tasks.pop_back();
|
||||
cv.broadcast();
|
||||
co_await current_task->done();
|
||||
} catch (...) {
|
||||
ex = std::current_exception();
|
||||
current_task = nullptr;
|
||||
cv.broken(ex);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (ex) {
|
||||
// Wait for all tasks even on failure.
|
||||
for (auto& kti: keyspace_tasks) {
|
||||
co_await kti.task->done();
|
||||
}
|
||||
co_await coroutine::return_exception_ptr(std::move(ex));
|
||||
}
|
||||
}
|
||||
|
||||
sstring major_compaction_task_impl::to_string(flush_mode fm) {
|
||||
switch (fm) {
|
||||
case flush_mode::skip: return "skip";
|
||||
case flush_mode::compacted_tables: return "compacted_tables";
|
||||
case flush_mode::all_tables: return "all_tables";
|
||||
}
|
||||
__builtin_unreachable();
|
||||
}
|
||||
|
||||
static future<bool> maybe_flush_all_tables(sharded<replica::database>& db) {
|
||||
auto interval = db.local().get_config().compaction_flush_all_tables_before_major_seconds();
|
||||
if (interval) {
|
||||
auto when = db_clock::now() - interval * 1s;
|
||||
if (co_await replica::database::get_all_tables_flushed_at(db) <= when) {
|
||||
co_await db.invoke_on_all([&] (replica::database& db) -> future<> {
|
||||
co_await db.flush_all_tables();
|
||||
});
|
||||
co_return true;
|
||||
}
|
||||
}
|
||||
co_return false;
|
||||
}
|
||||
|
||||
future<> global_major_compaction_task_impl::run() {
|
||||
bool flushed_all_tables = false;
|
||||
if (_flush_mode == flush_mode::all_tables) {
|
||||
flushed_all_tables = co_await maybe_flush_all_tables(_db);
|
||||
}
|
||||
|
||||
std::unordered_map<sstring, std::vector<table_info>> tables_by_keyspace;
|
||||
auto tables_meta = _db.local().get_tables_metadata().get_column_families_copy();
|
||||
for (const auto& [table_id, t] : tables_meta) {
|
||||
const auto& ks_name = t->schema()->ks_name();
|
||||
const auto& table_name = t->schema()->cf_name();
|
||||
tables_by_keyspace[ks_name].emplace_back(table_name, table_id);
|
||||
}
|
||||
seastar::condition_variable cv;
|
||||
tasks::task_manager::task_ptr current_task;
|
||||
tasks::task_info parent_info{_status.id, _status.shard};
|
||||
std::vector<keyspace_tasks_info> keyspace_tasks;
|
||||
flush_mode fm = flushed_all_tables ? flush_mode::skip : _flush_mode;
|
||||
for (auto& [ks, table_infos] : tables_by_keyspace) {
|
||||
auto task = co_await _module->make_and_start_task<major_keyspace_compaction_task_impl>(parent_info, ks, parent_info.id, _db, table_infos, fm,
|
||||
&cv, ¤t_task);
|
||||
keyspace_tasks.emplace_back(std::move(task), ks, std::move(table_infos));
|
||||
}
|
||||
co_await run_keyspace_tasks(_db.local(), keyspace_tasks, cv, current_task, false);
|
||||
}
|
||||
|
||||
future<> major_keyspace_compaction_task_impl::run() {
|
||||
if (_cv) {
|
||||
co_await wait_for_your_turn(*_cv, *_current_task, _status.id);
|
||||
}
|
||||
|
||||
bool flushed_all_tables = false;
|
||||
if (_flush_mode == flush_mode::all_tables) {
|
||||
flushed_all_tables = co_await maybe_flush_all_tables(_db);
|
||||
}
|
||||
|
||||
flush_mode fm = flushed_all_tables ? flush_mode::skip : _flush_mode;
|
||||
co_await _db.invoke_on_all([&] (replica::database& db) -> future<> {
|
||||
tasks::task_info parent_info{_status.id, _status.shard};
|
||||
auto& module = db.get_compaction_manager().get_task_manager_module();
|
||||
auto task = co_await module.make_and_start_task<shard_major_keyspace_compaction_task_impl>(parent_info, _status.keyspace, _status.id, db, _table_infos);
|
||||
auto task = co_await module.make_and_start_task<shard_major_keyspace_compaction_task_impl>(parent_info, _status.keyspace, _status.id, db, _table_infos, fm);
|
||||
co_await task->done();
|
||||
});
|
||||
}
|
||||
@@ -269,7 +390,7 @@ future<> shard_major_keyspace_compaction_task_impl::run() {
|
||||
tasks::task_info parent_info{_status.id, _status.shard};
|
||||
std::vector<table_tasks_info> table_tasks;
|
||||
for (auto& ti : _local_tables) {
|
||||
table_tasks.emplace_back(co_await _module->make_and_start_task<table_major_keyspace_compaction_task_impl>(parent_info, _status.keyspace, ti.name, _status.id, _db, ti, cv, current_task), ti);
|
||||
table_tasks.emplace_back(co_await _module->make_and_start_task<table_major_keyspace_compaction_task_impl>(parent_info, _status.keyspace, ti.name, _status.id, _db, ti, cv, current_task, _flush_mode), ti);
|
||||
}
|
||||
|
||||
co_await run_table_tasks(_db, std::move(table_tasks), cv, current_task, true);
|
||||
@@ -278,8 +399,9 @@ future<> shard_major_keyspace_compaction_task_impl::run() {
|
||||
future<> table_major_keyspace_compaction_task_impl::run() {
|
||||
co_await wait_for_your_turn(_cv, _current_task, _status.id);
|
||||
tasks::task_info info{_status.id, _status.shard};
|
||||
co_await run_on_table("force_keyspace_compaction", _db, _status.keyspace, _ti, [info] (replica::table& t) {
|
||||
return t.compact_all_sstables(info);
|
||||
replica::table::do_flush do_flush(_flush_mode != flush_mode::skip);
|
||||
co_await run_on_table("force_keyspace_compaction", _db, _status.keyspace, _ti, [info, do_flush] (replica::table& t) {
|
||||
return t.compact_all_sstables(info, do_flush);
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -8,6 +8,8 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <fmt/format.h>
|
||||
|
||||
#include "compaction/compaction.hh"
|
||||
#include "replica/database_fwd.hh"
|
||||
#include "schema/schema_fwd.hh"
|
||||
@@ -45,6 +47,12 @@ protected:
|
||||
|
||||
class major_compaction_task_impl : public compaction_task_impl {
|
||||
public:
|
||||
enum class flush_mode {
|
||||
skip, // Skip flushing. Useful when application explicitly flushes all tables prior to compaction
|
||||
compacted_tables, // Flush only the compacted keyspace/tables
|
||||
all_tables // Flush all tables in the database prior to compaction
|
||||
};
|
||||
|
||||
major_compaction_task_impl(tasks::task_manager::module_ptr module,
|
||||
tasks::task_id id,
|
||||
unsigned sequence_number,
|
||||
@@ -52,8 +60,10 @@ public:
|
||||
std::string keyspace,
|
||||
std::string table,
|
||||
std::string entity,
|
||||
tasks::task_id parent_id) noexcept
|
||||
tasks::task_id parent_id,
|
||||
flush_mode fm = flush_mode::compacted_tables) noexcept
|
||||
: compaction_task_impl(module, id, sequence_number, std::move(scope), std::move(keyspace), std::move(table), std::move(entity), parent_id)
|
||||
, _flush_mode(fm)
|
||||
{
|
||||
// FIXME: add progress units
|
||||
}
|
||||
@@ -61,22 +71,54 @@ public:
|
||||
virtual std::string type() const override {
|
||||
return "major compaction";
|
||||
}
|
||||
|
||||
static sstring to_string(flush_mode);
|
||||
protected:
|
||||
flush_mode _flush_mode;
|
||||
|
||||
virtual future<> run() override = 0;
|
||||
};
|
||||
|
||||
class global_major_compaction_task_impl : public major_compaction_task_impl {
|
||||
private:
|
||||
sharded<replica::database>& _db;
|
||||
public:
|
||||
global_major_compaction_task_impl(tasks::task_manager::module_ptr module,
|
||||
sharded<replica::database>& db,
|
||||
std::optional<flush_mode> fm = std::nullopt) noexcept
|
||||
: major_compaction_task_impl(module, tasks::task_id::create_random_id(), module->new_sequence_number(), "global", "", "", "", tasks::task_id::create_null_id(),
|
||||
fm.value_or(flush_mode::all_tables))
|
||||
, _db(db)
|
||||
{}
|
||||
protected:
|
||||
virtual future<> run() override;
|
||||
};
|
||||
|
||||
class major_keyspace_compaction_task_impl : public major_compaction_task_impl {
|
||||
private:
|
||||
sharded<replica::database>& _db;
|
||||
std::vector<table_info> _table_infos;
|
||||
// _cvp and _current_task are engaged when the task is invoked from
|
||||
// global_major_compaction_task_impl
|
||||
seastar::condition_variable* _cv;
|
||||
tasks::task_manager::task_ptr* _current_task;
|
||||
public:
|
||||
major_keyspace_compaction_task_impl(tasks::task_manager::module_ptr module,
|
||||
std::string keyspace,
|
||||
tasks::task_id parent_id,
|
||||
sharded<replica::database>& db,
|
||||
std::vector<table_info> table_infos) noexcept
|
||||
: major_compaction_task_impl(module, tasks::task_id::create_random_id(), module->new_sequence_number(), "keyspace", std::move(keyspace), "", "", tasks::task_id::create_null_id())
|
||||
std::vector<table_info> table_infos,
|
||||
std::optional<flush_mode> fm = std::nullopt,
|
||||
seastar::condition_variable* cv = nullptr,
|
||||
tasks::task_manager::task_ptr* current_task = nullptr) noexcept
|
||||
: major_compaction_task_impl(module, tasks::task_id::create_random_id(),
|
||||
parent_id ? 0 : module->new_sequence_number(),
|
||||
"keyspace", std::move(keyspace), "", "", parent_id,
|
||||
fm.value_or(flush_mode::all_tables))
|
||||
, _db(db)
|
||||
, _table_infos(std::move(table_infos))
|
||||
, _cv(cv)
|
||||
, _current_task(current_task)
|
||||
{}
|
||||
protected:
|
||||
virtual future<> run() override;
|
||||
@@ -91,8 +133,9 @@ public:
|
||||
std::string keyspace,
|
||||
tasks::task_id parent_id,
|
||||
replica::database& db,
|
||||
std::vector<table_info> local_tables) noexcept
|
||||
: major_compaction_task_impl(module, tasks::task_id::create_random_id(), 0, "shard", std::move(keyspace), "", "", parent_id)
|
||||
std::vector<table_info> local_tables,
|
||||
flush_mode fm) noexcept
|
||||
: major_compaction_task_impl(module, tasks::task_id::create_random_id(), 0, "shard", std::move(keyspace), "", "", parent_id, fm)
|
||||
, _db(db)
|
||||
, _local_tables(std::move(local_tables))
|
||||
{}
|
||||
@@ -114,8 +157,9 @@ public:
|
||||
replica::database& db,
|
||||
table_info ti,
|
||||
seastar::condition_variable& cv,
|
||||
tasks::task_manager::task_ptr& current_task) noexcept
|
||||
: major_compaction_task_impl(module, tasks::task_id::create_random_id(), 0, "table", std::move(keyspace), std::move(table), "", parent_id)
|
||||
tasks::task_manager::task_ptr& current_task,
|
||||
flush_mode fm) noexcept
|
||||
: major_compaction_task_impl(module, tasks::task_id::create_random_id(), 0, "table", std::move(keyspace), std::move(table), "", parent_id, fm)
|
||||
, _db(db)
|
||||
, _ti(std::move(ti))
|
||||
, _cv(cv)
|
||||
@@ -660,8 +704,21 @@ public:
|
||||
virtual std::string type() const override {
|
||||
return "regular compaction";
|
||||
}
|
||||
|
||||
virtual tasks::is_internal is_internal() const noexcept override {
|
||||
return tasks::is_internal::yes;
|
||||
}
|
||||
protected:
|
||||
virtual future<> run() override = 0;
|
||||
};
|
||||
|
||||
}
|
||||
} // namespace compaction
|
||||
|
||||
template <>
|
||||
struct fmt::formatter<major_compaction_task_impl::flush_mode> {
|
||||
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
|
||||
template <typename FormatContext>
|
||||
auto format(const major_compaction_task_impl::flush_mode& fm, FormatContext& ctx) const {
|
||||
return fmt::format_to(ctx.out(), "{}", major_compaction_task_impl::to_string(fm));
|
||||
}
|
||||
};
|
||||
|
||||
@@ -184,16 +184,27 @@ public:
|
||||
};
|
||||
};
|
||||
|
||||
uint64_t time_window_compaction_strategy::adjust_partition_estimate(const mutation_source_metadata& ms_meta, uint64_t partition_estimate) const {
|
||||
if (!ms_meta.min_timestamp || !ms_meta.max_timestamp) {
|
||||
// Not enough information, we assume the worst
|
||||
return partition_estimate / max_data_segregation_window_count;
|
||||
}
|
||||
const auto min_window = get_window_for(_options, *ms_meta.min_timestamp);
|
||||
const auto max_window = get_window_for(_options, *ms_meta.max_timestamp);
|
||||
const auto window_size = get_window_size(_options);
|
||||
uint64_t time_window_compaction_strategy::adjust_partition_estimate(const mutation_source_metadata& ms_meta, uint64_t partition_estimate, schema_ptr s) const {
|
||||
// If not enough information, we assume the worst
|
||||
auto estimated_window_count = max_data_segregation_window_count;
|
||||
auto default_ttl = std::chrono::duration_cast<std::chrono::microseconds>(s->default_time_to_live());
|
||||
bool min_and_max_ts_available = ms_meta.min_timestamp && ms_meta.max_timestamp;
|
||||
auto estimate_window_count = [this] (timestamp_type min_window, timestamp_type max_window) {
|
||||
const auto window_size = get_window_size(_options);
|
||||
return (max_window + (window_size - 1) - min_window) / window_size;
|
||||
};
|
||||
|
||||
auto estimated_window_count = (max_window + (window_size - 1) - min_window) / window_size;
|
||||
if (!min_and_max_ts_available && default_ttl.count()) {
|
||||
auto min_window = get_window_for(_options, timestamp_type(0));
|
||||
auto max_window = get_window_for(_options, timestamp_type(default_ttl.count()));
|
||||
|
||||
estimated_window_count = estimate_window_count(min_window, max_window);
|
||||
} else if (min_and_max_ts_available) {
|
||||
auto min_window = get_window_for(_options, *ms_meta.min_timestamp);
|
||||
auto max_window = get_window_for(_options, *ms_meta.max_timestamp);
|
||||
|
||||
estimated_window_count = estimate_window_count(min_window, max_window);
|
||||
}
|
||||
|
||||
return partition_estimate / std::max(1UL, uint64_t(estimated_window_count));
|
||||
}
|
||||
@@ -366,7 +377,7 @@ time_window_compaction_strategy::get_next_non_expired_sstables(table_state& tabl
|
||||
// if there is no sstable to compact in standard way, try compacting single sstable whose droppable tombstone
|
||||
// ratio is greater than threshold.
|
||||
auto e = boost::range::remove_if(non_expiring_sstables, [this, compaction_time, &table_s] (const shared_sstable& sst) -> bool {
|
||||
return !worth_dropping_tombstones(sst, compaction_time, table_s.get_tombstone_gc_state());
|
||||
return !worth_dropping_tombstones(sst, compaction_time, table_s);
|
||||
});
|
||||
non_expiring_sstables.erase(e, non_expiring_sstables.end());
|
||||
if (non_expiring_sstables.empty()) {
|
||||
|
||||
@@ -162,7 +162,7 @@ public:
|
||||
|
||||
virtual std::unique_ptr<compaction_backlog_tracker::impl> make_backlog_tracker() const override;
|
||||
|
||||
virtual uint64_t adjust_partition_estimate(const mutation_source_metadata& ms_meta, uint64_t partition_estimate) const override;
|
||||
virtual uint64_t adjust_partition_estimate(const mutation_source_metadata& ms_meta, uint64_t partition_estimate, schema_ptr s) const override;
|
||||
|
||||
virtual reader_consumer_v2 make_interposer_consumer(const mutation_source_metadata& ms_meta, reader_consumer_v2 end_consumer) const override;
|
||||
|
||||
|
||||
@@ -852,6 +852,7 @@ scylla_core = (['message/messaging_service.cc',
|
||||
'utils/rjson.cc',
|
||||
'utils/human_readable.cc',
|
||||
'utils/histogram_metrics_helper.cc',
|
||||
'utils/on_internal_error.cc',
|
||||
'utils/pretty_printers.cc',
|
||||
'converting_mutation_partition_applier.cc',
|
||||
'readers/combined.cc',
|
||||
@@ -1240,6 +1241,8 @@ api = ['api/api.cc',
|
||||
Json2Code('api/api-doc/error_injection.json'),
|
||||
'api/authorization_cache.cc',
|
||||
Json2Code('api/api-doc/authorization_cache.json'),
|
||||
'api/raft.cc',
|
||||
Json2Code('api/api-doc/raft.json'),
|
||||
]
|
||||
|
||||
alternator = [
|
||||
@@ -1451,7 +1454,7 @@ deps['test/boost/bytes_ostream_test'] = [
|
||||
"test/lib/log.cc",
|
||||
]
|
||||
deps['test/boost/input_stream_test'] = ['test/boost/input_stream_test.cc']
|
||||
deps['test/boost/UUID_test'] = ['utils/UUID_gen.cc', 'test/boost/UUID_test.cc', 'utils/uuid.cc', 'utils/dynamic_bitset.cc', 'utils/hashers.cc']
|
||||
deps['test/boost/UUID_test'] = ['utils/UUID_gen.cc', 'test/boost/UUID_test.cc', 'utils/uuid.cc', 'utils/dynamic_bitset.cc', 'utils/hashers.cc', 'utils/on_internal_error.cc']
|
||||
deps['test/boost/murmur_hash_test'] = ['bytes.cc', 'utils/murmur_hash.cc', 'test/boost/murmur_hash_test.cc']
|
||||
deps['test/boost/allocation_strategy_test'] = ['test/boost/allocation_strategy_test.cc', 'utils/logalloc.cc', 'utils/dynamic_bitset.cc']
|
||||
deps['test/boost/log_heap_test'] = ['test/boost/log_heap_test.cc']
|
||||
|
||||
@@ -1065,6 +1065,9 @@ void query_processor::migration_subscriber::on_update_aggregate(const sstring& k
|
||||
void query_processor::migration_subscriber::on_update_view(
|
||||
const sstring& ks_name,
|
||||
const sstring& view_name, bool columns_changed) {
|
||||
// scylladb/scylladb#16392 - Materialized views are also tables so we need at least handle
|
||||
// them as such when changed.
|
||||
on_update_column_family(ks_name, view_name, columns_changed);
|
||||
}
|
||||
|
||||
void query_processor::migration_subscriber::on_update_tablet_metadata() {
|
||||
|
||||
@@ -433,12 +433,17 @@ protected:
|
||||
}
|
||||
};
|
||||
|
||||
::shared_ptr<selection> selection::wildcard(schema_ptr schema) {
|
||||
// Return a list of columns that "SELECT *" should show - these are all
|
||||
// columns except potentially some that are is_hidden_from_cql() (currently,
|
||||
// those can be the "virtual columns" used in materialized views).
|
||||
// The list points to column_definition objects in the given schema_ptr,
|
||||
// which can be used only as long as the caller keeps the schema_ptr alive.
|
||||
std::vector<const column_definition*> selection::wildcard_columns(schema_ptr schema) {
|
||||
auto columns = schema->all_columns_in_select_order();
|
||||
// filter out hidden columns, which should not be seen by the
|
||||
// user when doing "SELECT *". We also disallow selecting them
|
||||
// individually (see column_identifier::new_selector_factory()).
|
||||
auto cds = boost::copy_range<std::vector<const column_definition*>>(
|
||||
return boost::copy_range<std::vector<const column_definition*>>(
|
||||
columns |
|
||||
boost::adaptors::filtered([](const column_definition& c) {
|
||||
return !c.is_hidden_from_cql();
|
||||
@@ -446,7 +451,10 @@ protected:
|
||||
boost::adaptors::transformed([](const column_definition& c) {
|
||||
return &c;
|
||||
}));
|
||||
return simple_selection::make(schema, std::move(cds), true);
|
||||
}
|
||||
|
||||
::shared_ptr<selection> selection::wildcard(schema_ptr schema) {
|
||||
return simple_selection::make(schema, wildcard_columns(schema), true);
|
||||
}
|
||||
|
||||
::shared_ptr<selection> selection::for_columns(schema_ptr schema, std::vector<const column_definition*> columns) {
|
||||
|
||||
@@ -118,6 +118,7 @@ public:
|
||||
}
|
||||
|
||||
static ::shared_ptr<selection> wildcard(schema_ptr schema);
|
||||
static std::vector<const column_definition*> wildcard_columns(schema_ptr schema);
|
||||
static ::shared_ptr<selection> for_columns(schema_ptr schema, std::vector<const column_definition*> columns);
|
||||
|
||||
// Adds a column to the selection and result set. Returns an index within the result set row.
|
||||
|
||||
@@ -135,6 +135,18 @@ user_type alter_type_statement::add_or_alter::do_add(data_dictionary::database d
|
||||
throw exceptions::invalid_request_exception(format("Cannot add new field to type {}: maximum number of fields reached", _name));
|
||||
}
|
||||
|
||||
if (_field_type->is_duration()) {
|
||||
auto&& ks = db.find_keyspace(keyspace());
|
||||
for (auto&& schema : ks.metadata()->cf_meta_data() | boost::adaptors::map_values) {
|
||||
for (auto&& column : schema->clustering_key_columns()) {
|
||||
if (column.type->references_user_type(_name.get_keyspace(), _name.get_user_type_name())) {
|
||||
throw exceptions::invalid_request_exception(format("Cannot add new field to type {} because it is used in the clustering key column {} of table {}.{} where durations are not allowed",
|
||||
_name.to_cql_string(), column.name_as_text(), schema->ks_name(), schema->cf_name()));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<bytes> new_names(to_update->field_names());
|
||||
new_names.push_back(_field_name->name());
|
||||
std::vector<data_type> new_types(to_update->field_types());
|
||||
|
||||
@@ -226,7 +226,8 @@ future<> select_statement::check_access(query_processor& qp, const service::clie
|
||||
}
|
||||
if (!_selection->is_trivial()) {
|
||||
std::vector<::shared_ptr<functions::function>> used_functions = _selection->used_functions();
|
||||
for (const auto& used_function : used_functions) {
|
||||
auto not_native = [] (::shared_ptr<functions::function> func) { return !func->is_native(); };
|
||||
for (const auto& used_function : used_functions | std::ranges::views::filter(not_native)) {
|
||||
sstring encoded_signature = auth::encode_signature(used_function->name().name, used_function->arg_types());
|
||||
co_await state.has_function_access(used_function->name().keyspace, encoded_signature, auth::permission::EXECUTE);
|
||||
}
|
||||
@@ -1660,7 +1661,7 @@ schema_ptr mutation_fragments_select_statement::generate_output_schema(schema_pt
|
||||
|
||||
future<exceptions::coordinator_result<service::storage_proxy_coordinator_query_result>>
|
||||
mutation_fragments_select_statement::do_query(
|
||||
const locator::node* this_node,
|
||||
locator::host_id this_node,
|
||||
service::storage_proxy& sp,
|
||||
schema_ptr schema,
|
||||
lw_shared_ptr<query::read_command> cmd,
|
||||
@@ -1670,7 +1671,7 @@ mutation_fragments_select_statement::do_query(
|
||||
auto res = co_await replica::mutation_dump::dump_mutations(sp.get_db(), schema, _underlying_schema, partition_ranges, *cmd, optional_params.timeout(sp));
|
||||
service::replicas_per_token_range last_replicas;
|
||||
if (this_node) {
|
||||
last_replicas.emplace(dht::token_range::make_open_ended_both_sides(), std::vector<locator::host_id>{this_node->host_id()});
|
||||
last_replicas.emplace(dht::token_range::make_open_ended_both_sides(), std::vector<locator::host_id>{this_node});
|
||||
}
|
||||
co_return service::storage_proxy_coordinator_query_result{std::move(res), std::move(last_replicas), {}};
|
||||
}
|
||||
@@ -1731,12 +1732,17 @@ mutation_fragments_select_statement::do_execute(query_processor& qp, service::qu
|
||||
auto timeout_duration = get_timeout(state.get_client_state(), options);
|
||||
auto timeout = db::timeout_clock::now() + timeout_duration;
|
||||
|
||||
auto& tbl = qp.proxy().local_db().find_column_family(_underlying_schema);
|
||||
|
||||
// Since this query doesn't go through storage-proxy, we have to take care of pinning erm here.
|
||||
auto erm_keepalive = tbl.get_effective_replication_map();
|
||||
|
||||
if (!aggregate && !_restrictions_need_filtering && (page_size <= 0
|
||||
|| !service::pager::query_pagers::may_need_paging(*_schema, page_size,
|
||||
*command, key_ranges))) {
|
||||
return do_query({}, qp.proxy(), _schema, command, std::move(key_ranges), cl,
|
||||
{timeout, state.get_permit(), state.get_client_state(), state.get_trace_state(), {}, {}})
|
||||
.then(wrap_result_to_error_message([&, this] (service::storage_proxy_coordinator_query_result&& qr) {
|
||||
.then(wrap_result_to_error_message([&, this, erm_keepalive] (service::storage_proxy_coordinator_query_result&& qr) {
|
||||
cql3::selection::result_set_builder builder(*_selection, now);
|
||||
query::result_view::consume(*qr.query_result, std::move(slice),
|
||||
cql3::selection::result_set_builder::visitor(builder, *_schema, *_selection));
|
||||
@@ -1745,16 +1751,14 @@ mutation_fragments_select_statement::do_execute(query_processor& qp, service::qu
|
||||
}));
|
||||
}
|
||||
|
||||
const locator::node* this_node = nullptr;
|
||||
locator::host_id this_node;
|
||||
{
|
||||
auto& tbl = qp.proxy().local_db().find_column_family(_underlying_schema);
|
||||
auto& erm = tbl.get_effective_replication_map();
|
||||
auto& topo = erm->get_topology();
|
||||
this_node = topo.this_node();
|
||||
auto& topo = erm_keepalive->get_topology();
|
||||
this_node = topo.this_node()->host_id();
|
||||
auto state = options.get_paging_state();
|
||||
if (state && !state->get_last_replicas().empty()) {
|
||||
auto last_host = state->get_last_replicas().begin()->second.front();
|
||||
if (last_host != this_node->host_id()) {
|
||||
if (last_host != this_node) {
|
||||
const auto last_node = topo.find_node(last_host);
|
||||
throw exceptions::invalid_request_exception(format(
|
||||
"Moving between coordinators is not allowed in SELECT FROM MUTATION_FRAGMENTS() statements, last page's coordinator was {}{}",
|
||||
@@ -1774,7 +1778,10 @@ mutation_fragments_select_statement::do_execute(query_processor& qp, service::qu
|
||||
command,
|
||||
std::move(key_ranges),
|
||||
_restrictions_need_filtering ? _restrictions : nullptr,
|
||||
std::bind_front(&mutation_fragments_select_statement::do_query, this, this_node));
|
||||
[this, erm_keepalive, this_node] (service::storage_proxy& sp, schema_ptr schema, lw_shared_ptr<query::read_command> cmd, dht::partition_range_vector partition_ranges,
|
||||
db::consistency_level cl, service::storage_proxy_coordinator_query_options optional_params) {
|
||||
return do_query(this_node, sp, std::move(schema), std::move(cmd), std::move(partition_ranges), cl, std::move(optional_params));
|
||||
});
|
||||
|
||||
if (_selection->is_trivial() && !_restrictions_need_filtering && !_per_partition_limit) {
|
||||
return p->fetch_page_generator_result(page_size, now, timeout, _stats).then(wrap_result_to_error_message([this, p = std::move(p)] (result_generator&& generator) {
|
||||
@@ -1901,6 +1908,21 @@ std::unique_ptr<prepared_statement> select_statement::prepare(data_dictionary::d
|
||||
// Force aggregation if GROUP BY is used. This will wrap every column x as first(x).
|
||||
if (!_group_by_columns.empty()) {
|
||||
aggregation_depth = std::max(aggregation_depth, 1u);
|
||||
if (prepared_selectors.empty()) {
|
||||
// We have a "SELECT * GROUP BY". If we leave prepared_selectors
|
||||
// empty, below we choose selection::wildcard() for SELECT *, and
|
||||
// forget to do the "levellize" trick needed for the GROUP BY.
|
||||
// So we need to set prepared_selectors. See #16531.
|
||||
auto all_columns = selection::selection::wildcard_columns(schema);
|
||||
std::vector<::shared_ptr<selection::raw_selector>> select_all;
|
||||
select_all.reserve(all_columns.size());
|
||||
for (const column_definition *cdef : all_columns) {
|
||||
auto name = ::make_shared<cql3::column_identifier::raw>(cdef->name_as_text(), true);
|
||||
select_all.push_back(::make_shared<selection::raw_selector>(
|
||||
expr::unresolved_identifier(std::move(name)), nullptr));
|
||||
}
|
||||
prepared_selectors = selection::raw_selector::to_prepared_selectors(select_all, *schema, db, keyspace());
|
||||
}
|
||||
}
|
||||
|
||||
for (auto& ps : prepared_selectors) {
|
||||
|
||||
@@ -19,10 +19,7 @@
|
||||
#include "index/secondary_index_manager.hh"
|
||||
#include "exceptions/exceptions.hh"
|
||||
#include "exceptions/coordinator_result.hh"
|
||||
|
||||
namespace locator {
|
||||
class node;
|
||||
} // namespace locator
|
||||
#include "locator/host_id.hh"
|
||||
|
||||
namespace service {
|
||||
class client_state;
|
||||
@@ -341,7 +338,7 @@ public:
|
||||
private:
|
||||
future<exceptions::coordinator_result<service::storage_proxy_coordinator_query_result>>
|
||||
do_query(
|
||||
const locator::node* this_node,
|
||||
locator::host_id this_node,
|
||||
service::storage_proxy& sp,
|
||||
schema_ptr schema,
|
||||
lw_shared_ptr<query::read_command> cmd,
|
||||
|
||||
@@ -56,7 +56,11 @@ future<> use_statement::check_access(query_processor& qp, const service::client_
|
||||
|
||||
future<::shared_ptr<cql_transport::messages::result_message>>
|
||||
use_statement::execute(query_processor& qp, service::query_state& state, const query_options& options, std::optional<service::group0_guard> guard) const {
|
||||
state.get_client_state().set_keyspace(qp.db().real_database(), _keyspace);
|
||||
try {
|
||||
state.get_client_state().set_keyspace(qp.db().real_database(), _keyspace);
|
||||
} catch(...) {
|
||||
return make_exception_future<::shared_ptr<cql_transport::messages::result_message>>(std::current_exception());
|
||||
}
|
||||
auto result =::make_shared<cql_transport::messages::result_message::set_keyspace>(_keyspace);
|
||||
return make_ready_future<::shared_ptr<cql_transport::messages::result_message>>(result);
|
||||
}
|
||||
|
||||
@@ -502,7 +502,7 @@ struct to_json_string_visitor {
|
||||
sstring operator()(const tuple_type_impl& t) { return to_json_string_aux(t, bv); }
|
||||
sstring operator()(const user_type_impl& t) { return to_json_string_aux(t, bv); }
|
||||
sstring operator()(const simple_date_type_impl& t) { return quote_json_string(t.to_string(bv)); }
|
||||
sstring operator()(const time_type_impl& t) { return t.to_string(bv); }
|
||||
sstring operator()(const time_type_impl& t) { return quote_json_string(t.to_string(bv)); }
|
||||
sstring operator()(const empty_type_impl& t) { return "null"; }
|
||||
sstring operator()(const duration_type_impl& t) {
|
||||
auto v = t.deserialize(bv);
|
||||
|
||||
@@ -2628,12 +2628,20 @@ db::commitlog::read_log_file(sstring filename, sstring pfx, commit_load_reader_f
|
||||
return eof || next == pos;
|
||||
}
|
||||
future<> skip(size_t bytes) {
|
||||
pos += bytes;
|
||||
if (pos > file_size) {
|
||||
auto n = std::min(file_size - pos, bytes);
|
||||
pos += n;
|
||||
if (pos == file_size) {
|
||||
eof = true;
|
||||
pos = file_size;
|
||||
}
|
||||
return fin.skip(bytes);
|
||||
if (n < bytes) {
|
||||
// if we are trying to skip past end, we have at least
|
||||
// the bytes skipped or the source from where we read
|
||||
// this corrupt. So add at least four bytes. This is
|
||||
// inexact, but adding the full "bytes" is equally wrong
|
||||
// since it could be complete garbled junk.
|
||||
corrupt_size += std::max(n, sizeof(uint32_t));
|
||||
}
|
||||
return fin.skip(n);
|
||||
}
|
||||
void stop() {
|
||||
eof = true;
|
||||
|
||||
@@ -341,6 +341,10 @@ db::config::config(std::shared_ptr<db::extensions> exts)
|
||||
"If set to higher than 0, ignore the controller's output and set the compaction shares statically. Do not set this unless you know what you are doing and suspect a problem in the controller. This option will be retired when the controller reaches more maturity")
|
||||
, compaction_enforce_min_threshold(this, "compaction_enforce_min_threshold", liveness::LiveUpdate, value_status::Used, false,
|
||||
"If set to true, enforce the min_threshold option for compactions strictly. If false (default), Scylla may decide to compact even if below min_threshold")
|
||||
, compaction_flush_all_tables_before_major_seconds(this, "compaction_flush_all_tables_before_major_seconds", value_status::Used, 86400,
|
||||
"Set the minimum interval in seconds between flushing all tables before each major compaction (default is 86400). "
|
||||
"This option is useful for maximizing tombstone garbage collection by releasing all active commitlog segments. "
|
||||
"Set to 0 to disable automatic flushing all tables before major compaction")
|
||||
/**
|
||||
* @Group Initialization properties
|
||||
* @GroupDescription The minimal properties needed for configuring a cluster.
|
||||
|
||||
@@ -163,6 +163,7 @@ public:
|
||||
named_value<float> memtable_flush_static_shares;
|
||||
named_value<float> compaction_static_shares;
|
||||
named_value<bool> compaction_enforce_min_threshold;
|
||||
named_value<uint32_t> compaction_flush_all_tables_before_major_seconds;
|
||||
named_value<sstring> cluster_name;
|
||||
named_value<sstring> listen_address;
|
||||
named_value<sstring> listen_interface;
|
||||
|
||||
@@ -1879,8 +1879,7 @@ std::vector<schema_ptr> system_keyspace::all_tables(const db::config& cfg) {
|
||||
|
||||
static bool maybe_write_in_user_memory(schema_ptr s) {
|
||||
return (s.get() == system_keyspace::batchlog().get()) || (s.get() == system_keyspace::paxos().get())
|
||||
|| s == system_keyspace::v3::scylla_views_builds_in_progress()
|
||||
|| s == system_keyspace::raft();
|
||||
|| s == system_keyspace::v3::scylla_views_builds_in_progress();
|
||||
}
|
||||
|
||||
future<> system_keyspace::make(
|
||||
@@ -1888,6 +1887,7 @@ future<> system_keyspace::make(
|
||||
replica::database& db) {
|
||||
for (auto&& table : system_keyspace::all_tables(db.get_config())) {
|
||||
co_await db.create_local_system_table(table, maybe_write_in_user_memory(table), erm_factory);
|
||||
co_await db.find_column_family(table).init_storage();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1689,7 +1689,7 @@ future<> view_update_generator::mutate_MV(
|
||||
auto mut_ptr = remote_endpoints.empty() ? std::make_unique<frozen_mutation>(std::move(mut.fm)) : std::make_unique<frozen_mutation>(mut.fm);
|
||||
tracing::trace(tr_state, "Locally applying view update for {}.{}; base token = {}; view token = {}",
|
||||
mut.s->ks_name(), mut.s->cf_name(), base_token, view_token);
|
||||
local_view_update = _proxy.local().mutate_locally(mut.s, *mut_ptr, tr_state, db::commitlog::force_sync::no).then_wrapped(
|
||||
local_view_update = _proxy.local().mutate_mv_locally(mut.s, *mut_ptr, tr_state, db::commitlog::force_sync::no).then_wrapped(
|
||||
[s = mut.s, &stats, &cf_stats, tr_state, base_token, view_token, my_address, mut_ptr = std::move(mut_ptr),
|
||||
units = sem_units.split(sem_units.count())] (future<>&& f) {
|
||||
--stats.writes;
|
||||
|
||||
5
dist/common/scripts/scylla_coredump_setup
vendored
5
dist/common/scripts/scylla_coredump_setup
vendored
@@ -62,8 +62,7 @@ ExternalSizeMax=1024G
|
||||
[Unit]
|
||||
Description=Save coredump to scylla data directory
|
||||
Conflicts=umount.target
|
||||
Before=scylla-server.service
|
||||
After=local-fs.target
|
||||
Before=local-fs.target scylla-server.service
|
||||
DefaultDependencies=no
|
||||
|
||||
[Mount]
|
||||
@@ -73,7 +72,7 @@ Type=none
|
||||
Options=bind
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
WantedBy=local-fs.target
|
||||
'''[1:-1]
|
||||
with open('/etc/systemd/system/var-lib-systemd-coredump.mount', 'w') as f:
|
||||
f.write(dot_mount)
|
||||
|
||||
12
dist/common/scripts/scylla_raid_setup
vendored
12
dist/common/scripts/scylla_raid_setup
vendored
@@ -257,19 +257,19 @@ if __name__ == '__main__':
|
||||
dev_type = 'realpath'
|
||||
LOGGER.error(f'Failed to detect uuid, using {dev_type}: {mount_dev}')
|
||||
|
||||
after = 'local-fs.target'
|
||||
after = ''
|
||||
wants = ''
|
||||
if raid and args.raid_level != '0':
|
||||
after += f' {md_service}'
|
||||
wants = f'\nWants={md_service}'
|
||||
after = wants = 'md_service'
|
||||
opt_discard = ''
|
||||
if args.online_discard:
|
||||
opt_discard = ',discard'
|
||||
unit_data = f'''
|
||||
[Unit]
|
||||
Description=Scylla data directory
|
||||
Before=scylla-server.service
|
||||
After={after}{wants}
|
||||
Before=local-fs.target scylla-server.service
|
||||
After={after}
|
||||
Wants={wants}
|
||||
DefaultDependencies=no
|
||||
|
||||
[Mount]
|
||||
@@ -279,7 +279,7 @@ Type=xfs
|
||||
Options=noatime{opt_discard}
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
WantedBy=local-fs.target
|
||||
'''[1:-1]
|
||||
with open(f'/etc/systemd/system/{mntunit_bn}', 'w') as f:
|
||||
f.write(unit_data)
|
||||
|
||||
4
dist/docker/debian/build_docker.sh
vendored
4
dist/docker/debian/build_docker.sh
vendored
@@ -64,7 +64,6 @@ bcp "${packages[@]}" packages/
|
||||
|
||||
bcp dist/docker/etc etc/
|
||||
bcp dist/docker/scylla-housekeeping-service.sh /scylla-housekeeping-service.sh
|
||||
bcp dist/docker/sshd-service.sh /sshd-service.sh
|
||||
|
||||
bcp dist/docker/scyllasetup.py /scyllasetup.py
|
||||
bcp dist/docker/commandlineparser.py /commandlineparser.py
|
||||
@@ -74,10 +73,11 @@ bcp dist/docker/scylla_bashrc /scylla_bashrc
|
||||
|
||||
run apt-get -y clean expire-cache
|
||||
run apt-get -y update
|
||||
run apt-get -y upgrade
|
||||
run apt-get -y install dialog apt-utils
|
||||
run bash -ec "echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections"
|
||||
run bash -ec "rm -rf /etc/rsyslog.conf"
|
||||
run apt-get -y install hostname supervisor openssh-server openssh-client openjdk-11-jre-headless python2 python3 python3-yaml curl rsyslog sudo
|
||||
run apt-get -y install hostname supervisor openjdk-11-jre-headless python2 python3 python3-yaml curl rsyslog sudo
|
||||
run bash -ec "echo LANG=C.UTF-8 > /etc/default/locale"
|
||||
run bash -ec "dpkg -i packages/*.deb"
|
||||
run apt-get -y clean all
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
[program:sshd]
|
||||
command=/sshd-service.sh
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
stderr_logfile=/dev/stderr
|
||||
stderr_logfile_maxbytes=0
|
||||
3
dist/docker/scyllasetup.py
vendored
3
dist/docker/scyllasetup.py
vendored
@@ -75,7 +75,8 @@ class ScyllaSetup:
|
||||
hostname = self._listenAddress
|
||||
else:
|
||||
hostname = subprocess.check_output(['hostname', '-i']).decode('ascii').strip()
|
||||
with open("%s/.cqlshrc" % home, "w") as cqlshrc:
|
||||
self._run(["mkdir", "-p", "%s/.cassandra" % home])
|
||||
with open("%s/.cassandra/cqlshrc" % home, "w") as cqlshrc:
|
||||
cqlshrc.write("[connection]\nhostname = %s\n" % hostname)
|
||||
|
||||
def set_housekeeping(self):
|
||||
|
||||
15
dist/docker/sshd-service.sh
vendored
15
dist/docker/sshd-service.sh
vendored
@@ -1,15 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [ ! -f /run/sshd ]; then
|
||||
mkdir -p /run/sshd
|
||||
fi
|
||||
|
||||
if [ ! -f /etc/ssh/ssh_host_ed25519_key ]; then
|
||||
ssh-keygen -t ed25519 -f /etc/ssh/ssh_host_ed25519_key -N ''
|
||||
fi
|
||||
if [ ! -f /etc/ssh/ssh_host_rsa_key ]; then
|
||||
ssh-keygen -t rsa -b 4096 -f /etc/ssh/ssh_host_rsa_key -N ''
|
||||
fi
|
||||
|
||||
/usr/sbin/sshd -D
|
||||
|
||||
@@ -91,7 +91,7 @@ redirects: setup
|
||||
# Preview commands
|
||||
.PHONY: preview
|
||||
preview: setup
|
||||
$(POETRY) run sphinx-autobuild -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml --host $(PREVIEW_HOST) --port 5500 --ignore '_data/*'
|
||||
$(POETRY) run sphinx-autobuild -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml --host $(PREVIEW_HOST) --port 5500 --ignore *.csv --ignore *.yaml
|
||||
|
||||
.PHONY: multiversionpreview
|
||||
multiversionpreview: multiversion
|
||||
|
||||
@@ -118,6 +118,7 @@ class AMIVersionsTemplateDirective(Directive):
|
||||
option_spec = {
|
||||
"version": directives.unchanged,
|
||||
"exclude": directives.unchanged,
|
||||
"only_latest": directives.flag,
|
||||
}
|
||||
|
||||
def _extract_version_from_filename(self, filename):
|
||||
@@ -144,10 +145,28 @@ class AMIVersionsTemplateDirective(Directive):
|
||||
version = self._extract_version_from_filename(filename)
|
||||
return tuple(map(int, version.split("."))) if version else (0,)
|
||||
|
||||
def _get_current_version(self, current_version, stable_version):
|
||||
prefix = 'branch-'
|
||||
version = current_version
|
||||
|
||||
if current_version.startswith(prefix):
|
||||
version = current_version
|
||||
elif not stable_version.startswith(prefix):
|
||||
LOGGER.error("Invalid stable_version format in conf.py. It should start with 'branch-'")
|
||||
else:
|
||||
version = stable_version
|
||||
|
||||
return version.replace(prefix, '')
|
||||
|
||||
def run(self):
|
||||
app = self.state.document.settings.env.app
|
||||
version_pattern = self.options.get("version", "")
|
||||
current_version = os.environ.get('SPHINX_MULTIVERSION_NAME', '')
|
||||
stable_version = app.config.smv_latest_version
|
||||
|
||||
version_pattern = self._get_current_version(current_version, stable_version)
|
||||
version_options = self.options.get("version", "")
|
||||
if version_options:
|
||||
version_pattern = version_options
|
||||
exclude_patterns = self.options.get("exclude", "").split(",")
|
||||
|
||||
download_directory = os.path.join(
|
||||
@@ -169,6 +188,8 @@ class AMIVersionsTemplateDirective(Directive):
|
||||
LOGGER.warning(
|
||||
f"No files match in directory '{download_directory}' with version pattern '{version_pattern}'."
|
||||
)
|
||||
elif "only_latest" in self.options:
|
||||
files = [files[0]]
|
||||
|
||||
output = []
|
||||
for file in files:
|
||||
|
||||
@@ -1,14 +1,23 @@
|
||||
import os
|
||||
import re
|
||||
import yaml
|
||||
from typing import Any, Dict, List
|
||||
|
||||
import jinja2
|
||||
|
||||
from sphinx import addnodes
|
||||
from sphinx.application import Sphinx
|
||||
from sphinxcontrib.datatemplates.directive import DataTemplateYAML
|
||||
from sphinx.directives import ObjectDescription
|
||||
from sphinx.util import logging, status_iterator, ws_re
|
||||
from sphinx.util.docfields import Field
|
||||
from sphinx.util.docutils import switch_source_input, SphinxDirective
|
||||
from sphinx.util.nodes import make_id, nested_parse_with_titles
|
||||
from sphinx.jinja2glue import BuiltinTemplateLoader
|
||||
from docutils import nodes
|
||||
from docutils.parsers.rst import directives
|
||||
from docutils.statemachine import StringList
|
||||
|
||||
CONFIG_FILE_PATH = "../db/config.cc"
|
||||
CONFIG_HEADER_FILE_PATH = "../db/config.hh"
|
||||
DESTINATION_PATH = "_data/db_config.yaml"
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class DBConfigParser:
|
||||
|
||||
@@ -47,42 +56,18 @@ class DBConfigParser:
|
||||
"""
|
||||
COMMENT_PATTERN = r"/\*.*?\*/|//.*?$"
|
||||
|
||||
def __init__(self, config_file_path, config_header_file_path, destination_path):
|
||||
all_properties = {}
|
||||
|
||||
def __init__(self, config_file_path, config_header_file_path):
|
||||
self.config_file_path = config_file_path
|
||||
self.config_header_file_path = config_header_file_path
|
||||
self.destination_path = destination_path
|
||||
|
||||
def _create_yaml_file(self, destination, data):
|
||||
current_data = None
|
||||
|
||||
try:
|
||||
with open(destination, "r") as file:
|
||||
current_data = yaml.safe_load(file)
|
||||
except FileNotFoundError:
|
||||
pass
|
||||
|
||||
if current_data != data:
|
||||
os.makedirs(os.path.dirname(destination), exist_ok=True)
|
||||
with open(destination, "w") as file:
|
||||
yaml.dump(data, file)
|
||||
|
||||
@staticmethod
|
||||
def _clean_description(description):
|
||||
return (
|
||||
description.replace("\\n", "")
|
||||
.replace('<', '<')
|
||||
.replace('>', '>')
|
||||
.replace("\n", "<br>")
|
||||
.replace("\\t", "- ")
|
||||
.replace('"', "")
|
||||
)
|
||||
|
||||
def _clean_comments(self, content):
|
||||
return re.sub(self.COMMENT_PATTERN, "", content, flags=re.DOTALL | re.MULTILINE)
|
||||
|
||||
def _parse_group(self, group_match, config_group_content):
|
||||
group_name = group_match.group(1).strip()
|
||||
group_description = self._clean_description(group_match.group(2).strip()) if group_match.group(2) else ""
|
||||
group_description = group_match.group(2).strip() if group_match.group(2) else ""
|
||||
|
||||
current_group = {
|
||||
"name": group_name,
|
||||
@@ -111,14 +96,16 @@ class DBConfigParser:
|
||||
config_matches = re.findall(self.CONFIG_CC_REGEX_PATTERN, content, re.DOTALL)
|
||||
|
||||
for match in config_matches:
|
||||
name = match[1].strip()
|
||||
property_data = {
|
||||
"name": match[1].strip(),
|
||||
"name": name,
|
||||
"value_status": match[4].strip(),
|
||||
"default": match[5].strip(),
|
||||
"liveness": "True" if match[3] else "False",
|
||||
"description": self._clean_description(match[6].strip()),
|
||||
"description": match[6].strip(),
|
||||
}
|
||||
properties.append(property_data)
|
||||
DBConfigParser.all_properties[name] = property_data
|
||||
|
||||
return properties
|
||||
|
||||
@@ -135,7 +122,7 @@ class DBConfigParser:
|
||||
if property_data["name"] == property_key:
|
||||
property_data["type"] = match[0].strip()
|
||||
|
||||
def _parse_db_properties(self):
|
||||
def parse(self):
|
||||
groups = []
|
||||
|
||||
with open(self.config_file_path, "r", encoding='utf-8') as file:
|
||||
@@ -158,26 +145,170 @@ class DBConfigParser:
|
||||
|
||||
return groups
|
||||
|
||||
def run(self, app: Sphinx):
|
||||
dest_path = os.path.join(app.builder.srcdir, self.destination_path)
|
||||
parsed_properties = self._parse_db_properties()
|
||||
self._create_yaml_file(dest_path, parsed_properties)
|
||||
@classmethod
|
||||
def get(cls, name: str):
|
||||
return DBConfigParser.all_properties[name]
|
||||
|
||||
|
||||
class DBConfigTemplateDirective(DataTemplateYAML):
|
||||
|
||||
option_spec = DataTemplateYAML.option_spec.copy()
|
||||
option_spec["value_status"] = directives.unchanged_required
|
||||
|
||||
def _make_context(self, data, config, env):
|
||||
context = super()._make_context(data, config, env)
|
||||
context["value_status"] = self.options.get("value_status")
|
||||
return context
|
||||
|
||||
|
||||
def setup(app: Sphinx):
|
||||
db_parser = DBConfigParser(
|
||||
CONFIG_FILE_PATH, CONFIG_HEADER_FILE_PATH, DESTINATION_PATH
|
||||
def readable_desc(description: str) -> str:
|
||||
return (
|
||||
description.replace("\\n", "")
|
||||
.replace('<', '<')
|
||||
.replace('>', '>')
|
||||
.replace("\n", "<br>")
|
||||
.replace("\\t", "- ")
|
||||
.replace('"', "")
|
||||
)
|
||||
app.connect("builder-inited", db_parser.run)
|
||||
app.add_directive("scylladb_config_template", DBConfigTemplateDirective)
|
||||
|
||||
|
||||
def maybe_add_filters(builder):
|
||||
env = builder.templates.environment
|
||||
if 'readable_desc' not in env.filters:
|
||||
env.filters['readable_desc'] = readable_desc
|
||||
|
||||
|
||||
class ConfigOption(ObjectDescription):
    """Directive describing a single ScyllaDB configuration option.

    The option body is produced by rendering the template named by the
    ``scylladb_cc_properties_option_tmpl`` config value with the option
    data looked up via ``DBConfigParser.get``.
    """

    has_content = True
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = False

    # TODO: instead of overriding transform_content(), render option
    # properties as a field list.
    doc_field_types = [
        Field('type', label='Type', has_arg=False, names=('type',)),
        Field('default', label='Default value', has_arg=False,
              names=('default',)),
        Field('liveness', label='Liveness', has_arg=False,
              names=('liveness',)),
    ]

    def handle_signature(self, sig: str,
                         signode: addnodes.desc_signature) -> str:
        """Render the option name as the signature; return its display form."""
        signode.clear()
        signode += addnodes.desc_name(sig, sig)
        # Normalize whitespace the same way XRefRole does.
        return ws_re.sub(' ', sig)

    @property
    def env(self):
        """The Sphinx build environment, taken from the current document."""
        return self.state.document.settings.env

    def before_content(self) -> None:
        # Make sure the readable_desc filter is available to the template.
        maybe_add_filters(self.env.app.builder)

    def _render(self, name) -> str:
        """Render the option template for *name*; error if it is unknown."""
        entry = DBConfigParser.get(name)
        if entry is None:
            raise self.error(f'Option "{name}" not found!')
        tmpl = self.config.scylladb_cc_properties_option_tmpl
        return self.env.app.builder.templates.render(tmpl, entry)

    def transform_content(self,
                          contentnode: addnodes.desc_content) -> None:
        """Parse the rendered template output into the directive body."""
        name = self.arguments[0]
        # The source is always None here, so synthesize one for diagnostics.
        _, lineno = self.get_source_info()
        source = f'scylla_config:{lineno}:<{name}>'
        fields = StringList(self._render(name).splitlines(),
                            source=source, parent_offset=lineno)
        with switch_source_input(self.state, fields):
            self.state.nested_parse(fields, 0, contentnode)

    def add_target_and_index(self, name: str, sig: str,
                             signode: addnodes.desc_signature) -> None:
        """Create a link target and an index entry for the option."""
        node_id = make_id(self.env, self.state.document, self.objtype, name)
        signode['ids'].append(node_id)
        self.state.document.note_explicit_target(signode)
        entry = f'{name}; configuration option'
        self.indexnode['entries'].append(('pair', entry, node_id, '', None))
        # Register with the std domain so cross-references to the option resolve.
        self.env.get_domain('std').note_object(
            self.objtype, name, node_id, location=signode)
|
||||
|
||||
|
||||
class ConfigOptionList(SphinxDirective):
    """Directive rendering a full list of configuration options.

    Takes two arguments (a header file and a source file), parses them with
    ``DBConfigParser``, and renders the result through the template given by
    the required ``:template:`` option, optionally filtered by
    ``:value_status:``.
    """

    has_content = False
    required_arguments = 2
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {
        'template': directives.path,
        'value_status': directives.unchanged_required,
    }

    @property
    def env(self):
        """The Sphinx build environment, taken from the current document."""
        document = self.state.document
        return document.settings.env

    def _resolve_src_path(self, path: str) -> str:
        """Resolve *path* relative to the source dir and track it as a
        dependency so the page rebuilds when the file changes."""
        _, filename = self.env.relfn2path(path)
        self.env.note_dependency(filename)
        return filename

    def _render(self, context: Dict[str, Any]) -> str:
        """Render the configured template with *context*.

        Raises a directive error when the mandatory ``template`` option is
        missing.
        """
        builder = self.env.app.builder
        template = self.options.get('template')
        if template is None:
            # BUGFIX: self.error() only *returns* a DirectiveError -- it must
            # be raised, otherwise rendering proceeded with template=None.
            # (Matches ConfigOption._render.)  Also dropped the pointless
            # f-prefix from the placeholder-free message.
            raise self.error('Option "template" not specified!')
        return builder.templates.render(template, context)

    def _make_context(self) -> Dict[str, Any]:
        """Parse the header/source arguments into the template context."""
        header = self._resolve_src_path(self.arguments[0])
        source = self._resolve_src_path(self.arguments[1])
        db_parser = DBConfigParser(source, header)
        value_status = self.options.get("value_status")
        return dict(data=db_parser.parse(),
                    value_status=value_status)

    def run(self) -> List[nodes.Node]:
        """Render the option list and parse it as a document fragment."""
        maybe_add_filters(self.env.app.builder)
        rendered = self._render(self._make_context())
        contents = StringList(rendered.splitlines())
        node = nodes.section()
        node.document = self.state.document
        nested_parse_with_titles(self.state, contents, node)
        return node.children
|
||||
|
||||
|
||||
def setup(app: Sphinx) -> Dict[str, Any]:
    """Register the config-option directives and their config value."""
    app.add_config_value(
        'scylladb_cc_properties_option_tmpl',
        default='db_option.tmpl',
        rebuild='html',
        types=[str])

    # Grouping directive with its own index entries and an Example field.
    app.add_object_type(
        'confgroup', 'confgroup',
        objname='configuration group',
        indextemplate='pair: %s; configuration group',
        doc_field_types=[
            Field('example', label='Example', has_arg=False)
        ])
    app.add_object_type('confval', 'confval',
                        objname='configuration option')
    # Replace the generic confval directive with the template-rendering one.
    app.add_directive_to_domain('std', 'confval', ConfigOption, override=True)
    app.add_directive('scylladb_config_list', ConfigOptionList)

    return {
        "version": "0.1",
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
|
||||
|
||||
25
docs/_ext/scylladb_include_flag.py
Normal file
25
docs/_ext/scylladb_include_flag.py
Normal file
@@ -0,0 +1,25 @@
|
||||
from sphinx.directives.other import Include
|
||||
from docutils.parsers.rst import directives
|
||||
|
||||
class IncludeFlagDirective(Include):
    """``include`` variant that picks a flavor-specific base directory.

    The included path is prefixed with ``<base_path>_enterprise/`` when the
    build runs with the ``enterprise`` tag, and with ``<base_path>/``
    otherwise.  ``base_path`` defaults to ``_common``.
    """

    option_spec = Include.option_spec.copy()
    option_spec['base_path'] = directives.unchanged

    def run(self):
        env = self.state.document.settings.env
        base_path = self.options.get('base_path', '_common')
        if env.app.tags.has('enterprise'):
            prefix = base_path + "_enterprise/"
        else:
            prefix = base_path + "/"
        self.arguments[0] = prefix + self.arguments[0]
        return super().run()
|
||||
|
||||
def setup(app):
    """Register the flavor-aware include directive with Sphinx."""
    app.add_directive('scylladb_include_flag', IncludeFlagDirective,
                      override=True)
    return {
        "version": "0.1",
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
|
||||
16
docs/_static/css/custom.css
vendored
16
docs/_static/css/custom.css
vendored
@@ -17,10 +17,22 @@
|
||||
}
|
||||
|
||||
.content blockquote li p {
|
||||
margin-bottom: 10px;
|
||||
margin-bottom: 5px;
|
||||
}
|
||||
|
||||
h3 .pre {
|
||||
font-size: 16px;
|
||||
font-weight: bold;
|
||||
}
|
||||
}
|
||||
|
||||
hr {
|
||||
max-width: 100%;
|
||||
}
|
||||
|
||||
dl dt:hover > a.headerlink {
|
||||
visibility: visible;
|
||||
}
|
||||
|
||||
dl.confval {
|
||||
border-bottom: 1px solid #cacaca;
|
||||
}
|
||||
|
||||
14
docs/_templates/db_config.tmpl
vendored
14
docs/_templates/db_config.tmpl
vendored
@@ -8,22 +8,12 @@
|
||||
{% if group.description %}
|
||||
.. raw:: html
|
||||
|
||||
<p>{{ group.description }}</p>
|
||||
<p>{{ group.description | readable_desc }}</p>
|
||||
{% endif %}
|
||||
|
||||
{% for item in group.properties %}
|
||||
{% if item.value_status == value_status %}
|
||||
``{{ item.name }}``
|
||||
{{ '=' * (item.name|length + 4) }}
|
||||
|
||||
.. raw:: html
|
||||
|
||||
<p>{{ item.description }}</p>
|
||||
|
||||
{% if item.type %}* **Type:** ``{{ item.type }}``{% endif %}
|
||||
{% if item.default %}* **Default value:** ``{{ item.default }}``{% endif %}
|
||||
{% if item.liveness %}* **Liveness** :term:`* <Liveness>` **:** ``{{ item.liveness }}``{% endif %}
|
||||
|
||||
.. confval:: {{ item.name }}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
|
||||
7
docs/_templates/db_option.tmpl
vendored
Normal file
7
docs/_templates/db_option.tmpl
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
.. raw:: html
|
||||
|
||||
<p>{{ description | readable_desc }}</p>
|
||||
|
||||
{% if type %}* **Type:** ``{{ type }}``{% endif %}
|
||||
{% if default %}* **Default value:** ``{{ default }}``{% endif %}
|
||||
{% if liveness %}* **Liveness** :term:`* <Liveness>` **:** ``{{ liveness }}``{% endif %}
|
||||
@@ -1,2 +1,107 @@
|
||||
### a dictionary of redirections
|
||||
#old path: new path
|
||||
#old path: new path
|
||||
|
||||
|
||||
# Removed the outdated upgrade guides
|
||||
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-ubuntu-14-to-16.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.x.y-to-2.x.z/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.x.y-to-2.x.z/upgrade-guide-from-2.x.y-to-2.x.z-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.x.y-to-2.x.z/upgrade-guide-from-2.x.y-to-2.x.z-ubuntu.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.x.y-to-2.x.z/upgrade-guide-from-2.x.y-to-2.x.z-debian.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.1-to-2.2/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.1-to-2.2/upgrade-guide-from-2.1-to-2.2-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.1-to-2.2/upgrade-guide-from-2.1-to-2.2-ubuntu.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.1-to-2.2/upgrade-guide-from-2.1-to-2.2-debian.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.1-to-2.2/metric-update-2.1-to-2.2.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.2-to-2.3/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.2-to-2.3/upgrade-guide-from-2.2-to-2.3-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.2-to-2.3/upgrade-guide-from-2.2-to-2.3-ubuntu.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.2-to-2.3/upgrade-guide-from-2.2-to-2.3-ubuntu-16-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.2-to-2.3/upgrade-guide-from-2.2-to-2.3-debian.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.2-to-2.3/metric-update-2.2-to-2.3.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.3-to-3.0/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.3-to-3.0/upgrade-guide-from-2.3-to-3.0-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.3-to-3.0/upgrade-guide-from-2.3-to-3.0-ubuntu.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.3-to-3.0/upgrade-guide-from-2.3-to-3.0-ubuntu-16-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.3-to-3.0/upgrade-guide-from-2.3-to-3.0-ubuntu-18-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.3-to-3.0/upgrade-guide-from-2.3-to-3.0-debian.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.3-to-3.0/metric-update-2.3-to-3.0.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.0-to-3.1/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.0-to-3.1/upgrade-guide-from-3.0-to-3.1-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.0-to-3.1/upgrade-guide-from-3.0-to-3.1-ubuntu-16-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.0-to-3.1/upgrade-guide-from-3.0-to-3.1-ubuntu-18-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.0-to-3.1/upgrade-guide-from-3.0-to-3.1-debian.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.0-to-3.1/metric-update-3.0-to-3.1.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.1-to-3.2/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.1-to-3.2/upgrade-guide-from-3.1-to-3.2-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.1-to-3.2/upgrade-guide-from-3.1-to-3.2-ubuntu-16-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.1-to-3.2/upgrade-guide-from-3.1-to-3.2-ubuntu-18-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.1-to-3.2/upgrade-guide-from-3.1-to-3.2-debian.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.1-to-3.2/metric-update-3.1-to-3.2.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.2-to-3.3/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.2-to-3.3/upgrade-guide-from-3.2-to-3.3-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.2-to-3.3/upgrade-guide-from-3.2-to-3.3-ubuntu-16-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.2-to-3.3/upgrade-guide-from-3.2-to-3.3-ubuntu-18-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.2-to-3.3/upgrade-guide-from-3.2-to-3.3-debian.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.2-to-3.3/metric-update-3.2-to-3.3.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.3-to-4.0/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.3-to-4.0/upgrade-guide-from-3.3-to-4.0-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.3-to-4.0/upgrade-guide-from-3.3-to-4.0-ubuntu-16-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.3-to-4.0/upgrade-guide-from-3.3-to-4.0-ubuntu-18-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.3-to-4.0/upgrade-guide-from-3.3-to-4.0-debian.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.3-to-4.0/metric-update-3.3-to-4.0.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.x.y-to-3.x.z/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.x.y-to-3.x.z/upgrade-guide-from-3.x.y-to-3.x.z-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.x.y-to-3.x.z/upgrade-guide-from-3.x.y-to-3.x.z-ubuntu.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.x.y-to-3.x.z/upgrade-guide-from-3.x.y-to-3.x.z-debian.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.0-to-4.1/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.0-to-4.1/upgrade-guide-from-4.0-to-4.1-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.0-to-4.1/upgrade-guide-from-4.0-to-4.1-ubuntu-16-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.0-to-4.1/upgrade-guide-from-4.0-to-4.1-ubuntu-18-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.0-to-4.1/upgrade-guide-from-4.0-to-4.1-debian.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.0-to-4.1/alternator.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.0-to-4.1/metric-update-4.0-to-4.1.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.x.y-to-4.x.z/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.x.y-to-4.x.z/upgrade-guide-from-4.x.y-to-4.x.z-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.x.y-to-4.x.z/upgrade-guide-from-4.x.y-to-4.x.z-ubuntu.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.x.y-to-4.x.z/upgrade-guide-from-4.x.y-to-4.x.z-debian.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.1-to-4.2/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.1-to-4.2/upgrade-guide-from-4.1-to-4.2-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.1-to-4.2/upgrade-guide-from-4.1-to-4.2-ubuntu-16-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.1-to-4.2/upgrade-guide-from-4.1-to-4.2-ubuntu-18-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.1-to-4.2/upgrade-guide-from-4.1-to-4.2-debian-9.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.1-to-4.2/upgrade-guide-from-4.1-to-4.2-debian-10.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.1-to-4.2/metric-update-4.1-to-4.2.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.2-to-4.3/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.2-to-4.3/upgrade-guide-from-4.2-to-4.3-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.2-to-4.3/upgrade-guide-from-4.2-to-4.3-ubuntu-16-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.2-to-4.3/upgrade-guide-from-4.2-to-4.3-ubuntu-18-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.2-to-4.3/upgrade-guide-from-4.2-to-4.3-debian-9.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.2-to-4.3/upgrade-guide-from-4.2-to-4.3-debian-10.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.2-to-4.3/metric-update-4.2-to-4.3.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.3-to-4.4/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.3-to-4.4/upgrade-guide-from-4.3-to-4.4-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.3-to-4.4/upgrade-guide-from-4.3-to-4.4-ubuntu-16-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.3-to-4.4/upgrade-guide-from-4.3-to-4.4-ubuntu-18-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.3-to-4.4/upgrade-guide-from-4.3-to-4.4-ubuntu-20-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.3-to-4.4/upgrade-guide-from-4.3-to-4.4-debian-9.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.3-to-4.4/upgrade-guide-from-4.3-to-4.4-debian-10.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.3-to-4.4/metric-update-4.3-to-4.4.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.4-to-4.5/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.4-to-4.5/upgrade-guide-from-4.4-to-4.5-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.4-to-4.5/upgrade-guide-from-4.4-to-4.5-ubuntu-16-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.4-to-4.5/upgrade-guide-from-4.4-to-4.5-ubuntu-18-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.4-to-4.5/upgrade-guide-from-4.4-to-4.5-ubuntu-20-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.4-to-4.5/upgrade-guide-from-4.4-to-4.5-debian-9.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.4-to-4.5/upgrade-guide-from-4.4-to-4.5-debian-10.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.4-to-4.5/metric-update-4.4-to-4.5.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.5-to-4.6/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.5-to-4.6/upgrade-guide-from-4.5-to-4.6-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.5-to-4.6/upgrade-guide-from-4.5-to-4.6-ubuntu-16-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.5-to-4.6/upgrade-guide-from-4.5-to-4.6-ubuntu-18-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.5-to-4.6/upgrade-guide-from-4.5-to-4.6-ubuntu-20-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.5-to-4.6/upgrade-guide-from-4.5-to-4.6-debian-9.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.5-to-4.6/upgrade-guide-from-4.5-to-4.6-debian-10.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.5-to-4.6/metric-update-4.5-to-4.6.html: /stable/upgrade/index.html
|
||||
|
||||
|
||||
@@ -8,8 +8,7 @@ Scylla implements the following compaction strategies in order to reduce :term:`
|
||||
* `Size-tiered compaction strategy (STCS)`_ - triggered when the system has enough (four by default) similarly sized SSTables.
|
||||
* `Leveled compaction strategy (LCS)`_ - the system uses small, fixed-size (by default 160 MB) SSTables distributed across different levels.
|
||||
* `Incremental Compaction Strategy (ICS)`_ - shares the same read and write amplification factors as STCS, but it fixes its 2x temporary space amplification issue by breaking huge sstables into SSTable runs, which are comprised of a sorted set of smaller (1 GB by default), non-overlapping SSTables.
|
||||
* `Time-window compaction strategy (TWCS)`_ - designed for time series data; replaced Date-tiered compaction.
|
||||
* `Date-tiered compaction strategy (DTCS)`_ - designed for time series data.
|
||||
* `Time-window compaction strategy (TWCS)`_ - designed for time series data.
|
||||
|
||||
This document covers how to choose a compaction strategy and presents the benefits and disadvantages of each one. If you want more information on compaction in general or on any of these strategies, refer to the :doc:`Compaction Overview </kb/compaction>`. If you want an explanation of the CQL commands used to create a compaction strategy, refer to :doc:`Compaction CQL Reference </cql/compaction>` .
|
||||
|
||||
@@ -78,7 +77,6 @@ ICS is only available in ScyllaDB Enterprise. See the `ScyllaDB Enterprise docume
|
||||
Time-window Compaction Strategy (TWCS)
|
||||
======================================
|
||||
|
||||
Time-window compaction strategy was introduced in Cassandra 3.0.8 for time-series data as a replacement for `Date-tiered Compaction Strategy (DTCS)`_.
|
||||
Time-Window Compaction Strategy compacts SSTables within each time window using `Size-tiered Compaction Strategy (STCS)`_.
|
||||
SSTables from different time windows are never compacted together. You set the :ref:`TimeWindowCompactionStrategy <time-window-compactionstrategy-twcs>` parameters when you create a table using a CQL command.
|
||||
|
||||
@@ -87,9 +85,8 @@ SSTables from different time windows are never compacted together. You set the :
|
||||
Time-window Compaction benefits
|
||||
-------------------------------
|
||||
|
||||
* Keeps entries according to a time range, making searches for data within a given range easy to do, resulting in better read performance
|
||||
* Improves over DTCS in that it reduces the number of huge compactions
|
||||
* Allows you to expire an entire SSTable at once (using a TTL) as the data is already organized within a time frame
|
||||
* Keeps entries according to a time range, making searches for data within a given range easy to do, resulting in better read performance.
|
||||
* Allows you to expire an entire SSTable at once (using a TTL) as the data is already organized within a time frame.
|
||||
|
||||
Time-window Compaction deficits
|
||||
-------------------------------
|
||||
@@ -102,14 +99,6 @@ Set the parameters for :ref:`Time-window Compaction <time-window-compactionstrat
|
||||
|
||||
Use the table in `Which strategy is best`_ to determine if this is the right strategy for your needs.
|
||||
|
||||
.. _DTCS1:
|
||||
|
||||
Date-tiered Compaction Strategy (DTCS)
|
||||
======================================
|
||||
|
||||
Date-Tiered Compaction is designed for time series data. This strategy was introduced with Cassandra 2.1.
|
||||
It is only suitable for time-series data. This strategy is not recommended and has been replaced by :ref:`Time-window Compaction Strategy <TWCS1>`.
|
||||
|
||||
.. _which-strategy-is-best:
|
||||
|
||||
Which strategy is best
|
||||
|
||||
@@ -37,7 +37,12 @@ Enabling Raft
|
||||
|
||||
.. note::
|
||||
In ScyllaDB 5.2 and ScyllaDB Enterprise 2023.1 Raft is Generally Available and can be safely used for consistent schema management.
|
||||
In further versions, it will be mandatory.
|
||||
It will get enabled by default when you upgrade your cluster to ScyllaDB 5.4 or 2024.1.
|
||||
If needed, you can explicitly prevent it from getting enabled upon upgrade.
|
||||
|
||||
.. only:: opensource
|
||||
|
||||
See :doc:`the upgrade guide from 5.2 to 5.4 </upgrade/index>` for details.
|
||||
|
||||
ScyllaDB Open Source 5.2 and later, and ScyllaDB Enterprise 2023.1 and later come equipped with a procedure that can setup Raft-based consistent cluster management in an existing cluster. We refer to this as the **Raft upgrade procedure** (do not confuse with the :doc:`ScyllaDB version upgrade procedure </upgrade/index/>`).
|
||||
|
||||
@@ -214,6 +219,36 @@ of nodes in the cluster is available. The following examples illustrate how Raft
|
||||
|
||||
In summary, Raft makes schema changes safe, but it requires that a quorum of nodes in the cluster is available.
|
||||
|
||||
.. _raft-topology-changes:
|
||||
|
||||
.. only:: opensource
|
||||
|
||||
Consistent Topology with Raft :label-caution:`Experimental`
|
||||
-----------------------------------------------------------------
|
||||
|
||||
ScyllaDB can use Raft to manage cluster topology. With Raft-managed topology
|
||||
enabled, all topology operations are internally sequenced in a consistent
|
||||
way. A centralized coordination process ensures that topology metadata is
|
||||
synchronized across the nodes on each step of a topology change procedure.
|
||||
This makes topology updates fast and safe, as the cluster administrator can
|
||||
trigger many topology operations concurrently, and the coordination process
|
||||
will safely drive all of them to completion. For example, multiple nodes can
|
||||
be bootstrapped concurrently, which couldn't be done with the old
|
||||
gossip-based topology.
|
||||
|
||||
Support for Raft-managed topology is experimental and must be explicitly
|
||||
enabled in the ``scylla.yaml`` configuration file by specifying
|
||||
the ``consistent-topology-changes`` option:
|
||||
|
||||
.. code::
|
||||
|
||||
experimental_features:
|
||||
- consistent-topology-changes
|
||||
|
||||
As with other experimental features in ScyllaDB, you should not enable this
|
||||
feature in production clusters due to insufficient stability. The feature
|
||||
is undergoing backward-incompatible changes that may prevent upgrading
|
||||
the cluster.
|
||||
|
||||
.. _raft-handling-failures:
|
||||
|
||||
|
||||
@@ -39,7 +39,8 @@ extensions = [
|
||||
"recommonmark", # optional
|
||||
"sphinxcontrib.datatemplates",
|
||||
"scylladb_cc_properties",
|
||||
"scylladb_aws_images"
|
||||
"scylladb_aws_images",
|
||||
"scylladb_include_flag"
|
||||
]
|
||||
|
||||
# The suffix(es) of source filenames.
|
||||
|
||||
@@ -19,8 +19,6 @@ The following compaction strategies are supported by Scylla:
|
||||
|
||||
* Time-window Compaction Strategy (`TWCS`_)
|
||||
|
||||
* Date-tiered Compaction Strategy (DTCS) - use `TWCS`_ instead
|
||||
|
||||
This page concentrates on the parameters to use when creating a table with a compaction strategy. If you are unsure which strategy to use or want general information on the compaction strategies which are available to Scylla, refer to :doc:`Compaction Strategies </architecture/compaction/compaction-strategies>`.
|
||||
|
||||
Common options
|
||||
|
||||
@@ -79,27 +79,66 @@ and to the TRUNCATE data definition query.
|
||||
|
||||
In addition, the timeout parameter can be applied to SELECT queries as well.
|
||||
|
||||
```eval_rst
|
||||
.. _keyspace-storage-options:
|
||||
```
|
||||
|
||||
## Keyspace storage options
|
||||
|
||||
Storage options allow specifying the storage format assigned to a keyspace.
|
||||
The default storage format is `LOCAL`, which simply means storing all the sstables
|
||||
in a local directory.
|
||||
Experimental support for `S3` storage format is also added. This option is not fully
|
||||
implemented yet, but it will allow storing sstables in a shared, S3-compatible object store.
|
||||
<!---
|
||||
This section must be moved to Data Definition> CREATE KEYSPACE
|
||||
when support for object storage is GA.
|
||||
--->
|
||||
|
||||
Storage options can be specified via `CREATE KEYSPACE` or `ALTER KEYSPACE` statement
|
||||
and it's formatted as a map of options - similarly to how replication strategy is handled.
|
||||
By default, SStables of a keyspace are stored in a local directory.
|
||||
As an alternative, you can configure your keyspace to be stored
|
||||
on Amazon S3 or another S3-compatible object store.
|
||||
|
||||
Examples:
|
||||
```cql
|
||||
CREATE KEYSPACE ks
|
||||
WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 3 }
|
||||
AND STORAGE = { 'type' : 'S3', 'bucket' : '/tmp/b1', 'endpoint' : 'localhost' } ;
|
||||
Support for object storage is experimental and must be explicitly
|
||||
enabled in the ``scylla.yaml`` configuration file by specifying
|
||||
the ``keyspace-storage-options`` option:
|
||||
|
||||
```
|
||||
experimental_features:
|
||||
- keyspace-storage-options
|
||||
```
|
||||
|
||||
With support for object storage enabled, add your endpoint configuration
|
||||
to ``scylla.yaml``:
|
||||
|
||||
1. Create an ``object-storage-config-file.yaml`` file with a description of
|
||||
allowed endpoints, for example:
|
||||
|
||||
```
|
||||
endpoints:
|
||||
- name: $endpoint_address_or_domain_name
|
||||
port: $port_number
|
||||
https: optional True or False
|
||||
aws_region: optional region name, e.g. us-east-1
|
||||
aws_access_key_id: optional AWS access key ID
|
||||
aws_secret_access_key: optional AWS secret access key
|
||||
aws_session_token: optional AWS session token
|
||||
```
|
||||
1. Specify the ``object-storage-config-file`` option in your ``scylla.yaml``,
|
||||
providing ``object-storage-config-file.yaml`` as the value:
|
||||
|
||||
```
|
||||
object-storage-config-file: object-storage-config-file.yaml
|
||||
```
|
||||
|
||||
|
||||
Now you can configure your object storage when creating a keyspace:
|
||||
|
||||
```cql
|
||||
ALTER KEYSPACE ks WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 3 }
|
||||
AND STORAGE = { 'type' : 'S3', 'bucket': '/tmp/b2', 'endpoint' : 'localhost' } ;
|
||||
CREATE KEYSPACE with STORAGE = { 'type': 'S3', 'endpoint': '$endpoint_name', 'bucket': '$bucket' }
|
||||
```
|
||||
|
||||
**Example**
|
||||
|
||||
```cql
|
||||
CREATE KEYSPACE ks
|
||||
WITH REPLICATION = { 'class' : 'NetworkTopologyStrategy', 'replication_factor' : 3 }
|
||||
AND STORAGE = { 'type' : 'S3', 'bucket' : '/tmp/b1', 'endpoint' : 'localhost' } ;
|
||||
```
|
||||
|
||||
Storage options can be inspected by checking the new system schema table: `system_schema.scylla_keyspaces`:
|
||||
|
||||
@@ -6,18 +6,26 @@
|
||||
CQLSh: the CQL shell
|
||||
--------------------
|
||||
|
||||
cqlsh is a command line shell for interacting with Cassandra through CQL (the Cassandra Query Language). It is shipped
|
||||
with every Cassandra package and can be found in the bin/ directory alongside the Cassandra executable. cqlsh utilizes
|
||||
the Python native protocol driver and connects to the single node specified on the command line.
|
||||
cqlsh is a command line shell for interacting with ScyllaDB through CQL
|
||||
(the Cassandra Query Language). It is shipped with every ScyllaDB package
|
||||
and can be found in the ``bin/`` directory. In addition, it is available on
|
||||
`Docker Hub <https://hub.docker.com/r/scylladb/scylla-cqlsh>`_ and in
|
||||
the `Python Package Index (PyPI) <https://pypi.org/project/scylla-cqlsh/>`_.
|
||||
|
||||
cqlsh utilizes the Python native protocol driver and connects to the single
|
||||
node specified on the command line.
|
||||
|
||||
See the `scylla-cqlsh <https://github.com/scylladb/scylla-cqlsh>`_ repository
|
||||
on GitHub for usage examples.
|
||||
|
||||
|
||||
Compatibility
|
||||
^^^^^^^^^^^^^
|
||||
|
||||
cqlsh is compatible with Python 2.7.
|
||||
cqlsh is compatible with Python 3.8 - Python 3.11.
|
||||
|
||||
In general, a given version of cqlsh is only guaranteed to work with the version of Cassandra that it was released with.
|
||||
In some cases, cqlsh may work with older or newer versions of Cassandra, but this is not officially supported.
|
||||
A given version of cqlsh is only guaranteed to work with the version of ScyllaDB that it was released with.
|
||||
cqlsh may work with older or newer versions of ScyllaDB without any guarantees.
|
||||
|
||||
|
||||
Optional Dependencies
|
||||
@@ -72,13 +80,13 @@ Options:
|
||||
``/usr/bin/google-chrome-stable %s``).
|
||||
|
||||
``--ssl``
|
||||
Use SSL when connecting to Cassandra
|
||||
Use SSL when connecting to ScyllaDB.
|
||||
|
||||
``-u`` ``--user``
|
||||
Username to authenticate against Cassandra with
|
||||
Username to authenticate against ScyllaDB.
|
||||
|
||||
``-p`` ``--password``
|
||||
The password to authenticate against Cassandra with should
|
||||
The password to authenticate against ScyllaDB, which should
|
||||
be used in conjunction with ``--user``
|
||||
|
||||
``-k`` ``--keyspace``
|
||||
@@ -162,17 +170,17 @@ consistency ``ALL`` is not guaranteed to be enough).
|
||||
|
||||
SHOW VERSION
|
||||
~~~~~~~~~~~~
|
||||
This command is useful if you want to check which Cassandra version is compatible with your Scylla version.
|
||||
This command is useful if you want to check which Cassandra version is compatible with your ScyllaDB version.
|
||||
Note that the two standards are not 100% identical and this command is simply a comparison tool.
|
||||
|
||||
If you want to display your current Scylla Version, refer to :ref:`Check your current version of Scylla <check-your-current-version-of-scylla>`.
|
||||
If you want to display your current ScyllaDB version, refer to :ref:`Check your current version of Scylla <check-your-current-version-of-scylla>`.
|
||||
|
||||
The display shows:
|
||||
|
||||
* The cqlsh tool version that you're using
|
||||
* The Apache Cassandra version that your version of Scylla is most compatible with
|
||||
* The CQL protocol standard that your version of Scylla is most compatible with
|
||||
* The native protocol standard that your version of Scylla is most compatible with
|
||||
* The Apache Cassandra version that your version of ScyllaDB is most compatible with
|
||||
* The CQL protocol standard that your version of ScyllaDB is most compatible with
|
||||
* The native protocol standard that your version of ScyllaDB is most compatible with
|
||||
|
||||
Example:
|
||||
|
||||
@@ -191,7 +199,7 @@ Returns:
|
||||
SHOW HOST
|
||||
~~~~~~~~~
|
||||
|
||||
Prints the IP address and port of the Cassandra node that cqlsh is connected to in addition to the cluster name.
|
||||
Prints the IP address and port of the ScyllaDB node that cqlsh is connected to in addition to the cluster name.
|
||||
|
||||
Example:
|
||||
|
||||
@@ -324,7 +332,7 @@ contents of a single column are large.
|
||||
LOGIN
|
||||
~~~~~
|
||||
|
||||
Authenticate as a specified Cassandra user for the current session.
|
||||
Authenticate as a specified ScyllaDB user for the current session.
|
||||
|
||||
`Usage`::
|
||||
|
||||
|
||||
@@ -198,6 +198,18 @@ An example that excludes a datacenter while using ``replication_factor``::
|
||||
DESCRIBE KEYSPACE excalibur
|
||||
CREATE KEYSPACE excalibur WITH replication = {'class': 'NetworkTopologyStrategy', 'DC1': '3'} AND durable_writes = true;
|
||||
|
||||
|
||||
|
||||
.. only:: opensource
|
||||
|
||||
Keyspace storage options :label-caution:`Experimental`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
By default, SStables of a keyspace are stored locally.
|
||||
As an alternative, you can configure your keyspace to be stored
|
||||
on Amazon S3 or another S3-compatible object store.
|
||||
See :ref:`Keyspace storage options <keyspace-storage-options>` for details.
|
||||
|
||||
.. _use-statement:
|
||||
|
||||
USE
|
||||
@@ -687,19 +699,12 @@ Compaction options
|
||||
|
||||
The ``compaction`` options must at least define the ``'class'`` sub-option, which defines the compaction strategy class
|
||||
to use. The default supported class are ``'SizeTieredCompactionStrategy'``,
|
||||
``'LeveledCompactionStrategy'``, ``'IncrementalCompactionStrategy'``, and ``'DateTieredCompactionStrategy'``
|
||||
``'LeveledCompactionStrategy'``, and ``'IncrementalCompactionStrategy'``.
|
||||
Custom strategy can be provided by specifying the full class name as a :ref:`string constant
|
||||
<constants>`.
|
||||
|
||||
All default strategies support a number of common options, as well as options specific to
|
||||
the strategy chosen (see the section corresponding to your strategy for details: :ref:`STCS <stcs-options>`, :ref:`LCS <lcs-options>`, and :ref:`TWCS <twcs-options>`). DTCS is not recommended, and TWCS should be used instead.
|
||||
|
||||
|
||||
.. ``'Date Tiered Compaction Strategy is not recommended and has been replaced by Time Window Compaction Strategy.'`` (:ref:`TWCS <TWCS>`) (the
|
||||
.. is also supported but is deprecated and ``'TimeWindowCompactionStrategy'`` should be
|
||||
.. preferred instead).
|
||||
|
||||
|
||||
the strategy chosen (see the section corresponding to your strategy for details: :ref:`STCS <stcs-options>`, :ref:`LCS <lcs-options>`, and :ref:`TWCS <twcs-options>`).
|
||||
|
||||
.. _cql-compression-options:
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@ Wasm support for user-defined functions
|
||||
This document describes the details of Wasm language support in user-defined functions (UDF). The language ``wasm`` is one of the possible languages to use, besides Lua, to implement these functions. To learn more about User-defined functions in ScyllaDB, click :ref:`here <udfs>`.
|
||||
|
||||
|
||||
.. note:: Until ScyllaDB 5.2, the Wasm language was called ``xwasm``. This name is replaced with ``wasm`` in ScyllaDB 5.3.
|
||||
.. note:: Until ScyllaDB 5.2, the Wasm language was called ``xwasm``. This name is replaced with ``wasm`` in ScyllaDB 5.4.
|
||||
|
||||
How to generate a correct Wasm UDF source code
|
||||
----------------------------------------------
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
.. |UBUNTU_SCYLLADB_LIST| replace:: scylla-5.2.list
|
||||
.. |CENTOS_SCYLLADB_REPO| replace:: scylla-5.2.repo
|
||||
.. |UBUNTU_SCYLLADB_LIST| replace:: scylla-5.4.list
|
||||
.. |CENTOS_SCYLLADB_REPO| replace:: scylla-5.4.repo
|
||||
|
||||
.. The |RHEL_EPEL| variable needs to be adjusted per release, depending on support for RHEL.
|
||||
.. 5.2 supports Rocky/RHEL 8
|
||||
.. When RHEL 9 is supported, add https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm
|
||||
.. |RHEL_EPEL| replace:: https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm
|
||||
.. 5.2 supports Rocky/RHEL 8 only
|
||||
.. 5.4 supports Rocky/RHEL 8 and 9
|
||||
.. |RHEL_EPEL_8| replace:: https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm
|
||||
.. |RHEL_EPEL_9| replace:: https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm
|
||||
|
||||
======================================
|
||||
Install ScyllaDB Linux Packages
|
||||
@@ -113,14 +114,21 @@ Install ScyllaDB
|
||||
sudo yum install epel-release
|
||||
|
||||
|
||||
RHEL:
|
||||
Rocky/RHEL 8
|
||||
|
||||
.. code-block:: console
|
||||
:substitutions:
|
||||
|
||||
sudo yum -y install |RHEL_EPEL|
|
||||
sudo yum -y install |RHEL_EPEL_8|
|
||||
|
||||
|
||||
Rocky/RHEL 9
|
||||
|
||||
.. code-block:: console
|
||||
:substitutions:
|
||||
|
||||
sudo yum -y install |RHEL_EPEL_9|
|
||||
|
||||
#. Add the ScyllaDB RPM repository to your system.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
@@ -8,8 +8,14 @@ as-a-service, see `ScyllaDB Cloud documentation <https://cloud.docs.scylladb.com
|
||||
Launching Instances from ScyllaDB AMI
|
||||
---------------------------------------
|
||||
|
||||
#. Go to `Amazon EC2 AMIs – ScyllaDB <https://www.scylladb.com/download/?platform=aws#open-source>`_ in ScyllaDB's download center,
|
||||
choose your region, and click the **Node** link to open the EC2 instance creation wizard.
|
||||
#. Choose your region, and click the **Node** link to open the EC2 instance creation wizard.
|
||||
|
||||
The following table shows the latest patch release. See :doc:`AWS Images </reference/aws-images/>` for earlier releases.
|
||||
|
||||
.. scylladb_aws_images_template::
|
||||
:exclude: rc,dev
|
||||
:only_latest:
|
||||
|
||||
#. Choose the instance type. See :ref:`Cloud Instance Recommendations for AWS <system-requirements-aws>` for the list of recommended instances.
|
||||
|
||||
Other instance types will work, but with lesser performance. If you choose an instance type other than the recommended ones, make sure to run the :ref:`scylla_setup <system-configuration-scripts>` script.
|
||||
|
||||
@@ -5,8 +5,6 @@ The following matrix shows which Linux distributions, containers, and images are
|
||||
|
||||
Where *supported* in this scope means:
|
||||
|
||||
.. REMOVE IN FUTURE VERSIONS - Remove information about versions from the notes below in version 5.2.
|
||||
|
||||
- A binary installation package is available to `download <https://www.scylladb.com/download/>`_.
|
||||
- The download and install procedures are tested as part of ScyllaDB release process for each version.
|
||||
- An automated install is included from :doc:`ScyllaDB Web Installer for Linux tool </getting-started/installation-common/scylla-web-installer>` (for latest versions)
|
||||
@@ -27,39 +25,18 @@ ScyllaDB Open Source
|
||||
|
||||
The recommended OS for ScyllaDB Open Source is Ubuntu 22.04.
|
||||
|
||||
+----------------------------+----------------------------------+-----------------------------+---------+---------------+
|
||||
| Linux Distributions | Ubuntu | Debian | CentOS /| Rocky / |
|
||||
| | | | RHEL | RHEL |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+-------+
|
||||
| ScyllaDB Version / Version | 14.04| 16.04| 18.04|20.04 |22.04 | 8 | 9 | 10 | 11 | 7 | 8 | 9 |
|
||||
+============================+======+======+======+======+======+======+======+=======+=======+=========+=======+=======+
|
||||
| 5.3 | |x| | |x| | |x| | |v| | |v| | |x| | |x| | |v| | |v| | |v| | |v| | |v| |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+-------+
|
||||
| 5.2 | |x| | |x| | |x| | |v| | |v| | |x| | |x| | |v| | |v| | |v| | |v| | |x| |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+-------+
|
||||
| 5.1 | |x| | |x| | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |v| | |v| | |x| |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+-------+
|
||||
| 5.0 | |x| | |x| | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |v| | |v| | |x| |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+-------+
|
||||
| 4.6 | |x| | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |x| | |v| | |v| | |x| |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+-------+
|
||||
| 4.5 | |x| | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |x| | |v| | |v| | |x| |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+-------+
|
||||
| 4.4 | |x| | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |x| | |v| | |v| | |x| |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+-------+
|
||||
| 4.3 | |x| | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |x| | |v| | |v| | |x| |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+-------+
|
||||
| 4.2 | |x| | |v| | |v| | |x| | |x| | |x| | |v| | |v| | |x| | |v| | |v| | |x| |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+-------+
|
||||
| 4.1 | |x| | |v| | |v| | |x| | |x| | |x| | |v| | |v| | |x| | |v| | |v| | |x| |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+-------+
|
||||
| 4.0 | |x| | |v| | |v| | |x| | |x| | |x| | |v| | |x| | |x| | |v| | |x| | |x| |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+-------+
|
||||
|
||||
|
||||
All releases are available as a Docker container, EC2 AMI, and a GCP image (GCP image from version 4.3). Since
|
||||
version 5.2, the ScyllaDB AMI/Image OS for ScyllaDB Open Source is based on Ubuntu 22.04.
|
||||
+----------------------------+-------------+---------------+---------+---------------+
|
||||
| Linux Distributions |Ubuntu | Debian | CentOS /| Rocky / |
|
||||
| | | | RHEL | RHEL |
|
||||
+----------------------------+------+------+-------+-------+---------+-------+-------+
|
||||
| ScyllaDB Version / Version |20.04 |22.04 | 10 | 11 | 7 | 8 | 9 |
|
||||
+============================+======+======+=======+=======+=========+=======+=======+
|
||||
| 5.4 | |v| | |v| | |v| | |v| | |x| | |v| | |v| |
|
||||
+----------------------------+------+------+-------+-------+---------+-------+-------+
|
||||
| 5.2 | |v| | |v| | |v| | |v| | |v| | |v| | |x| |
|
||||
+----------------------------+------+------+-------+-------+---------+-------+-------+
|
||||
|
||||
All releases are available as a Docker container and EC2 AMI, GCP, and Azure images.
|
||||
|
||||
|
||||
ScyllaDB Enterprise
|
||||
|
||||
@@ -23,7 +23,7 @@ It’s recommended to have a balanced setup. If there are only 4-8 :term:`Logica
|
||||
This works in the opposite direction as well.
|
||||
ScyllaDB can be used in many types of installation environments.
|
||||
|
||||
To see which system would best suit your workload requirements, use the `ScyllaDB Sizing Calculator <https://price-calc.gh.scylladb.com/>`_ to customize ScyllaDB for your usage.
|
||||
To see which system would best suit your workload requirements, use the `ScyllaDB Sizing Calculator <https://www.scylladb.com/product/scylla-cloud/get-pricing/>`_ to customize ScyllaDB for your usage.
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -44,8 +44,7 @@ A compaction strategy is what determines which of the SSTables will be compacted
|
||||
* `Size-tiered compaction strategy (STCS)`_ - (default setting) triggered when the system has enough similarly sized SSTables.
|
||||
* `Leveled compaction strategy (LCS)`_ - the system uses small, fixed-size (by default 160 MB) SSTables divided into different levels and lowers both Read and Space Amplification.
|
||||
* :ref:`Incremental compaction strategy (ICS) <incremental-compaction-strategy-ics>` - :label-tip:`ScyllaDB Enterprise` Uses runs of sorted, fixed size (by default 1 GB) SSTables in a similar way that LCS does, organized into size-tiers, similar to STCS size-tiers. If you are an Enterprise customer, ICS is an updated strategy meant to replace STCS. It has the same read and write amplification, but has lower space amplification because the temporary space overhead is reduced to a constant, manageable level.
|
||||
* `Time-window compaction strategy (TWCS)`_ - designed for time series data and puts data in time order. This strategy replaced Date-tiered compaction. TWCS uses STCS to prevent accumulating SSTables in a window not yet closed. When the window closes, TWCS works towards reducing the SSTables in a time window to one.
|
||||
* `Date-tiered compaction strategy (DTCS)`_ - designed for time series data, but TWCS should be used instead.
|
||||
* `Time-window compaction strategy (TWCS)`_ - designed for time series data and puts data in time order. TWCS uses STCS to prevent accumulating SSTables in a window not yet closed. When the window closes, TWCS works towards reducing the SSTables in a time window to one.
|
||||
|
||||
How to Set a Compaction Strategy
|
||||
................................
|
||||
@@ -125,7 +124,7 @@ ICS is only available in ScyllaDB Enterprise. See the `ScyllaDB Enetrpise docume
|
||||
Time-window Compaction Strategy (TWCS)
|
||||
--------------------------------------
|
||||
|
||||
Time-window compaction strategy was introduced as a replacement for `Date-tiered Compaction Strategy (DTCS)`_ for handling time series workloads. Time-Window Compaction Strategy compacts SSTables within each time window using `Size-tiered Compaction Strategy (STCS)`_. SSTables from different time windows are never compacted together.
|
||||
Time-Window Compaction Strategy is designed for handling time series workloads. It compacts SSTables within each time window using `Size-tiered Compaction Strategy (STCS)`_. SSTables from different time windows are never compacted together.
|
||||
|
||||
.. include:: /rst_include/warning-ttl-twcs.rst
|
||||
|
||||
@@ -148,22 +147,6 @@ The primary motivation for TWCS is to separate data on disk by timestamp and to
|
||||
|
||||
While TWCS tries to minimize the impact of commingled data, users should attempt to avoid this behavior. Specifically, users should avoid queries that explicitly set the timestamp. It is recommended to run frequent repairs (which streams data in such a way that it does not become commingled), and disable background read repair by setting the table’s :ref:`read_repair_chance <create-table-general-options>` and :ref:`dclocal_read_repair_chance <create-table-general-options>` to ``0``.
|
||||
|
||||
|
||||
Date-tiered compaction strategy (DTCS)
|
||||
--------------------------------------
|
||||
|
||||
Date-Tiered Compaction is designed for time series data. It is only suitable for time-series data. This strategy has been replaced by `Time-window compaction strategy (TWCS)`_.
|
||||
|
||||
Date-tiered compaction strategy works as follows:
|
||||
|
||||
* First it sorts the SSTables by time and then compacts adjacent (time-wise) SSTables.
|
||||
* This results in SSTables whose sizes increase exponentially as they grow older.
|
||||
|
||||
For example, at some point we can have the last minute of data in one SSTable (by default, base_time_seconds = 60), another minute before that in another SSTable, then the 4 minutes before that in one SSTable, then the 4 minutes before that, then an SSTable of the 16 minutes before that, and so on. This structure can easily be maintained by compaction, very similar to size-tiered compaction. When there are 4 (the default value for min_threshold) small (one-minute) consecutive SSTables, they are compacted into one 4-minute SSTable. When there are 4 of the bigger SSTables one after another (time-wise), they are merged into a 16-minute SSTable, and so on.
|
||||
|
||||
Antique SSTables older than ``max_SSTable_age_days`` (by default 365 days) are not compacted as doing these compactions would not be useful for most queries, the process would be very slow, and the compaction would require huge amounts of temporary disk space.
|
||||
|
||||
|
||||
Changing Compaction Strategies or Properties
|
||||
--------------------------------------------
|
||||
|
||||
|
||||
@@ -50,7 +50,7 @@ Examples
|
||||
|
||||
nodetool compact
|
||||
nodetool compact keyspace1
|
||||
nodetool compact standard1
|
||||
nodetool compact keyspace1 standard1
|
||||
|
||||
See Also
|
||||
--------
|
||||
|
||||
@@ -1,12 +1,34 @@
|
||||
Nodetool flush
|
||||
==============
|
||||
**flush** ``[<keyspace> <cfnames>...]``- Specify a keyspace and one or more tables that you want to flush from the memtable to on disk SSTables.
|
||||
**flush** - Flush memtables to on-disk SSTables in the specified keyspace and table(s).
|
||||
|
||||
For example:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
nodetool flush keyspaces1 standard1
|
||||
nodetool flush
|
||||
nodetool flush keyspace1
|
||||
nodetool flush keyspace1 standard1
|
||||
|
||||
Syntax
|
||||
------
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
nodetool flush [<keyspace> [<table> ...]]
|
||||
|
||||
nodetool flush takes the following parameters:
|
||||
|
||||
.. list-table::
|
||||
:widths: 50 50
|
||||
:header-rows: 1
|
||||
|
||||
* - Parameter Name
|
||||
- Description
|
||||
* - ``<keyspace>``
|
||||
- The keyspace to operate on. If omitted, all keyspaces are flushed.
|
||||
* - ``<table> ...``
|
||||
- One or more tables to operate on. Tables may be specified only if a keyspace is given. If omitted, all tables in the specified keyspace are flushed.
|
||||
|
||||
See also
|
||||
|
||||
|
||||
@@ -70,7 +70,6 @@ To display the log classes (output changes with each version so your display may
|
||||
cql_server
|
||||
storage_proxy
|
||||
cache
|
||||
DateTieredCompactionStrategy
|
||||
schema_tables
|
||||
rpc
|
||||
compaction_manager
|
||||
|
||||
@@ -1,25 +1,36 @@
|
||||
Repair Based Node Operations
|
||||
****************************
|
||||
====================================
|
||||
Repair-Based Node Operations (RBNO)
|
||||
====================================
|
||||
|
||||
Scylla has two use cases for transferring data between nodes:
|
||||
In ScyllaDB, data is transferred between nodes during:
|
||||
|
||||
- Topology changes, like adding and removing nodes.
|
||||
- Repair, a background process to compare and sync data between nodes.
|
||||
* Topology changes via node operations, such as adding or removing nodes.
|
||||
* Repair - a row-level background process to compare and sync data between nodes.
|
||||
|
||||
Up to Scylla 4.6, the two used different underlying logic. In later releases, the same data transferring logic used for repair is also used for topology changes, making it more robust, reliable, and safer for data consistency. In particular, a node operation can restart from the same point it stopped without sending data that has been synced, a significant time-saver when adding or removing large nodes.
|
||||
In 4.6, Repair Based Node Operations (RBNO) is enabled by default only for replace node operation.
|
||||
Example from scylla.yaml:
|
||||
By default, the row-level repair mechanism used for the repair process is also
|
||||
used during node operations (instead of streaming). We refer to it as
|
||||
Repair-Based Node Operations (RBNO).
|
||||
|
||||
.. code:: yaml
|
||||
|
||||
enable_repair_based_node_ops: true
|
||||
allowed_repair_based_node_ops: replace
|
||||
RBNO is more robust, reliable, and safer for data consistency than streaming.
|
||||
In particular, a failed node operation can resume from the point it stopped -
|
||||
without sending data that has already been synced, which is a significant
|
||||
time-saver when adding or removing large nodes. In addition, with RBNO enabled,
|
||||
you don't need to run repair before or after node operations, such as replace
|
||||
or removenode.
|
||||
|
||||
To enable other operations (experimental), add them as a comma-separated list to allowed_repair_based_node_ops. Available operations are:
|
||||
RBNO is enabled for the following node operations:
|
||||
|
||||
* bootstrap
|
||||
* replace
|
||||
* removenode
|
||||
* decommission
|
||||
* rebuild
|
||||
* removenode
|
||||
* replace
|
||||
|
||||
The following configuration options can be used to enable or disable RBNO:
|
||||
|
||||
* ``enable_repair_based_node_ops= true|false`` - Enables or disables RBNO.
|
||||
* ``allowed_repair_based_node_ops= "replace,removenode,rebuild,bootstrap,decommission"`` -
|
||||
Specifies the node operations for which the RBNO mechanism is enabled.
|
||||
|
||||
See :doc:`Configuration Parameters </reference/configuration-parameters/>` for details.
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
==========
|
||||
AWS images
|
||||
AWS Images
|
||||
==========
|
||||
|
||||
.. scylladb_aws_images_template::
|
||||
:version: 5.2
|
||||
:exclude: rc,dev
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
========================
|
||||
Configuration parameters
|
||||
Configuration Parameters
|
||||
========================
|
||||
|
||||
This section contains a list of properties that can be configured in ``scylla.yaml`` - the main configuration file for ScyllaDB.
|
||||
In addition, properties that support live updates (liveness) can be updated via the ``system.config`` virtual table or the REST API.
|
||||
|
||||
.. scylladb_config_template:: ../_data/db_config.yaml
|
||||
.. scylladb_config_list:: ../../db/config.hh ../../db/config.cc
|
||||
:template: db_config.tmpl
|
||||
:value_status: Used
|
||||
|
||||
@@ -37,9 +37,6 @@ Glossary
|
||||
Quorum
|
||||
Quorum is a *global* consistency level setting across the entire cluster including all data centers. See :doc:`Consistency Levels </cql/consistency>`.
|
||||
|
||||
Date-tiered compaction strategy (DTCS)
|
||||
:abbr:`DTCS (Date-tiered compaction strategy)` is designed for time series data, but should not be used. Use :term:`Time-Window Compaction Strategy`. See :doc:`Compaction Strategies</architecture/compaction/compaction-strategies/>`.
|
||||
|
||||
Entropy
|
||||
A state where data is not consistent. This is the result when replicas are not synced and data is random. Scylla has measures in place to be antientropic. See :doc:`Scylla Anti-Entropy </architecture/anti-entropy/index>`.
|
||||
|
||||
@@ -151,7 +148,7 @@ Glossary
|
||||
A collection of columns fetched by row. Columns are ordered by Clustering Key. See :doc:`Ring Architecture </architecture/ringarchitecture/index>`.
|
||||
|
||||
Time-window compaction strategy
|
||||
TWCS is designed for time series data and replaced Date-tiered compaction. See :doc:`Compaction Strategies</architecture/compaction/compaction-strategies/>`.
|
||||
TWCS is designed for time series data. See :doc:`Compaction Strategies</architecture/compaction/compaction-strategies/>`.
|
||||
|
||||
Token
|
||||
A value in a range, used to identify both nodes and partitions. Each node in a Scylla cluster is given an (initial) token, which defines the end of the range a node handles. See :doc:`Ring Architecture </architecture/ringarchitecture/index>`.
|
||||
|
||||
@@ -3,7 +3,7 @@ Reference
|
||||
===============
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:maxdepth: 1
|
||||
:glob:
|
||||
|
||||
/reference/*
|
||||
@@ -14,6 +14,8 @@ The following table shows ScyllaDB Enterprise versions and their corresponding S
|
||||
|
||||
* - ScyllaDB Enterprise
|
||||
- ScyllaDB Open Source
|
||||
* - 2024.1
|
||||
- 5.4
|
||||
* - 2023.1
|
||||
- 5.2
|
||||
* - 2022.2
|
||||
|
||||
@@ -12,7 +12,7 @@ the ``/etc/systemd/system/var-lib-scylla.mount`` and ``/etc/systemd/system/var-l
|
||||
deleted by RPM.
|
||||
|
||||
To avoid losing the files, the upgrade procedure includes a step to backup the .mount files. The following
|
||||
example shows the command to backup the files before the :doc:`upgrade from version 5.0 </upgrade/upgrade-to-enterprise/upgrade-guide-from-5.0-to-2022.1/upgrade-guide-from-5.0-to-2022.1-rpm/>`:
|
||||
example shows the command to backup the files before the upgrade from version 5.0:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
|
||||
@@ -1,170 +0,0 @@
|
||||
=============================================================================
|
||||
Upgrade Guide - Scylla 1.6 to 1.7 for |OS|
|
||||
=============================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 1.6 to Scylla 1.7, and rollback to 1.6 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 1.6.x to Scylla version 1.7.y on the following platform:
|
||||
|
||||
* |OS|
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* drain node and backup the data
|
||||
* check your current release
|
||||
* backup configuration file
|
||||
* stop Scylla
|
||||
* download and install new Scylla packages
|
||||
* start Scylla
|
||||
* validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new 1.7 features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-1.6
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a 1.6.x version, stop right here! This guide only covers 1.6.x to 1.7.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |APT|_ to **1.7**
|
||||
2. Upgrade java to 1.8 on Ubuntu 14.04 and Debian 8, which is requested by Scylla 1.7
|
||||
|
||||
* |ENABLE_APT_REPO|
|
||||
* sudo apt-get update
|
||||
* sudo apt-get install -y |JESSIE_BACKPORTS|
|
||||
* sudo update-java-alternatives -s java-1.8.0-openjdk-amd64
|
||||
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Check scylla-server log (check ``/var/log/upstart/scylla-server.log`` for Ubuntu 14.04, execute ``journalctl _COMM=scylla`` for Ubuntu 16.04 and Debian 8) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla release 1.7.x to 1.6.y. Apply this procedure if an upgrade from 1.6 to 1.7 failed before completing on all nodes. Use this procedure only for nodes you upgraded to 1.7
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes rollback to 1.6, you will:
|
||||
|
||||
* drain the node and stop Scylla
|
||||
* retrieve the old Scylla packages
|
||||
* restore the configuration file
|
||||
* restart Scylla
|
||||
* validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
2. Update the |APT|_ to **1.6**
|
||||
3. install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get remove scylla\* -y
|
||||
sudo apt-get install scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-1.6 /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,190 +0,0 @@
|
||||
=============================================================================
|
||||
Upgrade Guide - Scylla 1.7 to 2.0 for |OS|
|
||||
=============================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 1.7 to Scylla 2.0, and rollback to 1.7 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 1.7.x (x >= 4) to Scylla version 2.0.y on the following platform:
|
||||
|
||||
* |OS|
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* check cluster schema
|
||||
* drain node and backup the data
|
||||
* backup configuration file
|
||||
* stop Scylla
|
||||
* download and install new Scylla packages
|
||||
* start Scylla
|
||||
* validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new 2.0 features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check cluster schema
|
||||
--------------------
|
||||
Make sure that all nodes have the schema synced prior to the upgrade; the upgrade will not succeed if there is schema disagreement between nodes.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-1.7
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a 1.7.x version, stop right here! This guide only covers 1.7.x to 2.0.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |APT|_ to **2.0**
|
||||
2. Upgrade java to 1.8 on Ubuntu 14.04 and Debian 8, which is requested by Scylla 2.0
|
||||
|
||||
* |ENABLE_APT_REPO|
|
||||
* sudo apt-get update
|
||||
* sudo apt-get install -y |JESSIE_BACKPORTS|
|
||||
* sudo update-java-alternatives -s java-1.8.0-openjdk-amd64
|
||||
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Check scylla-server log (check ``/var/log/upstart/scylla-server.log`` for Ubuntu 14.04, execute ``journalctl _COMM=scylla`` for Ubuntu 16.04 and Debian 8) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
* More on :doc:`Scylla Metrics Update - Scylla 1.7 to 2.0<metric-update-1.7-to-2.0>`
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla release 2.0.x to 1.7.y (y >= 4). Apply this procedure if an upgrade from 1.7 to 2.0 failed before completing on all nodes. Use this procedure only for nodes you upgraded to 2.0.
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes you roll back to 1.7, you will:
|
||||
|
||||
* drain the node and stop Scylla
|
||||
* retrieve the old Scylla packages
|
||||
* restore the configuration file
|
||||
* restart Scylla
|
||||
* validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the rolled-back version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
2. Update the |APT|_ to **1.7**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get remove scylla\* -y
|
||||
sudo apt-get install scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-1.7 /etc/scylla/scylla.yaml
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from previous snapshot, 2.0 uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,157 +0,0 @@
|
||||
======================================================================
|
||||
Upgrade Guide - Scylla 1.x.y to 1.x.z for |OS|
|
||||
======================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 1.x.y to Scylla 1.x.z.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 1.x.y to Scylla version 1.x.z on the following platform:
|
||||
|
||||
* |OS|
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* drain node and backup the data
|
||||
* check your current release
|
||||
* backup configuration file and deb packages
|
||||
* stop Scylla
|
||||
* download and install new Scylla packages
|
||||
* start Scylla
|
||||
* validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new 1.x.z features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file and deb packages
|
||||
------------------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-1.x.z
|
||||
|
||||
If you installed Scylla via apt, you can find the deb packages in ``/var/cache/apt/``; back them up to the ``scylla_1.x.y_backup`` directory, which will be used during rollback.
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a 1.x.y version, stop right here! This guide only covers 1.x.y to 1.x.z upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |APT|_ to **1.x**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Check scylla-server log (check ``/var/log/upstart/scylla-server.log`` for Ubuntu 14.04, execute ``journalctl _COMM=scylla`` for Ubuntu 16.04) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla release 1.x.z to 1.x.y. Apply this procedure if an upgrade from 1.x.y to 1.x.z failed before completing on all nodes. Use this procedure only for nodes you upgraded to 1.x.z.
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes you roll back to 1.x.y, you will:
|
||||
|
||||
* drain the node and stop Scylla
|
||||
* retrieve the old Scylla packages
|
||||
* restore the configuration file
|
||||
* restart Scylla
|
||||
* validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the rolled-back version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Install the old release from backed-up deb packages
|
||||
---------------------------------------------------
|
||||
1. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get remove scylla\* -y
|
||||
sudo dpkg -i scylla_1.x.y_backup/scylla*.deb
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-1.x.z /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,201 +0,0 @@
|
||||
=============================================================================
|
||||
Upgrade Guide - Scylla 2.0 to 2.1 for |OS|
|
||||
=============================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 2.0 to Scylla 2.1, and rollback to 2.0 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 2.0.x to Scylla version 2.1.y on the following platform:
|
||||
|
||||
* |OS|
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* Check cluster schema
|
||||
* Drain node and backup the data
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new 2.1 features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check cluster schema
|
||||
--------------------
|
||||
Make sure that all nodes have the schema synced prior to the upgrade; the upgrade will not succeed if there is schema disagreement between nodes.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-2.0
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a 2.0.x version, stop right here! This guide only covers 2.0.x to 2.1.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |APT|_ to **2.1**, and enable scylla/ppa repo
|
||||
|
||||
.. code:: sh
|
||||
|
||||
Debian 8:
|
||||
sudo apt-get install gnupg-curl -y
|
||||
sudo apt-key adv --fetch-keys https://download.opensuse.org/repositories/home:/scylladb:/scylla-3rdparty-jessie/Debian_8.0/Release.key
|
||||
sudo sh -c "echo 'deb http://download.opensuse.org/repositories/home:/scylladb:/scylla-3rdparty-jessie/Debian_8.0/ /' > /etc/apt/sources.list.d/scylla-3rdparty.list"
|
||||
|
||||
Ubuntu 14/16:
|
||||
sudo add-apt-repository -y ppa:scylladb/ppa
|
||||
|
||||
2. Upgrade java to 1.8 on Ubuntu 14.04 and Debian 8, which is requested by Scylla 2.1
|
||||
|
||||
* |ENABLE_APT_REPO|
|
||||
* sudo apt-get update
|
||||
* sudo apt-get install -y |JESSIE_BACKPORTS|
|
||||
* sudo update-java-alternatives -s java-1.8.0-openjdk-amd64
|
||||
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Check scylla-server log (check ``/var/log/upstart/scylla-server.log`` for Ubuntu 14.04, execute ``journalctl _COMM=scylla`` for Ubuntu 16.04 and Debian 8) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
* More on :doc:`Scylla Metrics Update - Scylla 2.0 to 2.1<metric-update-2.0-to-2.1>`
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla release 2.1.x to 2.0.y. Apply this procedure if an upgrade from 2.0 to 2.1 failed before completing on all nodes. Use this procedure only for nodes you upgraded to 2.1.
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes you roll back to 2.0, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old Scylla packages
|
||||
* Restore the configuration file
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the rolled-back version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
2. Update the |APT|_ to **2.0**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get remove scylla\* -y
|
||||
sudo apt-get install scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-2.0 /etc/scylla/scylla.yaml
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from previous snapshot, 2.1 uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,200 +0,0 @@
|
||||
=============================================================================
|
||||
Upgrade Guide - Scylla 2.1 to 2.2 for |OS|
|
||||
=============================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 2.1 to Scylla 2.2, and rollback to 2.1 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 2.1.x to Scylla version 2.2.y on the following platform:
|
||||
|
||||
* |OS|
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* Check cluster schema
|
||||
* Drain node and backup the data
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new 2.2 features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check cluster schema
|
||||
--------------------
|
||||
Make sure that all nodes have the schema synced prior to the upgrade; the upgrade will not succeed if there is schema disagreement between nodes.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
for conf in $(cat /var/lib/dpkg/info/scylla-*server.conffiles /var/lib/dpkg/info/scylla-*conf.conffiles /var/lib/dpkg/info/scylla-*jmx.conffiles | grep -v init ); do sudo cp -v $conf $conf.backup-2.1; done
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a 2.1.x version, stop right here! This guide only covers 2.1.x to 2.2.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |APT|_ to **2.2**, and enable scylla/ppa repo
|
||||
|
||||
.. code:: sh
|
||||
|
||||
Debian 8:
|
||||
sudo apt-get install gnupg-curl -y
|
||||
sudo apt-key adv --fetch-keys https://download.opensuse.org/repositories/home:/scylladb:/scylla-3rdparty-jessie/Debian_8.0/Release.key
|
||||
sudo sh -c "echo 'deb http://download.opensuse.org/repositories/home:/scylladb:/scylla-3rdparty-jessie/Debian_8.0/ /' > /etc/apt/sources.list.d/scylla-3rdparty.list"
|
||||
|
||||
Ubuntu 14/16:
|
||||
sudo add-apt-repository -y ppa:scylladb/ppa
|
||||
|
||||
2. Upgrade java to 1.8 on Ubuntu 14.04 and Debian 8, which is requested by Scylla 2.2
|
||||
|
||||
* |ENABLE_APT_REPO|
|
||||
* sudo apt-get update
|
||||
* sudo apt-get install -y |JESSIE_BACKPORTS|
|
||||
* sudo update-java-alternatives -s java-1.8.0-openjdk-amd64
|
||||
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Check scylla-server log (check ``/var/log/upstart/scylla-server.log`` for Ubuntu 14.04, execute ``journalctl _COMM=scylla`` for Ubuntu 16.04 and Debian 8) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
* More on :doc:`Scylla Metrics Update - Scylla 2.1 to 2.2<metric-update-2.1-to-2.2>`
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla release 2.2.x to 2.1.y. Apply this procedure if an upgrade from 2.1 to 2.2 failed before completing on all nodes. Use this procedure only for nodes you upgraded to 2.2.
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes you roll back to 2.1, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old Scylla packages
|
||||
* Restore the configuration file
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the rolled-back version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
2. Update the |APT|_ to **2.1**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get remove scylla\* -y
|
||||
sudo apt-get install scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
.. code:: sh
|
||||
|
||||
for conf in $(cat /var/lib/dpkg/info/scylla-*server.conffiles /var/lib/dpkg/info/scylla-*conf.conffiles /var/lib/dpkg/info/scylla-*jmx.conffiles | grep -v init ); do sudo cp -v $conf.backup-2.1 $conf; done
|
||||
sudo systemctl daemon-reload (Ubuntu 16.04 and Debian 8)
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from previous snapshot, 2.2 uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,203 +0,0 @@
|
||||
=============================================================================
|
||||
Upgrade Guide - Scylla 2.2 to 2.3 for |OS|
|
||||
=============================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 2.2 to Scylla 2.3, and rollback to 2.2 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 2.2.x to Scylla version 2.3.y on the following platform:
|
||||
|
||||
* |OS|
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* Check cluster schema
|
||||
* Drain node and backup the data
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new 2.3 features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check cluster schema
|
||||
--------------------
|
||||
Make sure that all nodes have the schema synced prior to the upgrade; an upgrade will not succeed if there is schema disagreement between nodes.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
for conf in $(cat /var/lib/dpkg/info/scylla-*server.conffiles /var/lib/dpkg/info/scylla-*conf.conffiles /var/lib/dpkg/info/scylla-*jmx.conffiles | grep -v init ); do sudo cp -v $conf $conf.backup-2.2; done
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a 2.2.x version, stop right here! This guide only covers 2.2.x to 2.3.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |APT|_ to **2.3**, and enable scylla/ppa repo
|
||||
|
||||
.. code:: sh
|
||||
|
||||
Debian 8:
|
||||
sudo apt-get install gnupg-curl -y
|
||||
sudo apt-key adv --fetch-keys https://download.opensuse.org/repositories/home:/scylladb:/scylla-3rdparty-jessie/Debian_8.0/Release.key
|
||||
sudo sh -c "echo 'deb http://download.opensuse.org/repositories/home:/scylladb:/scylla-3rdparty-jessie/Debian_8.0/ /' > /etc/apt/sources.list.d/scylla-3rdparty.list"
|
||||
|
||||
Ubuntu 14/16:
|
||||
sudo add-apt-repository -y ppa:scylladb/ppa
|
||||
|
||||
2. Upgrade java to 1.8 on Ubuntu 14.04 and Debian 8, which is required by Scylla 2.3
|
||||
|
||||
* |ENABLE_APT_REPO|
|
||||
* sudo apt-get update
|
||||
* sudo apt-get install -y |JESSIE_BACKPORTS|
|
||||
* sudo update-java-alternatives -s java-1.8.0-openjdk-amd64
|
||||
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check the Scylla version.
|
||||
3. Check scylla-server log (check ``/var/log/upstart/scylla-server.log`` for Ubuntu 14.04, execute ``journalctl _COMM=scylla`` for Ubuntu 16.04 and Debian 8) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
* More on :doc:`Scylla Metrics Update - Scylla 2.2 to 2.3<metric-update-2.2-to-2.3>`
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla release 2.3.x to 2.2.y. Apply this procedure if an upgrade from 2.2 to 2.3 failed before completing on all nodes. Use this procedure only for nodes you upgraded to 2.3
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes rollback to 2.2, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old Scylla packages
|
||||
* Restore the configuration file
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
|
||||
|
||||
|
||||
Gracefully shut down Scylla
|
||||
---------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
2. Update the |APT|_ to **2.2**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get remove scylla\* -y
|
||||
sudo apt-get install scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
.. code:: sh
|
||||
|
||||
for conf in $(cat /var/lib/dpkg/info/scylla-*server.conffiles /var/lib/dpkg/info/scylla-*conf.conffiles /var/lib/dpkg/info/scylla-*jmx.conffiles | grep -v init ); do sudo cp -v $conf.backup-2.2 $conf; done
|
||||
sudo systemctl daemon-reload   # Ubuntu 16.04 and Debian 8 only
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from previous snapshot, 2.3 uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,218 +0,0 @@
|
||||
=============================================================================
|
||||
Upgrade Guide - Scylla 2.3 to 3.0 for |OS|
|
||||
=============================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 2.3 to Scylla 3.0, and rollback to 2.3 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 2.3.x to Scylla version 3.0.y on the following platform:
|
||||
|
||||
* |OS|
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* Check cluster schema
|
||||
* Drain node and backup the data
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new 3.0 features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check cluster schema
|
||||
--------------------
|
||||
Make sure that all nodes have the schema synced prior to the upgrade; an upgrade will not succeed if there is schema disagreement between nodes.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
for conf in $(cat /var/lib/dpkg/info/scylla-*server.conffiles /var/lib/dpkg/info/scylla-*conf.conffiles /var/lib/dpkg/info/scylla-*jmx.conffiles | grep -v init ); do sudo cp -v $conf $conf.backup-2.3; done
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a 2.3.x version, stop right here! This guide only covers 2.3.x to 3.0.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |APT|_ to **3.0**, and enable scylla/ppa repo
|
||||
|
||||
.. code:: sh
|
||||
|
||||
Debian 8:
|
||||
sudo apt-get install gnupg-curl -y
|
||||
sudo apt-key adv --fetch-keys https://download.opensuse.org/repositories/home:/scylladb:/scylla-3rdparty-jessie/Debian_8.0/Release.key
|
||||
sudo sh -c "echo 'deb http://download.opensuse.org/repositories/home:/scylladb:/scylla-3rdparty-jessie/Debian_8.0/ /' > /etc/apt/sources.list.d/scylla-3rdparty.list"
|
||||
|
||||
Ubuntu 14/16:
|
||||
sudo add-apt-repository -y ppa:scylladb/ppa
|
||||
|
||||
2. Upgrade java to 1.8 on Ubuntu 14.04 and Debian 8, which is required by Scylla 3.0
|
||||
|
||||
* |ENABLE_APT_REPO|
|
||||
* sudo apt-get update
|
||||
* sudo apt-get install -y |JESSIE_BACKPORTS|
|
||||
* sudo update-java-alternatives -s java-1.8.0-openjdk-amd64
|
||||
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
4. Upgrade node_exporter
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service node_exporter stop
|
||||
sudo rm /usr/bin/node_exporter
|
||||
sudo node_exporter_install
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check the Scylla version.
|
||||
3. Check scylla-server log (check ``/var/log/upstart/scylla-server.log`` for Ubuntu 14.04, execute ``journalctl _COMM=scylla`` for Ubuntu 16.04 and Debian 8) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
* More on :doc:`Scylla Metrics Update - Scylla 2.3 to 3.0<metric-update-2.3-to-3.0>`
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla release 3.0.x to 2.3.y. Apply this procedure if an upgrade from 2.3 to 3.0 failed before completing on all nodes. Use this procedure only for nodes you upgraded to 3.0
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes rollback to 2.3, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old Scylla packages
|
||||
* Restore the configuration file
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shut down Scylla
|
||||
---------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
2. Update the |APT|_ to **2.3**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get remove scylla\* -y
|
||||
sudo apt-get install scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
.. code:: sh
|
||||
|
||||
for conf in $(cat /var/lib/dpkg/info/scylla-*server.conffiles /var/lib/dpkg/info/scylla-*conf.conffiles /var/lib/dpkg/info/scylla-*jmx.conffiles | grep -v init ); do sudo cp -v $conf.backup-2.3 $conf; done
|
||||
sudo systemctl daemon-reload   # Ubuntu 16.04 and Debian 8 only
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from previous snapshot, 3.0 uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
sudo chown -R scylla:scylla /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Install old node_exporter
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service node_exporter stop
|
||||
sudo rm /usr/bin/node_exporter
|
||||
sudo node_exporter_install
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,155 +0,0 @@
|
||||
======================================================================
|
||||
Upgrade Guide - Scylla 2.x.y to 2.x.z for |OS|
|
||||
======================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 2.x.y to Scylla 2.x.z.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 2.x.y to Scylla version 2.x.z on the following platform:
|
||||
|
||||
* |OS|
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* Drain node and backup the data
|
||||
* Check your current release
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new 2.x.z features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-2.x.z
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a 2.x.y version, stop right here! This guide only covers 2.x.y to 2.x.z upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |APT|_ to **2.x**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check the Scylla version.
|
||||
3. Check scylla-server log (check ``/var/log/upstart/scylla-server.log`` for Ubuntu 14.04, execute ``journalctl _COMM=scylla`` for Ubuntu 16.04) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla release 2.x.z to 2.x.y. Apply this procedure if an upgrade from 2.x.y to 2.x.z failed before completing on all nodes. Use this procedure only for nodes you upgraded to 2.x.z
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes rollback to 2.x.y, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Downgrade to previous release
|
||||
* Restore the configuration file
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shut down Scylla
|
||||
---------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Downgrade to previous release
|
||||
-----------------------------
|
||||
1. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get install scylla=2.x.y\* scylla-server=2.x.y\* scylla-jmx=2.x.y\* scylla-tools=2.x.y\* scylla-tools-core=2.x.y\* scylla-kernel-conf=2.x.y\* scylla-conf=2.x.y\*
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-2.x.z /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,161 +0,0 @@
|
||||
======================================================================
|
||||
Upgrade Guide - Scylla 3.x.y to 3.x.z for |OS|
|
||||
======================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 3.x.y to Scylla 3.x.z.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 3.x.y to Scylla version 3.x.z on the following platform:
|
||||
|
||||
* |OS|
|
||||
|
||||
.. include:: /upgrade/_common/note-ubuntu14.rst
|
||||
|
||||
.. include:: /upgrade/upgrade-opensource/upgrade-guide-from-3.x.y-to-3.x.z/_common/note_3.1.0_to_3.1.1.rst
|
||||
|
||||
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* Drain node and backup the data
|
||||
* Check your current release
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new 3.x.z features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-3.x.z
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a 3.x.y version, stop right here! This guide only covers 3.x.y to 3.x.z upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |APT|_ to **3.x**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check the Scylla version.
|
||||
3. Check scylla-server log (check ``/var/log/upstart/scylla-server.log`` for Ubuntu 14.04, execute ``journalctl _COMM=scylla`` for Ubuntu 16.04 and Ubuntu 18.04) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla release 3.x.z to 3.x.y. Apply this procedure if an upgrade from 3.x.y to 3.x.z failed before completing on all nodes. Use this procedure only for nodes you upgraded to 3.x.z
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes rollback to 3.x.y, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Downgrade to previous release
|
||||
* Restore the configuration file
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shut down Scylla
|
||||
---------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Downgrade to previous release
|
||||
-----------------------------
|
||||
1. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get install scylla=3.x.y\* scylla-server=3.x.y\* scylla-jmx=3.x.y\* scylla-tools=3.x.y\* scylla-tools-core=3.x.y\* scylla-kernel-conf=3.x.y\* scylla-conf=3.x.y\*
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-3.x.z /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,158 +0,0 @@
|
||||
======================================================================
|
||||
Upgrade Guide - ScyllaDB |FROM| to |TO| for |OS|
|
||||
======================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla |FROM| to Scylla |TO|.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: |FROM| to Scylla version |TO| on the following platform:
|
||||
|
||||
* |OS|
|
||||
|
||||
.. include:: /upgrade/_common/note-ubuntu14.rst
|
||||
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* Drain node and backup the data
|
||||
* Check your current release
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new |TO| features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-4.x.z
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a |FROM| version, stop right here! This guide only covers |FROM| to |TO| upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |APT|_ to **4.x**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Check scylla-server log (check ``/var/log/upstart/scylla-server.log`` for Ubuntu 14.04, execute ``journalctl _COMM=scylla`` for Ubuntu 16.04 and Ubuntu 18.04) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla release |TO| to |FROM|. Apply this procedure if an upgrade from |FROM| to |TO| failed before completing on all nodes. Use this procedure only for nodes you upgraded to |TO|.
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes you roll back to |FROM|, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Downgrade to previous release
|
||||
* Restore the configuration file
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Downgrade to previous release
|
||||
-----------------------------
|
||||
1. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get install scylla=4.x.y\* scylla-server=4.x.y\* scylla-jmx=4.x.y\* scylla-tools=4.x.y\* scylla-tools-core=4.x.y\* scylla-kernel-conf=4.x.y\* scylla-conf=4.x.y\*
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-4.x.z /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,183 +0,0 @@
|
||||
=============================================================================================================
|
||||
Upgrade Guide - |SCYLLA_NAME| |SRC_VERSION| to |NEW_VERSION| for Red Hat Enterprise Linux 7 or CentOS 7
|
||||
=============================================================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from |SCYLLA_NAME| |SRC_VERSION| to |SCYLLA_NAME| |NEW_VERSION|, and rollback to |SRC_VERSION| if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: |SRC_VERSION|.x or later to |SCYLLA_NAME| version |NEW_VERSION|.y, on the following platforms:
|
||||
|
||||
* Red Hat Enterprise Linux, version 7 and later
|
||||
* CentOS, version 7 and later
|
||||
* Fedora — packages are no longer provided
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does not require full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
* Check cluster schema
|
||||
* Drain node and backup the data
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new |NEW_VERSION| features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes. See `sctool <https://manager.docs.scylladb.com/stable/sctool/index.html>`_ for suspending Scylla Manager (only available Scylla Enterprise) scheduled or running repairs.
|
||||
* Not to apply schema changes
|
||||
|
||||
.. note:: Before upgrading, make sure to use |SCYLLA_MONITOR|_ or newer, for the Dashboards.
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check cluster schema
|
||||
--------------------
|
||||
Make sure that all nodes have the schema synced prior to the upgrade. The upgrade will not succeed if there is schema disagreement between nodes.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-src
|
||||
|
||||
Stop Scylla
|
||||
-----------
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``rpm -qa | grep scylla-server``. You should use the same version in case you want to :ref:`rollback <rollback-procedure>` the upgrade. If you are not running a |SRC_VERSION|.x version, stop right here! This guide only covers |SRC_VERSION|.x to |NEW_VERSION|.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |SCYLLA_REPO|_ to |NEW_VERSION|
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum update scylla\* -y
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Use ``journalctl _COMM=scylla`` to check there are no new errors in the log.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
* More on |Scylla_METRICS|_
|
||||
|
||||
.. _rollback-procedure:
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from |SCYLLA_NAME| release |NEW_VERSION|.x to |SRC_VERSION|.y. Apply this procedure if an upgrade from |SRC_VERSION| to |NEW_VERSION| failed before completing on all nodes. Use this procedure only for nodes you upgraded to |NEW_VERSION|.
|
||||
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes you roll back to |SRC_VERSION|, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old Scylla packages
|
||||
* Restore the configuration file
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/yum.repos.d/scylla.repo
|
||||
|
||||
2. Update the |SCYLLA_REPO|_ to |SRC_VERSION|
|
||||
3. Install
|
||||
|
||||
.. parsed-literal::
|
||||
\ sudo yum clean all
|
||||
\ sudo rm -rf /var/cache/yum
|
||||
\ sudo yum remove scylla\\*tools-core
|
||||
\ sudo yum downgrade scylla\\* -y
|
||||
\ sudo yum install |PKG_NAME|
|
||||
\
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-src /etc/scylla/scylla.yaml
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from the previous snapshot, as |NEW_VERSION| uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
sudo chown -R scylla:scylla /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,197 +0,0 @@
|
||||
=============================================================================
|
||||
Upgrade Guide - |SCYLLA_NAME| |SRC_VERSION| to |NEW_VERSION| for |OS|
|
||||
=============================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from |SCYLLA_NAME| |SRC_VERSION| to |SCYLLA_NAME| |NEW_VERSION|, and rollback to |SRC_VERSION| if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: |SRC_VERSION|.x or later to |SCYLLA_NAME| version |NEW_VERSION|.y on the following platform:
|
||||
|
||||
* |OS|
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* Check cluster schema
|
||||
* Drain node and backup the data
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new |NEW_VERSION| features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes. See `sctool <https://manager.docs.scylladb.com/stable/sctool/index.html>`_ for suspending Scylla Manager (only available Scylla Enterprise) scheduled or running repairs.
|
||||
* Not to apply schema changes
|
||||
|
||||
.. note:: Before upgrading, make sure to use |SCYLLA_MONITOR|_ or newer, for the Dashboards.
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check cluster schema
|
||||
--------------------
|
||||
Make sure that all nodes have the schema synced prior to the upgrade. The upgrade will not succeed if there is schema disagreement between nodes.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-|SRC_VERSION|
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a |SRC_VERSION|.x version, stop right here! This guide only covers |SRC_VERSION|.x to |NEW_VERSION|.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |SCYLLA_REPO|_ to |NEW_VERSION|, and enable scylla/ppa repo
|
||||
|
||||
.. code:: sh
|
||||
|
||||
Ubuntu 16:
|
||||
sudo add-apt-repository -y ppa:scylladb/ppa
|
||||
|
||||
2. Configure Java to version 1.8, which is required by |SCYLLA_NAME| |NEW_VERSION|
|
||||
|
||||
* sudo apt-get update
|
||||
* sudo apt-get install -y |OPENJDK|
|
||||
* sudo update-java-alternatives -s java-1.8.0-openjdk-amd64
|
||||
|
||||
3. Install
|
||||
|
||||
.. parsed-literal::
|
||||
\ sudo apt-get update
|
||||
\ sudo apt-get dist-upgrade |PKG_NAME|
|
||||
\
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Check scylla-server log (by ``journalctl _COMM=scylla``) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
* More on |Scylla_METRICS|_
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from |SCYLLA_NAME| release |NEW_VERSION|.x to |SRC_VERSION|.y. Apply this procedure if an upgrade from |SRC_VERSION| to |NEW_VERSION| failed before completing on all nodes. Use this procedure only for nodes you upgraded to |NEW_VERSION|.
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes you roll back to |SRC_VERSION|, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old Scylla packages
|
||||
* Restore the configuration file
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
2. Update the |SCYLLA_REPO|_ to |SRC_VERSION|
|
||||
3. Install
|
||||
|
||||
.. parsed-literal::
|
||||
\ sudo apt-get update
|
||||
\ sudo apt-get remove scylla\* -y
|
||||
\ sudo apt-get install |PKG_NAME|
|
||||
\
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-|SRC_VERSION| /etc/scylla/scylla.yaml
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from the previous snapshot, as |NEW_VERSION| uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
sudo chown -R scylla:scylla /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,196 +0,0 @@
|
||||
=============================================================================================================
|
||||
Upgrade Guide - |SCYLLA_NAME| |SRC_VERSION| to |NEW_VERSION| for Red Hat Enterprise Linux 7 or CentOS 7
|
||||
=============================================================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from |SCYLLA_NAME| |SRC_VERSION| to |SCYLLA_NAME| |NEW_VERSION|, and rollback to |SRC_VERSION| if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: |SRC_VERSION|.x or later to |SCYLLA_NAME| version |NEW_VERSION|.y, on the following platforms:
|
||||
|
||||
* Red Hat Enterprise Linux, version 7 and later
|
||||
* CentOS, version 7 and later
|
||||
* Fedora — packages are no longer provided
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does not require full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
* Check cluster schema
|
||||
* Drain node and backup the data
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new |NEW_VERSION| features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes. See `sctool <https://manager.docs.scylladb.com/stable/sctool/index.html>`_ for suspending Scylla Manager (only available Scylla Enterprise) scheduled or running repairs.
|
||||
* Not to apply schema changes
|
||||
|
||||
.. note:: Before upgrading, make sure to use |SCYLLA_MONITOR|_ or newer, for the Dashboards.
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check cluster schema
|
||||
--------------------
|
||||
Make sure that all nodes have the schema synced prior to the upgrade. The upgrade will not succeed if there is schema disagreement between nodes.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-src
|
||||
|
||||
Stop Scylla
|
||||
-----------
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``rpm -qa | grep scylla-server``. You should use the same version in case you want to :ref:`rollback <rollback-procedure>` the upgrade. If you are not running a |SRC_VERSION|.x version, stop right here! This guide only covers |SRC_VERSION|.x to |NEW_VERSION|.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |SCYLLA_REPO|_ to |NEW_VERSION|
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum update scylla\* -y
|
||||
|
||||
.. note::
|
||||
|
||||
Alternator users upgrading from Scylla 4.0 to 4.1, need to set :doc:`default isolation level </upgrade/upgrade-opensource/upgrade-guide-from-4.0-to-4.1/alternator>`
|
||||
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Use ``journalctl _COMM=scylla`` to check there are no new errors in the log.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
* More on |Scylla_METRICS|_
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from |SCYLLA_NAME| release |NEW_VERSION|.x to |SRC_VERSION|.y. Apply this procedure if an upgrade from |SRC_VERSION| to |NEW_VERSION| failed before completing on all nodes. Use this procedure only for nodes you upgraded to |NEW_VERSION|.
|
||||
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes you roll back to |SRC_VERSION|, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old Scylla packages
|
||||
* Restore the configuration file
|
||||
* Reload systemd configuration
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/yum.repos.d/scylla.repo
|
||||
|
||||
2. Update the |SCYLLA_REPO|_ to |SRC_VERSION|
|
||||
3. Install
|
||||
|
||||
.. parsed-literal::
|
||||
\ sudo yum clean all
|
||||
\ sudo rm -rf /var/cache/yum
|
||||
\ sudo yum remove scylla\\*tools-core
|
||||
\ sudo yum downgrade scylla\\* -y
|
||||
\ sudo yum install |PKG_NAME|
|
||||
\
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-src /etc/scylla/scylla.yaml
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from the previous snapshot, as |NEW_VERSION| uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
sudo chown -R scylla:scylla /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Reload systemd configuration
|
||||
---------------------------------
|
||||
|
||||
You must reload the unit file if the systemd unit file has changed.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl daemon-reload
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,213 +0,0 @@
|
||||
=============================================================================
|
||||
Upgrade Guide - |SCYLLA_NAME| |SRC_VERSION| to |NEW_VERSION| for |OS|
|
||||
=============================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from |SCYLLA_NAME| |SRC_VERSION| to |SCYLLA_NAME| |NEW_VERSION|, and rollback to |SRC_VERSION| if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: |SRC_VERSION|.x or later to |SCYLLA_NAME| version |NEW_VERSION|.y on the following platform:
|
||||
|
||||
* |OS|
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* Check cluster schema
|
||||
* Drain node and backup the data
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new |NEW_VERSION| features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes. See `sctool <https://manager.docs.scylladb.com/stable/sctool/index.html>`_ for suspending Scylla Manager (only available Scylla Enterprise) scheduled or running repairs.
|
||||
* Not to apply schema changes
|
||||
|
||||
.. note:: Before upgrading, make sure to use |SCYLLA_MONITOR|_ or newer, for the Dashboards.
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check cluster schema
|
||||
--------------------
|
||||
Make sure that all nodes have the schema synched prior to the upgrade, as any schema disagreement between nodes causes the upgrade to fail.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-|SRC_VERSION|
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a |SRC_VERSION|.x version, stop right here! This guide only covers |SRC_VERSION|.x to |NEW_VERSION|.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |SCYLLA_REPO|_ to |NEW_VERSION|, and enable scylla/ppa repo
|
||||
|
||||
.. code:: sh
|
||||
|
||||
Ubuntu 16:
|
||||
sudo add-apt-repository -y ppa:scylladb/ppa
|
||||
|
||||
2. Config java to 1.8, which is requested by |SCYLLA_NAME| |NEW_VERSION|
|
||||
|
||||
* sudo apt-get update
|
||||
* sudo apt-get install -y |OPENJDK|
|
||||
* sudo update-java-alternatives -s java-1.8.0-openjdk-amd64
|
||||
|
||||
3. Install
|
||||
|
||||
.. parsed-literal::
|
||||
\ sudo apt-get update
|
||||
\ sudo apt-get dist-upgrade |PKG_NAME|
|
||||
\
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
|
||||
.. note::
|
||||
|
||||
Alternator users upgrading from Scylla 4.0 to 4.1, need to set :doc:`default isolation level </upgrade/upgrade-opensource/upgrade-guide-from-4.0-to-4.1/alternator>`
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Check scylla-server log (by ``journalctl _COMM=scylla``) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
* More on |Scylla_METRICS|_
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from |SCYLLA_NAME| release |NEW_VERSION|.x to |SRC_VERSION|.y. Apply this procedure if an upgrade from |SRC_VERSION| to |NEW_VERSION| failed before completing on all nodes. Use this procedure only for nodes you upgraded to |NEW_VERSION|
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes rollback to |SRC_VERSION|, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old Scylla packages
|
||||
* Restore the configuration file
|
||||
* Restore system tables
|
||||
* Reload systemd configuration
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
2. Update the |SCYLLA_REPO|_ to |SRC_VERSION|
|
||||
3. Install
|
||||
|
||||
.. parsed-literal::
|
||||
\ sudo apt-get update
|
||||
\ sudo apt-get remove scylla\* -y
|
||||
\ sudo apt-get install |PKG_NAME|
|
||||
\
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-|SRC_VERSION| /etc/scylla/scylla.yaml
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from previous snapshot, |NEW_VERSION| uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
sudo chown -R scylla:scylla /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Reload systemd configuration
|
||||
----------------------------
|
||||
|
||||
You need to reload the unit file if the systemd unit file has changed.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl daemon-reload
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,192 +0,0 @@
|
||||
=============================================================================================================
|
||||
Upgrade Guide - |SCYLLA_NAME| |SRC_VERSION| to |NEW_VERSION| for |OS|
|
||||
=============================================================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from |SCYLLA_NAME| |SRC_VERSION| to |SCYLLA_NAME| |NEW_VERSION|, and rollback to |SRC_VERSION| if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: |SRC_VERSION|.x or later to |SCYLLA_NAME| version |NEW_VERSION|.y, on the following platforms:
|
||||
|
||||
* |OS|
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
Upgrading your Scylla version is a rolling procedure that does not require a full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
* Check the cluster's schema
|
||||
* Drain the node and backup the data
|
||||
* Backup the configuration file
|
||||
* Stop the Scylla service
|
||||
* Download and install new Scylla packages
|
||||
* Start the Scylla service
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade, it is highly recommended:
|
||||
|
||||
* Not to use new |NEW_VERSION| features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes. See `sctool <https://manager.docs.scylladb.com/stable/sctool/index.html>`_ for suspending Scylla Manager (only available Scylla Enterprise) scheduled or running repairs.
|
||||
* Not to apply schema changes
|
||||
|
||||
.. note:: Before upgrading, make sure to use |SCYLLA_MONITOR|_ or newer, for the Dashboards.
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check the cluster schema
|
||||
------------------------
|
||||
Make sure that all nodes have the schema synched prior to upgrade as any schema disagreement between the nodes causes the upgrade to fail.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is **highly recommended** to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to an external backup device.
|
||||
|
||||
When the upgrade is complete (for all nodes), remove the snapshot by running ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of disk space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-src
|
||||
|
||||
Stop Scylla
|
||||
-----------
|
||||
.. include:: /rst_include/scylla-commands-stop-index.rst
|
||||
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what Scylla version you are currently running with ``rpm -qa | grep scylla-server``. You should use the same version as this version in case you want to :ref:`rollback <rollback-procedure>` the upgrade. If you are not running a |SRC_VERSION|.x version, stop right here! This guide only covers |SRC_VERSION|.x to |NEW_VERSION|.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |SCYLLA_REPO|_ to |NEW_VERSION|
|
||||
2. Install the new Scylla version
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum clean all
|
||||
sudo yum update scylla\* -y
|
||||
|
||||
.. note::
|
||||
|
||||
Alternator users upgrading from Scylla 4.0 to 4.1, need to set :doc:`default isolation level </upgrade/upgrade-opensource/upgrade-guide-from-4.0-to-4.1/alternator>`
|
||||
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
.. include:: /rst_include/scylla-commands-start-index.rst
|
||||
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check the Scylla version. Validate that the version matches the one you upgraded to.
|
||||
3. Use ``journalctl _COMM=scylla`` to check there are no new errors in the log.
|
||||
4. Check again after two minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade was successful, move to the next node in the cluster.
|
||||
|
||||
* More on |Scylla_METRICS|_
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from |SCYLLA_NAME| release |NEW_VERSION|.x to |SRC_VERSION|.y. Apply this procedure if an upgrade from |SRC_VERSION| to |NEW_VERSION| failed before completing on all nodes. Use this procedure only for the nodes that you upgraded to |NEW_VERSION|
|
||||
|
||||
|
||||
Scylla rollback is a rolling procedure that does **not** require a full cluster shutdown.
|
||||
For each of the nodes rollback to |SRC_VERSION|, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old Scylla packages
|
||||
* Restore the configuration file
|
||||
* Reload the systemd configuration
|
||||
* Restart the Scylla service
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the rollback was successful and that the node is up and running with the old version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
.. include:: /rst_include/scylla-commands-stop-index.rst
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/yum.repos.d/scylla.repo
|
||||
|
||||
2. Update the |SCYLLA_REPO|_ to |SRC_VERSION|
|
||||
3. Install
|
||||
|
||||
.. parsed-literal::
|
||||
\ sudo yum clean all
|
||||
\ sudo rm -rf /var/cache/yum
|
||||
\ sudo yum remove scylla\\*tools-core
|
||||
\ sudo yum downgrade scylla\\* -y
|
||||
\ sudo yum install |PKG_NAME|
|
||||
\
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
   sudo cp -a /etc/scylla/scylla.yaml.backup-src /etc/scylla/scylla.yaml
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from previous snapshot, |NEW_VERSION| uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
sudo chown -R scylla:scylla /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Reload systemd configuration
|
||||
---------------------------------
|
||||
|
||||
You need to reload the unit file if the systemd unit file has changed.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl daemon-reload
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
|
||||
.. include:: /rst_include/scylla-commands-start-index.rst
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check the upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster. Keep in mind that the version you want to see on your node is the old version, which you noted at the beginning of the procedure.
|
||||
@@ -1,203 +0,0 @@
|
||||
=============================================================================
|
||||
Upgrade Guide - |SCYLLA_NAME| |SRC_VERSION| to |NEW_VERSION| for |OS|
|
||||
=============================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from |SCYLLA_NAME| |SRC_VERSION| to |SCYLLA_NAME| |NEW_VERSION|, and rollback to |SRC_VERSION| if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: |SRC_VERSION|.x or later to |SCYLLA_NAME| version |NEW_VERSION|.y on the following platform:
|
||||
|
||||
* |OS|
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* Check cluster schema
|
||||
* Drain node and backup the data
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new |NEW_VERSION| features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes. See `sctool <https://manager.docs.scylladb.com/stable/sctool/index.html>`_ for suspending Scylla Manager (only available Scylla Enterprise) scheduled or running repairs.
|
||||
* Not to apply schema changes
|
||||
|
||||
.. note:: Before upgrading, make sure to use |SCYLLA_MONITOR|_ or newer, for the Dashboards.
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check cluster schema
|
||||
--------------------
|
||||
Make sure that all nodes have the schema synched prior to the upgrade, as any schema disagreement between nodes causes the upgrade to fail.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-|SRC_VERSION|
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a |SRC_VERSION|.x version, stop right here! This guide only covers |SRC_VERSION|.x to |NEW_VERSION|.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |SCYLLA_REPO|_ to |NEW_VERSION|
|
||||
|
||||
2. Install
|
||||
|
||||
.. parsed-literal::
|
||||
\ sudo apt-get clean all
|
||||
\ sudo apt-get update
|
||||
\ sudo apt-get dist-upgrade |PKG_NAME|
|
||||
\
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
|
||||
.. note::
|
||||
|
||||
Alternator users upgrading from Scylla 4.0 to 4.1, need to set :doc:`default isolation level </upgrade/upgrade-opensource/upgrade-guide-from-4.0-to-4.1/alternator>`
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Check scylla-server log (by ``journalctl _COMM=scylla``) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
* More on |Scylla_METRICS|_
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from |SCYLLA_NAME| release |NEW_VERSION|.x to |SRC_VERSION|.y. Apply this procedure if an upgrade from |SRC_VERSION| to |NEW_VERSION| failed before completing on all nodes. Use this procedure only for nodes you upgraded to |NEW_VERSION|
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes rollback to |SRC_VERSION|, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old Scylla packages
|
||||
* Restore the configuration file
|
||||
* Restore system tables
|
||||
* Reload systemd configuration
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
2. Update the |SCYLLA_REPO|_ to |SRC_VERSION|
|
||||
3. Install
|
||||
|
||||
.. parsed-literal::
|
||||
\ sudo apt-get update
|
||||
\ sudo apt-get remove scylla\* -y
|
||||
\ sudo apt-get install |PKG_NAME|
|
||||
\
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-|SRC_VERSION| /etc/scylla/scylla.yaml
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from previous snapshot, |NEW_VERSION| uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
sudo chown -R scylla:scylla /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Reload systemd configuration
|
||||
----------------------------
|
||||
|
||||
You need to reload the unit file if the systemd unit file has changed.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl daemon-reload
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -58,9 +58,14 @@ When the upgrade is completed on all nodes, remove the snapshot with the ``nodet
|
||||
|
||||
Backup the configuration file
|
||||
------------------------------
|
||||
|
||||
Back up the ``scylla.yaml`` configuration file and the ScyllaDB packages
|
||||
in case you need to rollback the upgrade.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-src
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup
|
||||
sudo cp /etc/yum.repos.d/scylla.repo ~/scylla.repo-backup
|
||||
|
||||
Stop ScyllaDB
|
||||
---------------
|
||||
@@ -69,7 +74,7 @@ Stop ScyllaDB
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``rpm -qa | grep scylla-server``. You should use the same version as this version in case you want to :ref:`rollback <rollback-procedure>` the upgrade. If you are not running a |SRC_VERSION|.x version, stop right here! This guide only covers |SRC_VERSION|.x to |NEW_VERSION|.y upgrades.
|
||||
Before upgrading, check what version you are running now using ``rpm -qa | grep scylla-server``. You should use the same version as this version in case you want to :ref:`rollback <rollback-procedure-v4>` the upgrade. If you are not running a |SRC_VERSION|.x version, stop right here! This guide only covers |SRC_VERSION|.x to |NEW_VERSION|.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
@@ -98,6 +103,8 @@ Once you are sure the node upgrade was successful, move to the next node in the
|
||||
|
||||
See |Scylla_METRICS|_ for more information.
|
||||
|
||||
.. _rollback-procedure-v4:
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
@@ -122,29 +129,33 @@ Rollback Steps
|
||||
==============
|
||||
Gracefully shutdown ScyllaDB
|
||||
-----------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
.. include:: /rst_include/scylla-commands-stop-index.rst
|
||||
nodetool snapshot
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the old release
|
||||
Restore and install the old release
|
||||
------------------------------------
|
||||
#. Remove the old repo file.
|
||||
#. Restore the |SRC_VERSION| packages backed up during the upgrade.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/yum.repos.d/scylla.repo
|
||||
sudo cp ~/scylla.repo-backup /etc/yum.repos.d/scylla.repo
|
||||
sudo chown root.root /etc/yum.repos.d/scylla.repo
|
||||
sudo chmod 644 /etc/yum.repos.d/scylla.repo
|
||||
|
||||
#. Update the |SCYLLA_REPO|_ to |SRC_VERSION|.
|
||||
#. Install:
|
||||
|
||||
.. code:: console
|
||||
|
||||
sudo yum clean all
|
||||
sudo rm -rf /var/cache/yum
|
||||
sudo yum remove scylla\\*tools-core
|
||||
sudo yum downgrade scylla\\* -y
|
||||
sudo yum install scylla
|
||||
sudo yum downgrade scylla-\*cqlsh -y
|
||||
sudo yum remove scylla-\*cqlsh -y
|
||||
sudo yum downgrade scylla\* -y
|
||||
sudo yum install scylla -y
|
||||
|
||||
|
||||
Restore the configuration file
|
||||
@@ -153,18 +164,7 @@ Restore the configuration file
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
   sudo cp -a /etc/scylla/scylla.yaml.backup-src /etc/scylla/scylla.yaml
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from previous snapshot because |NEW_VERSION| uses a different set of system tables. See :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>` for details.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
sudo chown -R scylla:scylla /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
   sudo cp /etc/scylla/scylla.yaml.backup /etc/scylla/scylla.yaml
|
||||
|
||||
Reload systemd configuration
|
||||
---------------------------------
|
||||
|
||||
@@ -60,9 +60,13 @@ When the upgrade is completed on all nodes, remove the snapshot with the ``nodet
|
||||
Backup the configuration file
|
||||
------------------------------
|
||||
|
||||
Back up the ``scylla.yaml`` configuration file and the ScyllaDB packages
|
||||
in case you need to rollback the upgrade.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-src
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup
|
||||
sudo cp /etc/apt/sources.list.d/scylla.list ~/scylla.list-backup
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
@@ -44,7 +44,6 @@ For each of the nodes you rollback to |SRC_VERSION|, you will:
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old ScyllaDB packages
|
||||
* Restore the configuration file
|
||||
* Restore system tables
|
||||
* Reload systemd configuration
|
||||
* Restart ScyllaDB
|
||||
* Validate the rollback success
|
||||
@@ -59,17 +58,19 @@ Gracefully shutdown ScyllaDB
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the old release
|
||||
Restore and install the old release
|
||||
------------------------------------
|
||||
#. Remove the old repo file.
|
||||
#. Restore the |SRC_VERSION| packages backed up during the upgrade.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
sudo cp ~/scylla.list-backup /etc/apt/sources.list.d/scylla.list
|
||||
sudo chown root.root /etc/apt/sources.list.d/scylla.list
|
||||
sudo chmod 644 /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
#. Update the |SCYLLA_REPO|_ to |SRC_VERSION|.
|
||||
#. Install:
|
||||
|
||||
.. code-block::
|
||||
@@ -85,18 +86,7 @@ Restore the configuration file
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
   sudo cp -a /etc/scylla/scylla.yaml.backup-src /etc/scylla/scylla.yaml
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from the previous snapshot because |NEW_VERSION| uses a different set of system tables. See :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>` for reference.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
sudo chown -R scylla:scylla /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
   sudo cp /etc/scylla/scylla.yaml.backup /etc/scylla/scylla.yaml
|
||||
|
||||
Reload systemd configuration
|
||||
----------------------------
|
||||
|
||||
@@ -1 +1 @@
|
||||
.. note:: Execute the following commands one node at the time, moving to the next node only **after** the rollback procedure completed successfully.
|
||||
.. note:: Execute the following commands one node at a time, moving to the next node only **after** the rollback procedure has completed successfully.
|
||||
|
||||
@@ -34,7 +34,7 @@ The following example shows the upgrade path for a 3-node cluster from version 4
|
||||
#. Upgrade all three nodes to version 4.6.
|
||||
|
||||
|
||||
Upgrading to each patch version by following the :doc:`Scylla Maintenance Release Upgrade Guide </upgrade/upgrade-opensource/upgrade-guide-from-4.x.y-to-4.x.z/index>`
|
||||
Upgrading to each patch version by following the Maintenance Release Upgrade Guide
|
||||
is optional. However, we recommend upgrading to the latest patch release for your version before upgrading to a new version.
|
||||
For example, upgrade to patch 4.4.8 before upgrading to version 4.5.
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@ ScyllaDB follows the ``MAJOR.MINOR.PATCH`` `semantic versioning <https://semver.
|
||||
ScyllaDB Open Source:
|
||||
|
||||
* ``MAJOR`` versions: 4.y, 5.y
|
||||
* ``MINOR`` versions: 5.2.z, 5.3.z
|
||||
* ``MINOR`` versions: 5.2.z, 5.4.z
|
||||
* ``PATCH`` versions: 5.2.1, 5.2.2
|
||||
|
||||
|
||||
|
||||
@@ -4,31 +4,13 @@ Upgrade ScyllaDB Open Source
|
||||
|
||||
.. toctree::
|
||||
:hidden:
|
||||
|
||||
ScyllaDB 5.2 to 5.3 <upgrade-guide-from-5.2-to-5.3/index>
|
||||
|
||||
ScyllaDB 5.2 to 5.4 <upgrade-guide-from-5.2-to-5.4/index>
|
||||
ScyllaDB 5.1 to 5.2 <upgrade-guide-from-5.1-to-5.2/index>
|
||||
ScyllaDB 5.0 to 5.1 <upgrade-guide-from-5.0-to-5.1/index>
|
||||
ScyllaDB 5.x maintenance release <upgrade-guide-from-5.x.y-to-5.x.z/index>
|
||||
ScyllaDB 4.6 to 5.0 <upgrade-guide-from-4.6-to-5.0/index>
|
||||
ScyllaDB 4.5 to 4.6 <upgrade-guide-from-4.5-to-4.6/index>
|
||||
ScyllaDB 4.4 to 4.5 <upgrade-guide-from-4.4-to-4.5/index>
|
||||
ScyllaDB 4.3 to 4.4 <upgrade-guide-from-4.3-to-4.4/index>
|
||||
ScyllaDB 4.2 to 4.3 <upgrade-guide-from-4.2-to-4.3/index>
|
||||
ScyllaDB 4.1 to 4.2 <upgrade-guide-from-4.1-to-4.2/index>
|
||||
ScyllaDB 4.x maintenance release <upgrade-guide-from-4.x.y-to-4.x.z/index>
|
||||
ScyllaDB 4.0 to 4.1 <upgrade-guide-from-4.0-to-4.1/index>
|
||||
ScyllaDB 3.x maintenance release <upgrade-guide-from-3.x.y-to-3.x.z/index>
|
||||
ScyllaDB 3.3 to 4.0 <upgrade-guide-from-3.3-to-4.0/index>
|
||||
ScyllaDB 3.2 to 3.3 <upgrade-guide-from-3.2-to-3.3/index>
|
||||
ScyllaDB 3.1 to 3.2 <upgrade-guide-from-3.1-to-3.2/index>
|
||||
ScyllaDB 3.0 to 3.1 <upgrade-guide-from-3.0-to-3.1/index>
|
||||
ScyllaDB 2.3 to 3.0 <upgrade-guide-from-2.3-to-3.0/index>
|
||||
ScyllaDB 2.2 to 2.3 <upgrade-guide-from-2.2-to-2.3/index>
|
||||
ScyllaDB 2.1 to 2.2 <upgrade-guide-from-2.1-to-2.2/index>
|
||||
ScyllaDB 2.x maintenance release <upgrade-guide-from-2.x.y-to-2.x.z/index>
|
||||
Older versions <upgrade-archive>
|
||||
Ubuntu 14.04 to 16.04 <upgrade-guide-from-ubuntu-14-to-16>
|
||||
|
||||
|
||||
|
||||
.. panel-box::
|
||||
:title: Upgrade ScyllaDB Open Source
|
||||
@@ -38,27 +20,9 @@ Upgrade ScyllaDB Open Source
|
||||
|
||||
Procedures for upgrading to a newer version of ScyllaDB Open Source.
|
||||
|
||||
* :doc:`Upgrade Guide - ScyllaDB 5.2 to 5.3 <upgrade-guide-from-5.2-to-5.3/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 5.2 to 5.4 <upgrade-guide-from-5.2-to-5.4/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 5.1 to 5.2 <upgrade-guide-from-5.1-to-5.2/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 5.0 to 5.1 <upgrade-guide-from-5.0-to-5.1/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 5.x maintenance releases <upgrade-guide-from-5.x.y-to-5.x.z/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 4.6 to 5.0 <upgrade-guide-from-4.6-to-5.0/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 4.5 to 4.6 <upgrade-guide-from-4.5-to-4.6/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 4.4 to 4.5 <upgrade-guide-from-4.4-to-4.5/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 4.3 to 4.4 <upgrade-guide-from-4.3-to-4.4/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 4.2 to 4.3 <upgrade-guide-from-4.2-to-4.3/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 4.1 to 4.2 <upgrade-guide-from-4.1-to-4.2/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 4.x maintenance release <upgrade-guide-from-4.x.y-to-4.x.z/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 4.0 to 4.1 <upgrade-guide-from-4.0-to-4.1/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 3.x maintenance release <upgrade-guide-from-3.x.y-to-3.x.z/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 3.3 to 4.0 <upgrade-guide-from-3.3-to-4.0/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 3.2 to 3.3 <upgrade-guide-from-3.2-to-3.3/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 3.1 to 3.2 <upgrade-guide-from-3.1-to-3.2/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 3.0 to 3.1 <upgrade-guide-from-3.0-to-3.1/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 2.3 to 3.0 <upgrade-guide-from-2.3-to-3.0/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 2.2 to 2.3 <upgrade-guide-from-2.2-to-2.3/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 2.1 to 2.2 <upgrade-guide-from-2.1-to-2.2/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 2.x maintenance release <upgrade-guide-from-2.x.y-to-2.x.z/index>`
|
||||
* :doc:`Upgrade Guide - older versions <upgrade-archive>`
|
||||
* :doc:`Upgrade Guide - Ubuntu 14.04 to 16.04 <upgrade-guide-from-ubuntu-14-to-16>`
|
||||
* :ref:`Upgrade Unified Installer (relocatable executable) install <unified-installed-upgrade>`
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user