Compare commits
119 Commits
next-5.4
...
mykaul-pat
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
953fee7fb0 | ||
|
|
6c90d166cc | ||
|
|
4b80130b0b | ||
|
|
a5519c7c1f | ||
|
|
f8104b92f8 | ||
|
|
2a21029ff5 | ||
|
|
4abcec9296 | ||
|
|
dcaaa74cd4 | ||
|
|
65bf5877e7 | ||
|
|
0cba973972 | ||
|
|
9347b61d3b | ||
|
|
3da02e1bf4 | ||
|
|
7c580b4bd4 | ||
|
|
d7031de538 | ||
|
|
0c6a3f568a | ||
|
|
55ee999f89 | ||
|
|
ee9cc450d4 | ||
|
|
4af585ec0e | ||
|
|
ea6c281b9f | ||
|
|
e7dd0ec033 | ||
|
|
a1271d2d5c | ||
|
|
950a1ff22c | ||
|
|
5a17a02abb | ||
|
|
940c2d1138 | ||
|
|
c960c2cdbf | ||
|
|
0080b15939 | ||
|
|
686adec52e | ||
|
|
8756838b16 | ||
|
|
6b84bc50c3 | ||
|
|
02cad8f85b | ||
|
|
b36cef6f1a | ||
|
|
af8bc8ba63 | ||
|
|
f181ac033a | ||
|
|
19fc01be23 | ||
|
|
4b57c2bf18 | ||
|
|
a212ddc5b1 | ||
|
|
9231454acd | ||
|
|
6db2698786 | ||
|
|
9f62bfa961 | ||
|
|
ad90bb8d87 | ||
|
|
c240c70278 | ||
|
|
c2cd11a8b3 | ||
|
|
890113a9cf | ||
|
|
fbcd667030 | ||
|
|
460bc7d8e1 | ||
|
|
ffefa623f4 | ||
|
|
92966d935a | ||
|
|
11d7cadf0d | ||
|
|
059d647ee5 | ||
|
|
80c656a08b | ||
|
|
0c69a312db | ||
|
|
899ecaffcd | ||
|
|
fded314e46 | ||
|
|
a6e68d8309 | ||
|
|
60145d9526 | ||
|
|
39966e0eb1 | ||
|
|
c256cca6f1 | ||
|
|
b105be220b | ||
|
|
2a7932efa1 | ||
|
|
ec94cc9538 | ||
|
|
0981661f8b | ||
|
|
2d543af78e | ||
|
|
0632ad50f3 | ||
|
|
572c880d97 | ||
|
|
0396ce7977 | ||
|
|
ef1d2b2c86 | ||
|
|
14e10e7db4 | ||
|
|
7d5e22b43b | ||
|
|
c8cb70918b | ||
|
|
f3dc01c85e | ||
|
|
274cf7a93a | ||
|
|
f69a44bb37 | ||
|
|
b340bd6d9e | ||
|
|
f7e269ccb8 | ||
|
|
c1486fee40 | ||
|
|
f80fff3484 | ||
|
|
7f81957437 | ||
|
|
51466dcb23 | ||
|
|
a0aee54f2c | ||
|
|
6fbd210679 | ||
|
|
dde36b5d9d | ||
|
|
54dd7cf1da | ||
|
|
c89ead55ff | ||
|
|
d59cd662f8 | ||
|
|
c3b3e5b107 | ||
|
|
059d7c795e | ||
|
|
7810e8d860 | ||
|
|
2b4e1e0f9c | ||
|
|
fad71029f0 | ||
|
|
a3044d1f46 | ||
|
|
98d067e77d | ||
|
|
5ebc0e8617 | ||
|
|
449b4c79c2 | ||
|
|
7653059369 | ||
|
|
96d9e768c4 | ||
|
|
fcd092473c | ||
|
|
7e6017d62d | ||
|
|
198119f737 | ||
|
|
39e96c6521 | ||
|
|
7b3e0ab1f2 | ||
|
|
3553556708 | ||
|
|
37da5a0638 | ||
|
|
22bf3c03df | ||
|
|
b852ad25bf | ||
|
|
e76a02abc5 | ||
|
|
e85fc9f8be | ||
|
|
21b61e8f0a | ||
|
|
b3e5c8c348 | ||
|
|
ce46f7b91b | ||
|
|
1efd0d9a92 | ||
|
|
50c8619ed9 | ||
|
|
5d3584faa5 | ||
|
|
e485c854b2 | ||
|
|
c2eb1ae543 | ||
|
|
795dcf2ead | ||
|
|
e004469827 | ||
|
|
eb5a9c535a | ||
|
|
bf25b5fe76 | ||
|
|
8c4f9379d5 |
@@ -78,7 +78,6 @@ target_sources(scylla-main
|
||||
debug.cc
|
||||
init.cc
|
||||
keys.cc
|
||||
message/messaging_service.cc
|
||||
multishard_mutation_query.cc
|
||||
mutation_query.cc
|
||||
partition_slice_builder.cc
|
||||
@@ -124,6 +123,7 @@ add_subdirectory(index)
|
||||
add_subdirectory(interface)
|
||||
add_subdirectory(lang)
|
||||
add_subdirectory(locator)
|
||||
add_subdirectory(message)
|
||||
add_subdirectory(mutation)
|
||||
add_subdirectory(mutation_writer)
|
||||
add_subdirectory(node_ops)
|
||||
@@ -165,6 +165,7 @@ target_link_libraries(scylla PRIVATE
|
||||
index
|
||||
lang
|
||||
locator
|
||||
message
|
||||
mutation
|
||||
mutation_writer
|
||||
raft
|
||||
@@ -193,35 +194,6 @@ target_link_libraries(scylla PRIVATE
|
||||
seastar
|
||||
Boost::program_options)
|
||||
|
||||
# Force SHA1 build-id generation
|
||||
set(default_linker_flags "-Wl,--build-id=sha1")
|
||||
include(CheckLinkerFlag)
|
||||
set(Scylla_USE_LINKER
|
||||
""
|
||||
CACHE
|
||||
STRING
|
||||
"Use specified linker instead of the default one")
|
||||
if(Scylla_USE_LINKER)
|
||||
set(linkers "${Scylla_USE_LINKER}")
|
||||
else()
|
||||
set(linkers "lld" "gold")
|
||||
endif()
|
||||
|
||||
foreach(linker ${linkers})
|
||||
set(linker_flag "-fuse-ld=${linker}")
|
||||
check_linker_flag(CXX ${linker_flag} "CXX_LINKER_HAVE_${linker}")
|
||||
if(CXX_LINKER_HAVE_${linker})
|
||||
string(APPEND default_linker_flags " ${linker_flag}")
|
||||
break()
|
||||
elseif(Scylla_USE_LINKER)
|
||||
message(FATAL_ERROR "${Scylla_USE_LINKER} is not supported.")
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
set(CMAKE_EXE_LINKER_FLAGS "${default_linker_flags}" CACHE INTERNAL "")
|
||||
|
||||
# TODO: patch dynamic linker to match configure.py behavior
|
||||
|
||||
target_include_directories(scylla PRIVATE
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}"
|
||||
"${scylla_gen_build_dir}")
|
||||
|
||||
@@ -78,7 +78,7 @@ fi
|
||||
|
||||
# Default scylla product/version tags
|
||||
PRODUCT=scylla
|
||||
VERSION=5.4.0-dev
|
||||
VERSION=5.5.0-dev
|
||||
|
||||
if test -f version
|
||||
then
|
||||
|
||||
@@ -80,7 +80,7 @@ static sstring_view table_status_to_sstring(table_status tbl_status) {
|
||||
return "UKNOWN";
|
||||
}
|
||||
|
||||
static future<std::vector<mutation>> create_keyspace(std::string_view keyspace_name, service::storage_proxy& sp, gms::gossiper& gossiper, api::timestamp_type);
|
||||
static lw_shared_ptr<keyspace_metadata> create_keyspace_metadata(std::string_view keyspace_name, service::storage_proxy& sp, gms::gossiper& gossiper, api::timestamp_type);
|
||||
|
||||
static map_type attrs_type() {
|
||||
static thread_local auto t = map_type_impl::get_instance(utf8_type, bytes_type, true);
|
||||
@@ -448,7 +448,6 @@ static rjson::value fill_table_description(schema_ptr schema, table_status tbl_s
|
||||
rjson::add(table_description, "TableName", rjson::from_string(schema->cf_name()));
|
||||
// FIXME: take the tables creation time, not the current time!
|
||||
size_t creation_date_seconds = std::chrono::duration_cast<std::chrono::seconds>(gc_clock::now().time_since_epoch()).count();
|
||||
rjson::add(table_description, "CreationDateTime", rjson::value(creation_date_seconds));
|
||||
// FIXME: In DynamoDB the CreateTable implementation is asynchronous, and
|
||||
// the table may be in "Creating" state until creating is finished.
|
||||
// We don't currently do this in Alternator - instead CreateTable waits
|
||||
@@ -470,54 +469,58 @@ static rjson::value fill_table_description(schema_ptr schema, table_status tbl_s
|
||||
rjson::add(table_description["ProvisionedThroughput"], "WriteCapacityUnits", 0);
|
||||
rjson::add(table_description["ProvisionedThroughput"], "NumberOfDecreasesToday", 0);
|
||||
|
||||
std::unordered_map<std::string,std::string> key_attribute_types;
|
||||
// Add base table's KeySchema and collect types for AttributeDefinitions:
|
||||
executor::describe_key_schema(table_description, *schema, key_attribute_types);
|
||||
|
||||
|
||||
data_dictionary::table t = proxy.data_dictionary().find_column_family(schema);
|
||||
if (!t.views().empty()) {
|
||||
rjson::value gsi_array = rjson::empty_array();
|
||||
rjson::value lsi_array = rjson::empty_array();
|
||||
for (const view_ptr& vptr : t.views()) {
|
||||
rjson::value view_entry = rjson::empty_object();
|
||||
const sstring& cf_name = vptr->cf_name();
|
||||
size_t delim_it = cf_name.find(':');
|
||||
if (delim_it == sstring::npos) {
|
||||
elogger.error("Invalid internal index table name: {}", cf_name);
|
||||
continue;
|
||||
|
||||
if (tbl_status != table_status::deleting) {
|
||||
rjson::add(table_description, "CreationDateTime", rjson::value(creation_date_seconds));
|
||||
std::unordered_map<std::string,std::string> key_attribute_types;
|
||||
// Add base table's KeySchema and collect types for AttributeDefinitions:
|
||||
executor::describe_key_schema(table_description, *schema, key_attribute_types);
|
||||
if (!t.views().empty()) {
|
||||
rjson::value gsi_array = rjson::empty_array();
|
||||
rjson::value lsi_array = rjson::empty_array();
|
||||
for (const view_ptr& vptr : t.views()) {
|
||||
rjson::value view_entry = rjson::empty_object();
|
||||
const sstring& cf_name = vptr->cf_name();
|
||||
size_t delim_it = cf_name.find(':');
|
||||
if (delim_it == sstring::npos) {
|
||||
elogger.error("Invalid internal index table name: {}", cf_name);
|
||||
continue;
|
||||
}
|
||||
sstring index_name = cf_name.substr(delim_it + 1);
|
||||
rjson::add(view_entry, "IndexName", rjson::from_string(index_name));
|
||||
rjson::add(view_entry, "IndexArn", generate_arn_for_index(*schema, index_name));
|
||||
// Add indexes's KeySchema and collect types for AttributeDefinitions:
|
||||
executor::describe_key_schema(view_entry, *vptr, key_attribute_types);
|
||||
// Add projection type
|
||||
rjson::value projection = rjson::empty_object();
|
||||
rjson::add(projection, "ProjectionType", "ALL");
|
||||
// FIXME: we have to get ProjectionType from the schema when it is added
|
||||
rjson::add(view_entry, "Projection", std::move(projection));
|
||||
// Local secondary indexes are marked by an extra '!' sign occurring before the ':' delimiter
|
||||
rjson::value& index_array = (delim_it > 1 && cf_name[delim_it-1] == '!') ? lsi_array : gsi_array;
|
||||
rjson::push_back(index_array, std::move(view_entry));
|
||||
}
|
||||
if (!lsi_array.Empty()) {
|
||||
rjson::add(table_description, "LocalSecondaryIndexes", std::move(lsi_array));
|
||||
}
|
||||
if (!gsi_array.Empty()) {
|
||||
rjson::add(table_description, "GlobalSecondaryIndexes", std::move(gsi_array));
|
||||
}
|
||||
sstring index_name = cf_name.substr(delim_it + 1);
|
||||
rjson::add(view_entry, "IndexName", rjson::from_string(index_name));
|
||||
rjson::add(view_entry, "IndexArn", generate_arn_for_index(*schema, index_name));
|
||||
// Add indexes's KeySchema and collect types for AttributeDefinitions:
|
||||
executor::describe_key_schema(view_entry, *vptr, key_attribute_types);
|
||||
// Add projection type
|
||||
rjson::value projection = rjson::empty_object();
|
||||
rjson::add(projection, "ProjectionType", "ALL");
|
||||
// FIXME: we have to get ProjectionType from the schema when it is added
|
||||
rjson::add(view_entry, "Projection", std::move(projection));
|
||||
// Local secondary indexes are marked by an extra '!' sign occurring before the ':' delimiter
|
||||
rjson::value& index_array = (delim_it > 1 && cf_name[delim_it-1] == '!') ? lsi_array : gsi_array;
|
||||
rjson::push_back(index_array, std::move(view_entry));
|
||||
}
|
||||
if (!lsi_array.Empty()) {
|
||||
rjson::add(table_description, "LocalSecondaryIndexes", std::move(lsi_array));
|
||||
}
|
||||
if (!gsi_array.Empty()) {
|
||||
rjson::add(table_description, "GlobalSecondaryIndexes", std::move(gsi_array));
|
||||
// Use map built by describe_key_schema() for base and indexes to produce
|
||||
// AttributeDefinitions for all key columns:
|
||||
rjson::value attribute_definitions = rjson::empty_array();
|
||||
for (auto& type : key_attribute_types) {
|
||||
rjson::value key = rjson::empty_object();
|
||||
rjson::add(key, "AttributeName", rjson::from_string(type.first));
|
||||
rjson::add(key, "AttributeType", rjson::from_string(type.second));
|
||||
rjson::push_back(attribute_definitions, std::move(key));
|
||||
}
|
||||
rjson::add(table_description, "AttributeDefinitions", std::move(attribute_definitions));
|
||||
}
|
||||
// Use map built by describe_key_schema() for base and indexes to produce
|
||||
// AttributeDefinitions for all key columns:
|
||||
rjson::value attribute_definitions = rjson::empty_array();
|
||||
for (auto& type : key_attribute_types) {
|
||||
rjson::value key = rjson::empty_object();
|
||||
rjson::add(key, "AttributeName", rjson::from_string(type.first));
|
||||
rjson::add(key, "AttributeType", rjson::from_string(type.second));
|
||||
rjson::push_back(attribute_definitions, std::move(key));
|
||||
}
|
||||
rjson::add(table_description, "AttributeDefinitions", std::move(attribute_definitions));
|
||||
|
||||
executor::supplement_table_stream_info(table_description, *schema, proxy);
|
||||
|
||||
// FIXME: still missing some response fields (issue #5026)
|
||||
@@ -1118,8 +1121,9 @@ static future<executor::request_return_type> create_table_on_shard0(tracing::tra
|
||||
auto group0_guard = co_await mm.start_group0_operation();
|
||||
auto ts = group0_guard.write_timestamp();
|
||||
std::vector<mutation> schema_mutations;
|
||||
auto ksm = create_keyspace_metadata(keyspace_name, sp, gossiper, ts);
|
||||
try {
|
||||
schema_mutations = co_await create_keyspace(keyspace_name, sp, gossiper, ts);
|
||||
schema_mutations = service::prepare_new_keyspace_announcement(sp.local_db(), ksm, ts);
|
||||
} catch (exceptions::already_exists_exception&) {
|
||||
if (sp.data_dictionary().has_schema(keyspace_name, table_name)) {
|
||||
co_return api_error::resource_in_use(format("Table {} already exists", table_name));
|
||||
@@ -1129,15 +1133,7 @@ static future<executor::request_return_type> create_table_on_shard0(tracing::tra
|
||||
// This should never happen, the ID is supposed to be unique
|
||||
co_return api_error::internal(format("Table with ID {} already exists", schema->id()));
|
||||
}
|
||||
db::schema_tables::add_table_or_view_to_schema_mutation(schema, ts, true, schema_mutations);
|
||||
// we must call before_create_column_family callbacks - which allow
|
||||
// listeners to modify our schema_mutations. For example, CDC may add
|
||||
// another table (the CDC log table) to the same keyspace.
|
||||
// Unfortunately the convention is that this callback must be run in
|
||||
// a Seastar thread.
|
||||
co_await seastar::async([&] {
|
||||
mm.get_notifier().before_create_column_family(*schema, schema_mutations, ts);
|
||||
});
|
||||
co_await service::prepare_new_column_family_announcement(schema_mutations, sp, *ksm, schema, ts);
|
||||
for (schema_builder& view_builder : view_builders) {
|
||||
db::schema_tables::add_table_or_view_to_schema_mutation(
|
||||
view_ptr(view_builder.build()), ts, true, schema_mutations);
|
||||
@@ -4461,25 +4457,23 @@ future<executor::request_return_type> executor::describe_continuous_backups(clie
|
||||
co_return make_jsonable(std::move(response));
|
||||
}
|
||||
|
||||
// Create the keyspace in which we put the alternator table, if it doesn't
|
||||
// already exist.
|
||||
// Create the metadata for the keyspace in which we put the alternator
|
||||
// table if it doesn't already exist.
|
||||
// Currently, we automatically configure the keyspace based on the number
|
||||
// of nodes in the cluster: A cluster with 3 or more live nodes, gets RF=3.
|
||||
// A smaller cluster (presumably, a test only), gets RF=1. The user may
|
||||
// manually create the keyspace to override this predefined behavior.
|
||||
static future<std::vector<mutation>> create_keyspace(std::string_view keyspace_name, service::storage_proxy& sp, gms::gossiper& gossiper, api::timestamp_type ts) {
|
||||
sstring keyspace_name_str(keyspace_name);
|
||||
static lw_shared_ptr<keyspace_metadata> create_keyspace_metadata(std::string_view keyspace_name, service::storage_proxy& sp, gms::gossiper& gossiper, api::timestamp_type ts) {
|
||||
int endpoint_count = gossiper.num_endpoints();
|
||||
int rf = 3;
|
||||
if (endpoint_count < rf) {
|
||||
rf = 1;
|
||||
elogger.warn("Creating keyspace '{}' for Alternator with unsafe RF={} because cluster only has {} nodes.",
|
||||
keyspace_name_str, rf, endpoint_count);
|
||||
keyspace_name, rf, endpoint_count);
|
||||
}
|
||||
auto opts = get_network_topology_options(sp, gossiper, rf);
|
||||
auto ksm = keyspace_metadata::new_keyspace(keyspace_name_str, "org.apache.cassandra.locator.NetworkTopologyStrategy", std::move(opts), true);
|
||||
|
||||
co_return service::prepare_new_keyspace_announcement(sp.local_db(), ksm, ts);
|
||||
return keyspace_metadata::new_keyspace(keyspace_name, "org.apache.cassandra.locator.NetworkTopologyStrategy", std::move(opts), true);
|
||||
}
|
||||
|
||||
future<> executor::start() {
|
||||
|
||||
20
api/api.cc
20
api/api.cc
@@ -270,28 +270,36 @@ future<> set_server_done(http_context& ctx) {
|
||||
});
|
||||
}
|
||||
|
||||
future<> set_server_task_manager(http_context& ctx, lw_shared_ptr<db::config> cfg) {
|
||||
future<> set_server_task_manager(http_context& ctx, sharded<tasks::task_manager>& tm, lw_shared_ptr<db::config> cfg) {
|
||||
auto rb = std::make_shared < api_registry_builder > (ctx.api_doc);
|
||||
|
||||
return ctx.http_server.set_routes([rb, &ctx, &cfg = *cfg](routes& r) {
|
||||
return ctx.http_server.set_routes([rb, &ctx, &tm, &cfg = *cfg](routes& r) {
|
||||
rb->register_function(r, "task_manager",
|
||||
"The task manager API");
|
||||
set_task_manager(ctx, r, cfg);
|
||||
set_task_manager(ctx, r, tm, cfg);
|
||||
});
|
||||
}
|
||||
|
||||
future<> unset_server_task_manager(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_task_manager(ctx, r); });
|
||||
}
|
||||
|
||||
#ifndef SCYLLA_BUILD_MODE_RELEASE
|
||||
|
||||
future<> set_server_task_manager_test(http_context& ctx) {
|
||||
future<> set_server_task_manager_test(http_context& ctx, sharded<tasks::task_manager>& tm) {
|
||||
auto rb = std::make_shared < api_registry_builder > (ctx.api_doc);
|
||||
|
||||
return ctx.http_server.set_routes([rb, &ctx](routes& r) mutable {
|
||||
return ctx.http_server.set_routes([rb, &ctx, &tm](routes& r) mutable {
|
||||
rb->register_function(r, "task_manager_test",
|
||||
"The task manager test API");
|
||||
set_task_manager_test(ctx, r);
|
||||
set_task_manager_test(ctx, r, tm);
|
||||
});
|
||||
}
|
||||
|
||||
future<> unset_server_task_manager_test(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_task_manager_test(ctx, r); });
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
void req_params::process(const request& req) {
|
||||
|
||||
@@ -61,6 +61,10 @@ class gossiper;
|
||||
|
||||
namespace auth { class service; }
|
||||
|
||||
namespace tasks {
|
||||
class task_manager;
|
||||
}
|
||||
|
||||
namespace api {
|
||||
|
||||
struct http_context {
|
||||
@@ -70,11 +74,10 @@ struct http_context {
|
||||
distributed<replica::database>& db;
|
||||
service::load_meter& lmeter;
|
||||
const sharded<locator::shared_token_metadata>& shared_token_metadata;
|
||||
sharded<tasks::task_manager>& tm;
|
||||
|
||||
http_context(distributed<replica::database>& _db,
|
||||
service::load_meter& _lm, const sharded<locator::shared_token_metadata>& _stm, sharded<tasks::task_manager>& _tm)
|
||||
: db(_db), lmeter(_lm), shared_token_metadata(_stm), tm(_tm) {
|
||||
service::load_meter& _lm, const sharded<locator::shared_token_metadata>& _stm)
|
||||
: db(_db), lmeter(_lm), shared_token_metadata(_stm) {
|
||||
}
|
||||
|
||||
const locator::token_metadata& get_token_metadata();
|
||||
@@ -115,7 +118,9 @@ future<> set_server_gossip_settle(http_context& ctx, sharded<gms::gossiper>& g);
|
||||
future<> set_server_cache(http_context& ctx);
|
||||
future<> set_server_compaction_manager(http_context& ctx);
|
||||
future<> set_server_done(http_context& ctx);
|
||||
future<> set_server_task_manager(http_context& ctx, lw_shared_ptr<db::config> cfg);
|
||||
future<> set_server_task_manager_test(http_context& ctx);
|
||||
future<> set_server_task_manager(http_context& ctx, sharded<tasks::task_manager>& tm, lw_shared_ptr<db::config> cfg);
|
||||
future<> unset_server_task_manager(http_context& ctx);
|
||||
future<> set_server_task_manager_test(http_context& ctx, sharded<tasks::task_manager>& tm);
|
||||
future<> unset_server_task_manager_test(http_context& ctx);
|
||||
|
||||
}
|
||||
|
||||
@@ -111,16 +111,16 @@ future<full_task_status> retrieve_status(const tasks::task_manager::foreign_task
|
||||
co_return s;
|
||||
}
|
||||
|
||||
void set_task_manager(http_context& ctx, routes& r, db::config& cfg) {
|
||||
tm::get_modules.set(r, [&ctx] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
std::vector<std::string> v = boost::copy_range<std::vector<std::string>>(ctx.tm.local().get_modules() | boost::adaptors::map_keys);
|
||||
void set_task_manager(http_context& ctx, routes& r, sharded<tasks::task_manager>& tm, db::config& cfg) {
|
||||
tm::get_modules.set(r, [&tm] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
std::vector<std::string> v = boost::copy_range<std::vector<std::string>>(tm.local().get_modules() | boost::adaptors::map_keys);
|
||||
co_return v;
|
||||
});
|
||||
|
||||
tm::get_tasks.set(r, [&ctx] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
tm::get_tasks.set(r, [&tm] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
using chunked_stats = utils::chunked_vector<task_stats>;
|
||||
auto internal = tasks::is_internal{req_param<bool>(*req, "internal", false)};
|
||||
std::vector<chunked_stats> res = co_await ctx.tm.map([&req, internal] (tasks::task_manager& tm) {
|
||||
std::vector<chunked_stats> res = co_await tm.map([&req, internal] (tasks::task_manager& tm) {
|
||||
chunked_stats local_res;
|
||||
tasks::task_manager::module_ptr module;
|
||||
try {
|
||||
@@ -156,11 +156,11 @@ void set_task_manager(http_context& ctx, routes& r, db::config& cfg) {
|
||||
co_return std::move(f);
|
||||
});
|
||||
|
||||
tm::get_task_status.set(r, [&ctx] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
tm::get_task_status.set(r, [&tm] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto id = tasks::task_id{utils::UUID{req->param["task_id"]}};
|
||||
tasks::task_manager::foreign_task_ptr task;
|
||||
try {
|
||||
task = co_await tasks::task_manager::invoke_on_task(ctx.tm, id, std::function([] (tasks::task_manager::task_ptr task) -> future<tasks::task_manager::foreign_task_ptr> {
|
||||
task = co_await tasks::task_manager::invoke_on_task(tm, id, std::function([] (tasks::task_manager::task_ptr task) -> future<tasks::task_manager::foreign_task_ptr> {
|
||||
if (task->is_complete()) {
|
||||
task->unregister_task();
|
||||
}
|
||||
@@ -173,10 +173,10 @@ void set_task_manager(http_context& ctx, routes& r, db::config& cfg) {
|
||||
co_return make_status(s);
|
||||
});
|
||||
|
||||
tm::abort_task.set(r, [&ctx] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
tm::abort_task.set(r, [&tm] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto id = tasks::task_id{utils::UUID{req->param["task_id"]}};
|
||||
try {
|
||||
co_await tasks::task_manager::invoke_on_task(ctx.tm, id, [] (tasks::task_manager::task_ptr task) -> future<> {
|
||||
co_await tasks::task_manager::invoke_on_task(tm, id, [] (tasks::task_manager::task_ptr task) -> future<> {
|
||||
if (!task->is_abortable()) {
|
||||
co_await coroutine::return_exception(std::runtime_error("Requested task cannot be aborted"));
|
||||
}
|
||||
@@ -188,11 +188,11 @@ void set_task_manager(http_context& ctx, routes& r, db::config& cfg) {
|
||||
co_return json_void();
|
||||
});
|
||||
|
||||
tm::wait_task.set(r, [&ctx] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
tm::wait_task.set(r, [&tm] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto id = tasks::task_id{utils::UUID{req->param["task_id"]}};
|
||||
tasks::task_manager::foreign_task_ptr task;
|
||||
try {
|
||||
task = co_await tasks::task_manager::invoke_on_task(ctx.tm, id, std::function([] (tasks::task_manager::task_ptr task) {
|
||||
task = co_await tasks::task_manager::invoke_on_task(tm, id, std::function([] (tasks::task_manager::task_ptr task) {
|
||||
return task->done().then_wrapped([task] (auto f) {
|
||||
task->unregister_task();
|
||||
// done() is called only because we want the task to be complete before getting its status.
|
||||
@@ -208,8 +208,8 @@ void set_task_manager(http_context& ctx, routes& r, db::config& cfg) {
|
||||
co_return make_status(s);
|
||||
});
|
||||
|
||||
tm::get_task_status_recursively.set(r, [&ctx] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto& _ctx = ctx;
|
||||
tm::get_task_status_recursively.set(r, [&_tm = tm] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto& tm = _tm;
|
||||
auto id = tasks::task_id{utils::UUID{req->param["task_id"]}};
|
||||
std::queue<tasks::task_manager::foreign_task_ptr> q;
|
||||
utils::chunked_vector<full_task_status> res;
|
||||
@@ -217,7 +217,7 @@ void set_task_manager(http_context& ctx, routes& r, db::config& cfg) {
|
||||
tasks::task_manager::foreign_task_ptr task;
|
||||
try {
|
||||
// Get requested task.
|
||||
task = co_await tasks::task_manager::invoke_on_task(_ctx.tm, id, std::function([] (tasks::task_manager::task_ptr task) -> future<tasks::task_manager::foreign_task_ptr> {
|
||||
task = co_await tasks::task_manager::invoke_on_task(tm, id, std::function([] (tasks::task_manager::task_ptr task) -> future<tasks::task_manager::foreign_task_ptr> {
|
||||
if (task->is_complete()) {
|
||||
task->unregister_task();
|
||||
}
|
||||
@@ -264,4 +264,14 @@ void set_task_manager(http_context& ctx, routes& r, db::config& cfg) {
|
||||
});
|
||||
}
|
||||
|
||||
void unset_task_manager(http_context& ctx, routes& r) {
|
||||
tm::get_modules.unset(r);
|
||||
tm::get_tasks.unset(r);
|
||||
tm::get_task_status.unset(r);
|
||||
tm::abort_task.unset(r);
|
||||
tm::wait_task.unset(r);
|
||||
tm::get_task_status_recursively.unset(r);
|
||||
tm::get_and_update_ttl.unset(r);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -8,11 +8,17 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <seastar/core/sharded.hh>
|
||||
#include "api.hh"
|
||||
#include "db/config.hh"
|
||||
|
||||
namespace tasks {
|
||||
class task_manager;
|
||||
}
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_task_manager(http_context& ctx, httpd::routes& r, db::config& cfg);
|
||||
void set_task_manager(http_context& ctx, httpd::routes& r, sharded<tasks::task_manager>& tm, db::config& cfg);
|
||||
void unset_task_manager(http_context& ctx, httpd::routes& r);
|
||||
|
||||
}
|
||||
|
||||
@@ -20,17 +20,17 @@ namespace tmt = httpd::task_manager_test_json;
|
||||
using namespace json;
|
||||
using namespace seastar::httpd;
|
||||
|
||||
void set_task_manager_test(http_context& ctx, routes& r) {
|
||||
tmt::register_test_module.set(r, [&ctx] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
co_await ctx.tm.invoke_on_all([] (tasks::task_manager& tm) {
|
||||
void set_task_manager_test(http_context& ctx, routes& r, sharded<tasks::task_manager>& tm) {
|
||||
tmt::register_test_module.set(r, [&tm] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
co_await tm.invoke_on_all([] (tasks::task_manager& tm) {
|
||||
auto m = make_shared<tasks::test_module>(tm);
|
||||
tm.register_module("test", m);
|
||||
});
|
||||
co_return json_void();
|
||||
});
|
||||
|
||||
tmt::unregister_test_module.set(r, [&ctx] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
co_await ctx.tm.invoke_on_all([] (tasks::task_manager& tm) -> future<> {
|
||||
tmt::unregister_test_module.set(r, [&tm] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
co_await tm.invoke_on_all([] (tasks::task_manager& tm) -> future<> {
|
||||
auto module_name = "test";
|
||||
auto module = tm.find_module(module_name);
|
||||
co_await module->stop();
|
||||
@@ -38,8 +38,8 @@ void set_task_manager_test(http_context& ctx, routes& r) {
|
||||
co_return json_void();
|
||||
});
|
||||
|
||||
tmt::register_test_task.set(r, [&ctx] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
sharded<tasks::task_manager>& tms = ctx.tm;
|
||||
tmt::register_test_task.set(r, [&tm] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
sharded<tasks::task_manager>& tms = tm;
|
||||
auto it = req->query_parameters.find("task_id");
|
||||
auto id = it != req->query_parameters.end() ? tasks::task_id{utils::UUID{it->second}} : tasks::task_id::create_null_id();
|
||||
it = req->query_parameters.find("shard");
|
||||
@@ -54,7 +54,7 @@ void set_task_manager_test(http_context& ctx, routes& r) {
|
||||
tasks::task_info data;
|
||||
if (it != req->query_parameters.end()) {
|
||||
data.id = tasks::task_id{utils::UUID{it->second}};
|
||||
auto parent_ptr = co_await tasks::task_manager::lookup_task_on_all_shards(ctx.tm, data.id);
|
||||
auto parent_ptr = co_await tasks::task_manager::lookup_task_on_all_shards(tm, data.id);
|
||||
data.shard = parent_ptr->get_status().shard;
|
||||
}
|
||||
|
||||
@@ -69,10 +69,10 @@ void set_task_manager_test(http_context& ctx, routes& r) {
|
||||
co_return id.to_sstring();
|
||||
});
|
||||
|
||||
tmt::unregister_test_task.set(r, [&ctx] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
tmt::unregister_test_task.set(r, [&tm] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto id = tasks::task_id{utils::UUID{req->query_parameters["task_id"]}};
|
||||
try {
|
||||
co_await tasks::task_manager::invoke_on_task(ctx.tm, id, [] (tasks::task_manager::task_ptr task) -> future<> {
|
||||
co_await tasks::task_manager::invoke_on_task(tm, id, [] (tasks::task_manager::task_ptr task) -> future<> {
|
||||
tasks::test_task test_task{task};
|
||||
co_await test_task.unregister_task();
|
||||
});
|
||||
@@ -82,14 +82,14 @@ void set_task_manager_test(http_context& ctx, routes& r) {
|
||||
co_return json_void();
|
||||
});
|
||||
|
||||
tmt::finish_test_task.set(r, [&ctx] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
tmt::finish_test_task.set(r, [&tm] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto id = tasks::task_id{utils::UUID{req->param["task_id"]}};
|
||||
auto it = req->query_parameters.find("error");
|
||||
bool fail = it != req->query_parameters.end();
|
||||
std::string error = fail ? it->second : "";
|
||||
|
||||
try {
|
||||
co_await tasks::task_manager::invoke_on_task(ctx.tm, id, [fail, error = std::move(error)] (tasks::task_manager::task_ptr task) {
|
||||
co_await tasks::task_manager::invoke_on_task(tm, id, [fail, error = std::move(error)] (tasks::task_manager::task_ptr task) {
|
||||
tasks::test_task test_task{task};
|
||||
if (fail) {
|
||||
test_task.finish_failed(std::make_exception_ptr(std::runtime_error(error)));
|
||||
@@ -105,6 +105,14 @@ void set_task_manager_test(http_context& ctx, routes& r) {
|
||||
});
|
||||
}
|
||||
|
||||
void unset_task_manager_test(http_context& ctx, routes& r) {
|
||||
tmt::register_test_module.unset(r);
|
||||
tmt::unregister_test_module.unset(r);
|
||||
tmt::register_test_task.unset(r);
|
||||
tmt::unregister_test_task.unset(r);
|
||||
tmt::finish_test_task.unset(r);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
@@ -10,11 +10,17 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <seastar/core/sharded.hh>
|
||||
#include "api.hh"
|
||||
|
||||
namespace tasks {
|
||||
class task_manager;
|
||||
}
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_task_manager_test(http_context& ctx, httpd::routes& r);
|
||||
void set_task_manager_test(http_context& ctx, httpd::routes& r, sharded<tasks::task_manager>& tm);
|
||||
void unset_task_manager_test(http_context& ctx, httpd::routes& r);
|
||||
|
||||
}
|
||||
|
||||
|
||||
@@ -160,7 +160,7 @@ public:
|
||||
});
|
||||
}
|
||||
|
||||
void on_before_create_column_family(const schema& schema, std::vector<mutation>& mutations, api::timestamp_type timestamp) override {
|
||||
void on_before_create_column_family(const keyspace_metadata& ksm, const schema& schema, std::vector<mutation>& mutations, api::timestamp_type timestamp) override {
|
||||
if (schema.cdc_options().enabled()) {
|
||||
auto& db = _ctxt._proxy.get_db().local();
|
||||
auto logname = log_name(schema.cf_name());
|
||||
|
||||
27
cmake/Findrapidxml.cmake
Normal file
27
cmake/Findrapidxml.cmake
Normal file
@@ -0,0 +1,27 @@
|
||||
#
|
||||
# Copyright 2023-present ScyllaDB
|
||||
#
|
||||
|
||||
#
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
#
|
||||
find_path(rapidxml_INCLUDE_DIR
|
||||
NAMES rapidxml.h rapidxml/rapidxml.hpp)
|
||||
|
||||
mark_as_advanced(
|
||||
rapidxml_INCLUDE_DIR)
|
||||
|
||||
include(FindPackageHandleStandardArgs)
|
||||
|
||||
find_package_handle_standard_args(rapidxml
|
||||
REQUIRED_VARS
|
||||
rapidxml_INCLUDE_DIR)
|
||||
|
||||
if(rapidxml_FOUND)
|
||||
if(NOT TARGET rapidxml::rapidxml)
|
||||
add_library(rapidxml::rapidxml INTERFACE IMPORTED)
|
||||
set_target_properties(rapidxml::rapidxml
|
||||
PROPERTIES
|
||||
INTERFACE_INCLUDE_DIRECTORIES ${rapidxml_INCLUDE_DIR})
|
||||
endif()
|
||||
endif()
|
||||
@@ -21,7 +21,6 @@ add_compile_options(
|
||||
set(Seastar_DEFINITIONS_RELEASE
|
||||
SCYLLA_BUILD_MODE=release)
|
||||
|
||||
set(CMAKE_EXE_LINKER_FLAGS_RELEASE
|
||||
"-Wl,--gc-sections")
|
||||
add_link_options("LINKER:--gc-sections")
|
||||
|
||||
set(stack_usage_threshold_in_KB 13)
|
||||
|
||||
@@ -11,7 +11,7 @@ foreach(warning ${disabled_warnings})
|
||||
endif()
|
||||
endforeach()
|
||||
list(TRANSFORM _supported_warnings PREPEND "-Wno-")
|
||||
string(JOIN " " CMAKE_CXX_FLAGS
|
||||
add_compile_options(
|
||||
"-Wall"
|
||||
"-Werror"
|
||||
"-Wno-error=deprecated-declarations"
|
||||
@@ -34,14 +34,94 @@ function(default_target_arch arch)
|
||||
endif()
|
||||
endfunction()
|
||||
|
||||
function(pad_at_begin output fill str length)
|
||||
# pad the given `${str} with `${fill}`, right aligned. with the syntax of
|
||||
# fmtlib:
|
||||
# fmt::print("{:#>{}}", str, length)
|
||||
# where `#` is the `${fill}` char
|
||||
string(LENGTH "${str}" str_len)
|
||||
math(EXPR padding_len "${length} - ${str_len}")
|
||||
if(padding_len GREATER 0)
|
||||
string(REPEAT ${fill} ${padding_len} padding)
|
||||
endif()
|
||||
set(${output} "${padding}${str}" PARENT_SCOPE)
|
||||
endfunction()
|
||||
|
||||
# The relocatable package includes its own dynamic linker. We don't
|
||||
# know the path it will be installed to, so for now use a very long
|
||||
# path so that patchelf doesn't need to edit the program headers. The
|
||||
# kernel imposes a limit of 4096 bytes including the null. The other
|
||||
# constraint is that the build-id has to be in the first page, so we
|
||||
# can't use all 4096 bytes for the dynamic linker.
|
||||
# In here we just guess that 2000 extra / should be enough to cover
|
||||
# any path we get installed to but not so large that the build-id is
|
||||
# pushed to the second page.
|
||||
# At the end of the build we check that the build-id is indeed in the
|
||||
# first page. At install time we check that patchelf doesn't modify
|
||||
# the program headers.
|
||||
function(get_padded_dynamic_linker_option output length)
|
||||
set(dynamic_linker_option "-dynamic-linker")
|
||||
# capture the drive-generated command line first
|
||||
execute_process(
|
||||
COMMAND ${CMAKE_C_COMPILER} "-###" /dev/null -o t
|
||||
ERROR_VARIABLE driver_command_line
|
||||
ERROR_STRIP_TRAILING_WHITESPACE)
|
||||
# extract the argument for the "-dynamic-linker" option
|
||||
if(driver_command_line MATCHES ".*\"?${dynamic_linker_option}\"? \"?([^ \"]*)\"? .*")
|
||||
set(dynamic_linker ${CMAKE_MATCH_1})
|
||||
else()
|
||||
message(FATAL_ERROR "Unable to find ${dynamic_linker_option} in driver-generated command: "
|
||||
"${driver_command_line}")
|
||||
endif()
|
||||
# prefixing a path with "/"s does not actually change it means
|
||||
pad_at_begin(padded_dynamic_linker "/" "${dynamic_linker}" ${length})
|
||||
set(${output} "${dynamic_linker_option}=${padded_dynamic_linker}" PARENT_SCOPE)
|
||||
endfunction()
|
||||
|
||||
add_compile_options("-ffile-prefix-map=${CMAKE_SOURCE_DIR}=.")
|
||||
|
||||
default_target_arch(target_arch)
|
||||
if(target_arch)
|
||||
string(APPEND CMAKE_CXX_FLAGS " -march=${target_arch}")
|
||||
add_compile_options("-march=${target_arch}")
|
||||
endif()
|
||||
|
||||
math(EXPR _stack_usage_threshold_in_bytes "${stack_usage_threshold_in_KB} * 1024")
|
||||
set(_stack_usage_threshold_flag "-Wstack-usage=${_stack_usage_threshold_in_bytes}")
|
||||
check_cxx_compiler_flag(${_stack_usage_threshold_flag} _stack_usage_flag_supported)
|
||||
if(_stack_usage_flag_supported)
|
||||
string(APPEND CMAKE_CXX_FLAGS " ${_stack_usage_threshold_flag}")
|
||||
add_compile_options("${_stack_usage_threshold_flag}")
|
||||
endif()
|
||||
|
||||
# Force SHA1 build-id generation
|
||||
add_link_options("LINKER:--build-id=sha1")
|
||||
include(CheckLinkerFlag)
|
||||
set(Scylla_USE_LINKER
|
||||
""
|
||||
CACHE
|
||||
STRING
|
||||
"Use specified linker instead of the default one")
|
||||
if(Scylla_USE_LINKER)
|
||||
set(linkers "${Scylla_USE_LINKER}")
|
||||
else()
|
||||
set(linkers "lld" "gold")
|
||||
endif()
|
||||
|
||||
foreach(linker ${linkers})
|
||||
set(linker_flag "-fuse-ld=${linker}")
|
||||
check_linker_flag(CXX ${linker_flag} "CXX_LINKER_HAVE_${linker}")
|
||||
if(CXX_LINKER_HAVE_${linker})
|
||||
add_link_options("${linker_flag}")
|
||||
break()
|
||||
elseif(Scylla_USE_LINKER)
|
||||
message(FATAL_ERROR "${Scylla_USE_LINKER} is not supported.")
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
if(DEFINED ENV{NIX_CC})
|
||||
get_padded_dynamic_linker_option(dynamic_linker_option 0)
|
||||
else()
|
||||
# gdb has a SO_NAME_MAX_PATH_SIZE of 512, so limit the path size to
|
||||
# that. The 512 includes the null at the end, hence the 511 bellow.
|
||||
get_padded_dynamic_linker_option(dynamic_linker_option 511)
|
||||
endif()
|
||||
add_link_options("${dynamic_linker_option}")
|
||||
|
||||
@@ -325,16 +325,21 @@ public:
|
||||
void consume_end_of_stream();
|
||||
};
|
||||
|
||||
using use_backlog_tracker = bool_class<class use_backlog_tracker_tag>;
|
||||
|
||||
struct compaction_read_monitor_generator final : public read_monitor_generator {
|
||||
class compaction_read_monitor final : public sstables::read_monitor, public backlog_read_progress_manager {
|
||||
sstables::shared_sstable _sst;
|
||||
table_state& _table_s;
|
||||
const sstables::reader_position_tracker* _tracker = nullptr;
|
||||
uint64_t _last_position_seen = 0;
|
||||
use_backlog_tracker _use_backlog_tracker;
|
||||
public:
|
||||
virtual void on_read_started(const sstables::reader_position_tracker& tracker) override {
|
||||
_tracker = &tracker;
|
||||
_table_s.get_backlog_tracker().register_compacting_sstable(_sst, *this);
|
||||
if (_use_backlog_tracker) {
|
||||
_table_s.get_backlog_tracker().register_compacting_sstable(_sst, *this);
|
||||
}
|
||||
}
|
||||
|
||||
virtual void on_read_completed() override {
|
||||
@@ -352,19 +357,19 @@ struct compaction_read_monitor_generator final : public read_monitor_generator {
|
||||
}
|
||||
|
||||
void remove_sstable() {
|
||||
if (_sst) {
|
||||
if (_sst && _use_backlog_tracker) {
|
||||
_table_s.get_backlog_tracker().revert_charges(_sst);
|
||||
}
|
||||
_sst = {};
|
||||
}
|
||||
|
||||
compaction_read_monitor(sstables::shared_sstable sst, table_state& table_s)
|
||||
: _sst(std::move(sst)), _table_s(table_s) { }
|
||||
compaction_read_monitor(sstables::shared_sstable sst, table_state& table_s, use_backlog_tracker use_backlog_tracker)
|
||||
: _sst(std::move(sst)), _table_s(table_s), _use_backlog_tracker(use_backlog_tracker) { }
|
||||
|
||||
~compaction_read_monitor() {
|
||||
// We failed to finish handling this SSTable, so we have to update the backlog_tracker
|
||||
// about it.
|
||||
if (_sst) {
|
||||
if (_sst && _use_backlog_tracker) {
|
||||
_table_s.get_backlog_tracker().revert_charges(_sst);
|
||||
}
|
||||
}
|
||||
@@ -373,12 +378,16 @@ struct compaction_read_monitor_generator final : public read_monitor_generator {
|
||||
};
|
||||
|
||||
virtual sstables::read_monitor& operator()(sstables::shared_sstable sst) override {
|
||||
auto p = _generated_monitors.emplace(sst->generation(), compaction_read_monitor(sst, _table_s));
|
||||
auto p = _generated_monitors.emplace(sst->generation(), compaction_read_monitor(sst, _table_s, _use_backlog_tracker));
|
||||
return p.first->second;
|
||||
}
|
||||
|
||||
explicit compaction_read_monitor_generator(table_state& table_s)
|
||||
: _table_s(table_s) {}
|
||||
explicit compaction_read_monitor_generator(table_state& table_s, use_backlog_tracker use_backlog_tracker = use_backlog_tracker::yes)
|
||||
: _table_s(table_s), _use_backlog_tracker(use_backlog_tracker) {}
|
||||
|
||||
uint64_t compacted() const {
|
||||
return boost::accumulate(_generated_monitors | boost::adaptors::map_values | boost::adaptors::transformed([](auto& monitor) { return monitor.compacted(); }), uint64_t(0));
|
||||
}
|
||||
|
||||
void remove_exhausted_sstables(const std::vector<sstables::shared_sstable>& exhausted_sstables) {
|
||||
for (auto& sst : exhausted_sstables) {
|
||||
@@ -391,8 +400,29 @@ struct compaction_read_monitor_generator final : public read_monitor_generator {
|
||||
private:
|
||||
table_state& _table_s;
|
||||
std::unordered_map<generation_type, compaction_read_monitor> _generated_monitors;
|
||||
use_backlog_tracker _use_backlog_tracker;
|
||||
|
||||
friend class compaction_progress_monitor;
|
||||
};
|
||||
|
||||
void compaction_progress_monitor::set_generator(std::unique_ptr<read_monitor_generator> generator) {
|
||||
_generator = std::move(generator);
|
||||
}
|
||||
|
||||
void compaction_progress_monitor::reset_generator() {
|
||||
if (_generator) {
|
||||
_progress = dynamic_cast<compaction_read_monitor_generator&>(*_generator).compacted();
|
||||
}
|
||||
_generator = nullptr;
|
||||
}
|
||||
|
||||
uint64_t compaction_progress_monitor::get_progress() const {
|
||||
if (_generator) {
|
||||
return dynamic_cast<compaction_read_monitor_generator&>(*_generator).compacted();
|
||||
}
|
||||
return _progress;
|
||||
}
|
||||
|
||||
class formatted_sstables_list {
|
||||
bool _include_origin = true;
|
||||
std::vector<std::string> _ssts;
|
||||
@@ -466,12 +496,15 @@ protected:
|
||||
std::vector<shared_sstable> _used_garbage_collected_sstables;
|
||||
utils::observable<> _stop_request_observable;
|
||||
private:
|
||||
// Keeps track of monitors for input sstable.
|
||||
// If _update_backlog_tracker is set to true, monitors are responsible for adjusting backlog as compaction progresses.
|
||||
compaction_progress_monitor& _progress_monitor;
|
||||
compaction_data& init_compaction_data(compaction_data& cdata, const compaction_descriptor& descriptor) const {
|
||||
cdata.compaction_fan_in = descriptor.fan_in();
|
||||
return cdata;
|
||||
}
|
||||
protected:
|
||||
compaction(table_state& table_s, compaction_descriptor descriptor, compaction_data& cdata)
|
||||
compaction(table_state& table_s, compaction_descriptor descriptor, compaction_data& cdata, compaction_progress_monitor& progress_monitor, use_backlog_tracker use_backlog_tracker)
|
||||
: _cdata(init_compaction_data(cdata, descriptor))
|
||||
, _table_s(table_s)
|
||||
, _sstable_creator(std::move(descriptor.creator))
|
||||
@@ -490,6 +523,7 @@ protected:
|
||||
, _owned_ranges(std::move(descriptor.owned_ranges))
|
||||
, _sharder(descriptor.sharder)
|
||||
, _owned_ranges_checker(_owned_ranges ? std::optional<dht::incremental_owned_ranges_checker>(*_owned_ranges) : std::nullopt)
|
||||
, _progress_monitor(progress_monitor)
|
||||
{
|
||||
for (auto& sst : _sstables) {
|
||||
_stats_collector.update(sst->get_encoding_stats_for_compaction());
|
||||
@@ -498,6 +532,14 @@ protected:
|
||||
_contains_multi_fragment_runs = std::any_of(_sstables.begin(), _sstables.end(), [&ssts_run_ids] (shared_sstable& sst) {
|
||||
return !ssts_run_ids.insert(sst->run_identifier()).second;
|
||||
});
|
||||
_progress_monitor.set_generator(std::make_unique<compaction_read_monitor_generator>(_table_s, use_backlog_tracker));
|
||||
}
|
||||
|
||||
read_monitor_generator& unwrap_monitor_generator() const {
|
||||
if (_progress_monitor._generator) {
|
||||
return *_progress_monitor._generator;
|
||||
}
|
||||
return default_read_monitor_generator();
|
||||
}
|
||||
|
||||
virtual uint64_t partitions_per_sstable() const {
|
||||
@@ -621,6 +663,7 @@ public:
|
||||
compaction& operator=(compaction&& other) = delete;
|
||||
|
||||
virtual ~compaction() {
|
||||
_progress_monitor.reset_generator();
|
||||
}
|
||||
private:
|
||||
// Default range sstable reader that will only return mutation that belongs to current shard.
|
||||
@@ -706,6 +749,7 @@ private:
|
||||
continue;
|
||||
}
|
||||
|
||||
_cdata.compaction_size += sst->data_size();
|
||||
// We also capture the sstable, so we keep it alive while the read isn't done
|
||||
ssts->insert(sst);
|
||||
// FIXME: If the sstables have cardinality estimation bitmaps, use that
|
||||
@@ -1042,13 +1086,10 @@ void compacted_fragments_writer::consume_end_of_stream() {
|
||||
}
|
||||
|
||||
class regular_compaction : public compaction {
|
||||
// keeps track of monitors for input sstable, which are responsible for adjusting backlog as compaction progresses.
|
||||
mutable compaction_read_monitor_generator _monitor_generator;
|
||||
seastar::semaphore _replacer_lock = {1};
|
||||
public:
|
||||
regular_compaction(table_state& table_s, compaction_descriptor descriptor, compaction_data& cdata)
|
||||
: compaction(table_s, std::move(descriptor), cdata)
|
||||
, _monitor_generator(_table_s)
|
||||
regular_compaction(table_state& table_s, compaction_descriptor descriptor, compaction_data& cdata, compaction_progress_monitor& progress_monitor, use_backlog_tracker use_backlog_tracker = use_backlog_tracker::yes)
|
||||
: compaction(table_s, std::move(descriptor), cdata, progress_monitor, use_backlog_tracker)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -1066,7 +1107,7 @@ public:
|
||||
std::move(trace),
|
||||
sm_fwd,
|
||||
mr_fwd,
|
||||
_monitor_generator);
|
||||
unwrap_monitor_generator());
|
||||
}
|
||||
|
||||
std::string_view report_start_desc() const override {
|
||||
@@ -1137,7 +1178,7 @@ private:
|
||||
log_debug("Replacing earlier exhausted sstable(s) {} by new sstable(s) {}", formatted_sstables_list(exhausted_ssts, false), formatted_sstables_list(_new_unused_sstables, true));
|
||||
_replacer(get_compaction_completion_desc(exhausted_ssts, std::move(_new_unused_sstables)));
|
||||
_sstables.erase(exhausted, _sstables.end());
|
||||
_monitor_generator.remove_exhausted_sstables(exhausted_ssts);
|
||||
dynamic_cast<compaction_read_monitor_generator&>(unwrap_monitor_generator()).remove_exhausted_sstables(exhausted_ssts);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1184,8 +1225,8 @@ private:
|
||||
return bool(_replacer);
|
||||
}
|
||||
public:
|
||||
reshape_compaction(table_state& table_s, compaction_descriptor descriptor, compaction_data& cdata)
|
||||
: regular_compaction(table_s, std::move(descriptor), cdata) {
|
||||
reshape_compaction(table_state& table_s, compaction_descriptor descriptor, compaction_data& cdata, compaction_progress_monitor& progress_monitor)
|
||||
: regular_compaction(table_s, std::move(descriptor), cdata, progress_monitor, use_backlog_tracker::no) {
|
||||
}
|
||||
|
||||
virtual sstables::sstable_set make_sstable_set_for_input() const override {
|
||||
@@ -1211,7 +1252,7 @@ public:
|
||||
std::move(trace),
|
||||
sm_fwd,
|
||||
mr_fwd,
|
||||
default_read_monitor_generator());
|
||||
unwrap_monitor_generator());
|
||||
}
|
||||
|
||||
std::string_view report_start_desc() const override {
|
||||
@@ -1271,8 +1312,8 @@ protected:
|
||||
}
|
||||
|
||||
public:
|
||||
cleanup_compaction(table_state& table_s, compaction_descriptor descriptor, compaction_data& cdata)
|
||||
: regular_compaction(table_s, std::move(descriptor), cdata)
|
||||
cleanup_compaction(table_state& table_s, compaction_descriptor descriptor, compaction_data& cdata, compaction_progress_monitor& progress_monitor)
|
||||
: regular_compaction(table_s, std::move(descriptor), cdata, progress_monitor)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -1501,8 +1542,8 @@ private:
|
||||
mutable uint64_t _validation_errors = 0;
|
||||
|
||||
public:
|
||||
scrub_compaction(table_state& table_s, compaction_descriptor descriptor, compaction_data& cdata, compaction_type_options::scrub options)
|
||||
: regular_compaction(table_s, std::move(descriptor), cdata)
|
||||
scrub_compaction(table_state& table_s, compaction_descriptor descriptor, compaction_data& cdata, compaction_type_options::scrub options, compaction_progress_monitor& progress_monitor)
|
||||
: regular_compaction(table_s, std::move(descriptor), cdata, progress_monitor, use_backlog_tracker::no)
|
||||
, _options(options)
|
||||
, _scrub_start_description(fmt::format("Scrubbing in {} mode", _options.operation_mode))
|
||||
, _scrub_finish_description(fmt::format("Finished scrubbing in {} mode", _options.operation_mode)) {
|
||||
@@ -1529,7 +1570,7 @@ public:
|
||||
if (!range.is_full()) {
|
||||
on_internal_error(clogger, fmt::format("Scrub compaction in mode {} expected full partition range, but got {} instead", _options.operation_mode, range));
|
||||
}
|
||||
auto crawling_reader = _compacting->make_crawling_reader(std::move(s), std::move(permit), nullptr);
|
||||
auto crawling_reader = _compacting->make_crawling_reader(std::move(s), std::move(permit), nullptr, unwrap_monitor_generator());
|
||||
return make_flat_mutation_reader_v2<reader>(std::move(crawling_reader), _options.operation_mode, _validation_errors);
|
||||
}
|
||||
|
||||
@@ -1598,8 +1639,8 @@ private:
|
||||
_table_s.get_compaction_strategy().adjust_partition_estimate(_ms_metadata, _estimation_per_shard[s].estimated_partitions));
|
||||
}
|
||||
public:
|
||||
resharding_compaction(table_state& table_s, sstables::compaction_descriptor descriptor, compaction_data& cdata)
|
||||
: compaction(table_s, std::move(descriptor), cdata)
|
||||
resharding_compaction(table_state& table_s, sstables::compaction_descriptor descriptor, compaction_data& cdata, compaction_progress_monitor& progress_monitor)
|
||||
: compaction(table_s, std::move(descriptor), cdata, progress_monitor, use_backlog_tracker::no)
|
||||
, _estimation_per_shard(smp::count)
|
||||
, _run_identifiers(smp::count)
|
||||
{
|
||||
@@ -1633,7 +1674,8 @@ public:
|
||||
slice,
|
||||
nullptr,
|
||||
sm_fwd,
|
||||
mr_fwd);
|
||||
mr_fwd,
|
||||
unwrap_monitor_generator());
|
||||
|
||||
}
|
||||
|
||||
@@ -1705,47 +1747,49 @@ compaction_type compaction_type_options::type() const {
|
||||
return index_to_type[_options.index()];
|
||||
}
|
||||
|
||||
static std::unique_ptr<compaction> make_compaction(table_state& table_s, sstables::compaction_descriptor descriptor, compaction_data& cdata) {
|
||||
static std::unique_ptr<compaction> make_compaction(table_state& table_s, sstables::compaction_descriptor descriptor, compaction_data& cdata, compaction_progress_monitor& progress_monitor) {
|
||||
struct {
|
||||
table_state& table_s;
|
||||
sstables::compaction_descriptor&& descriptor;
|
||||
compaction_data& cdata;
|
||||
compaction_progress_monitor& progress_monitor;
|
||||
|
||||
std::unique_ptr<compaction> operator()(compaction_type_options::reshape) {
|
||||
return std::make_unique<reshape_compaction>(table_s, std::move(descriptor), cdata);
|
||||
return std::make_unique<reshape_compaction>(table_s, std::move(descriptor), cdata, progress_monitor);
|
||||
}
|
||||
std::unique_ptr<compaction> operator()(compaction_type_options::reshard) {
|
||||
return std::make_unique<resharding_compaction>(table_s, std::move(descriptor), cdata);
|
||||
return std::make_unique<resharding_compaction>(table_s, std::move(descriptor), cdata, progress_monitor);
|
||||
}
|
||||
std::unique_ptr<compaction> operator()(compaction_type_options::regular) {
|
||||
return std::make_unique<regular_compaction>(table_s, std::move(descriptor), cdata);
|
||||
return std::make_unique<regular_compaction>(table_s, std::move(descriptor), cdata, progress_monitor);
|
||||
}
|
||||
std::unique_ptr<compaction> operator()(compaction_type_options::cleanup) {
|
||||
return std::make_unique<cleanup_compaction>(table_s, std::move(descriptor), cdata);
|
||||
return std::make_unique<cleanup_compaction>(table_s, std::move(descriptor), cdata, progress_monitor);
|
||||
}
|
||||
std::unique_ptr<compaction> operator()(compaction_type_options::upgrade) {
|
||||
return std::make_unique<cleanup_compaction>(table_s, std::move(descriptor), cdata);
|
||||
return std::make_unique<cleanup_compaction>(table_s, std::move(descriptor), cdata, progress_monitor);
|
||||
}
|
||||
std::unique_ptr<compaction> operator()(compaction_type_options::scrub scrub_options) {
|
||||
return std::make_unique<scrub_compaction>(table_s, std::move(descriptor), cdata, scrub_options);
|
||||
return std::make_unique<scrub_compaction>(table_s, std::move(descriptor), cdata, scrub_options, progress_monitor);
|
||||
}
|
||||
} visitor_factory{table_s, std::move(descriptor), cdata};
|
||||
} visitor_factory{table_s, std::move(descriptor), cdata, progress_monitor};
|
||||
|
||||
return descriptor.options.visit(visitor_factory);
|
||||
}
|
||||
|
||||
static future<compaction_result> scrub_sstables_validate_mode(sstables::compaction_descriptor descriptor, compaction_data& cdata, table_state& table_s) {
|
||||
static future<compaction_result> scrub_sstables_validate_mode(sstables::compaction_descriptor descriptor, compaction_data& cdata, table_state& table_s, read_monitor_generator& monitor_generator) {
|
||||
auto schema = table_s.schema();
|
||||
auto permit = table_s.make_compaction_reader_permit();
|
||||
|
||||
uint64_t validation_errors = 0;
|
||||
cdata.compaction_size = boost::accumulate(descriptor.sstables | boost::adaptors::transformed([] (auto& sst) { return sst->data_size(); }), int64_t(0));
|
||||
|
||||
for (const auto& sst : descriptor.sstables) {
|
||||
clogger.info("Scrubbing in validate mode {}", sst->get_filename());
|
||||
|
||||
validation_errors += co_await sst->validate(permit, cdata.abort, [&schema] (sstring what) {
|
||||
scrub_compaction::report_validation_error(compaction_type::Scrub, *schema, what);
|
||||
});
|
||||
}, monitor_generator(sst));
|
||||
// Did validation actually finish because aborted?
|
||||
if (cdata.is_stop_requested()) {
|
||||
// Compaction manager will catch this exception and re-schedule the compaction.
|
||||
@@ -1771,8 +1815,15 @@ static future<compaction_result> scrub_sstables_validate_mode(sstables::compacti
|
||||
};
|
||||
}
|
||||
|
||||
future<compaction_result> scrub_sstables_validate_mode(sstables::compaction_descriptor descriptor, compaction_data& cdata, table_state& table_s, compaction_progress_monitor& progress_monitor) {
|
||||
progress_monitor.set_generator(std::make_unique<compaction_read_monitor_generator>(table_s, use_backlog_tracker::no));
|
||||
auto d = defer([&] { progress_monitor.reset_generator(); });
|
||||
auto res = co_await scrub_sstables_validate_mode(descriptor, cdata, table_s, *progress_monitor._generator);
|
||||
co_return res;
|
||||
}
|
||||
|
||||
future<compaction_result>
|
||||
compact_sstables(sstables::compaction_descriptor descriptor, compaction_data& cdata, table_state& table_s) {
|
||||
compact_sstables(sstables::compaction_descriptor descriptor, compaction_data& cdata, table_state& table_s, compaction_progress_monitor& progress_monitor) {
|
||||
if (descriptor.sstables.empty()) {
|
||||
return make_exception_future<compaction_result>(std::runtime_error(format("Called {} compaction with empty set on behalf of {}.{}",
|
||||
compaction_name(descriptor.options.type()), table_s.schema()->ks_name(), table_s.schema()->cf_name())));
|
||||
@@ -1780,9 +1831,9 @@ compact_sstables(sstables::compaction_descriptor descriptor, compaction_data& cd
|
||||
if (descriptor.options.type() == compaction_type::Scrub
|
||||
&& std::get<compaction_type_options::scrub>(descriptor.options.options()).operation_mode == compaction_type_options::scrub::mode::validate) {
|
||||
// Bypass the usual compaction machinery for dry-mode scrub
|
||||
return scrub_sstables_validate_mode(std::move(descriptor), cdata, table_s);
|
||||
return scrub_sstables_validate_mode(std::move(descriptor), cdata, table_s, progress_monitor);
|
||||
}
|
||||
return compaction::run(make_compaction(table_s, std::move(descriptor), cdata));
|
||||
return compaction::run(make_compaction(table_s, std::move(descriptor), cdata, progress_monitor));
|
||||
}
|
||||
|
||||
std::unordered_set<sstables::shared_sstable>
|
||||
@@ -1800,7 +1851,7 @@ get_fully_expired_sstables(const table_state& table_s, const std::vector<sstable
|
||||
int64_t min_timestamp = std::numeric_limits<int64_t>::max();
|
||||
|
||||
for (auto& sstable : overlapping) {
|
||||
auto gc_before = sstable->get_gc_before_for_fully_expire(compaction_time, table_s.get_tombstone_gc_state());
|
||||
auto gc_before = sstable->get_gc_before_for_fully_expire(compaction_time, table_s.get_tombstone_gc_state(), table_s.schema());
|
||||
if (sstable->get_max_local_deletion_time() >= gc_before) {
|
||||
min_timestamp = std::min(min_timestamp, sstable->get_stats_metadata().min_timestamp);
|
||||
}
|
||||
@@ -1819,7 +1870,7 @@ get_fully_expired_sstables(const table_state& table_s, const std::vector<sstable
|
||||
|
||||
// SStables that do not contain live data is added to list of possibly expired sstables.
|
||||
for (auto& candidate : compacting) {
|
||||
auto gc_before = candidate->get_gc_before_for_fully_expire(compaction_time, table_s.get_tombstone_gc_state());
|
||||
auto gc_before = candidate->get_gc_before_for_fully_expire(compaction_time, table_s.get_tombstone_gc_state(), table_s.schema());
|
||||
clogger.debug("Checking if candidate of generation {} and max_deletion_time {} is expired, gc_before is {}",
|
||||
candidate->generation(), candidate->get_stats_metadata().max_local_deletion_time, gc_before);
|
||||
// A fully expired sstable which has an ancestor undeleted shouldn't be compacted because
|
||||
|
||||
@@ -48,6 +48,7 @@ struct compaction_info {
|
||||
};
|
||||
|
||||
struct compaction_data {
|
||||
uint64_t compaction_size = 0;
|
||||
uint64_t total_partitions = 0;
|
||||
uint64_t total_keys_written = 0;
|
||||
sstring stop_requested;
|
||||
@@ -100,12 +101,27 @@ struct compaction_result {
|
||||
compaction_stats stats;
|
||||
};
|
||||
|
||||
class read_monitor_generator;
|
||||
|
||||
class compaction_progress_monitor {
|
||||
std::unique_ptr<read_monitor_generator> _generator = nullptr;
|
||||
uint64_t _progress = 0;
|
||||
public:
|
||||
void set_generator(std::unique_ptr<read_monitor_generator> generator);
|
||||
void reset_generator();
|
||||
// Returns number of bytes processed with _generator.
|
||||
uint64_t get_progress() const;
|
||||
|
||||
friend class compaction;
|
||||
friend future<compaction_result> scrub_sstables_validate_mode(sstables::compaction_descriptor, compaction_data&, table_state&, compaction_progress_monitor&);
|
||||
};
|
||||
|
||||
// Compact a list of N sstables into M sstables.
|
||||
// Returns info about the finished compaction, which includes vector to new sstables.
|
||||
//
|
||||
// compaction_descriptor is responsible for specifying the type of compaction, and influencing
|
||||
// compaction behavior through its available member fields.
|
||||
future<compaction_result> compact_sstables(sstables::compaction_descriptor descriptor, compaction_data& cdata, table_state& table_s);
|
||||
future<compaction_result> compact_sstables(sstables::compaction_descriptor descriptor, compaction_data& cdata, table_state& table_s, compaction_progress_monitor& progress_monitor);
|
||||
|
||||
// Return list of expired sstables for column family cf.
|
||||
// A sstable is fully expired *iff* its max_local_deletion_time precedes gc_before and its
|
||||
|
||||
@@ -436,7 +436,7 @@ future<sstables::compaction_result> compaction_task_executor::compact_sstables(s
|
||||
}
|
||||
}
|
||||
|
||||
co_return co_await sstables::compact_sstables(std::move(descriptor), cdata, t);
|
||||
co_return co_await sstables::compact_sstables(std::move(descriptor), cdata, t, _progress_monitor);
|
||||
}
|
||||
future<> compaction_task_executor::update_history(table_state& t, const sstables::compaction_result& res, const sstables::compaction_data& cdata) {
|
||||
auto ended_at = std::chrono::duration_cast<std::chrono::milliseconds>(res.stats.ended_at.time_since_epoch());
|
||||
@@ -477,12 +477,17 @@ public:
|
||||
: compaction_task_executor(mgr, do_throw_if_stopping, t, compaction_type, std::move(desc))
|
||||
, sstables_compaction_task_impl(mgr._task_manager_module, tasks::task_id::create_random_id(), 0, "compaction group", t->schema()->ks_name(), t->schema()->cf_name(), std::move(entity), parent_id)
|
||||
{
|
||||
_status.progress_units = "bytes";
|
||||
set_sstables(std::move(sstables));
|
||||
}
|
||||
|
||||
virtual ~sstables_task_executor() = default;
|
||||
|
||||
virtual void release_resources() noexcept override;
|
||||
|
||||
virtual future<tasks::task_manager::task::progress> get_progress() const override {
|
||||
return compaction_task_impl::get_progress(_compaction_data, _progress_monitor);
|
||||
}
|
||||
protected:
|
||||
virtual future<> run() override {
|
||||
return perform();
|
||||
@@ -497,7 +502,13 @@ public:
|
||||
tasks::task_id parent_id)
|
||||
: compaction_task_executor(mgr, do_throw_if_stopping, t, sstables::compaction_type::Compaction, "Major compaction")
|
||||
, major_compaction_task_impl(mgr._task_manager_module, tasks::task_id::create_random_id(), 0, "compaction group", t->schema()->ks_name(), t->schema()->cf_name(), "", parent_id)
|
||||
{}
|
||||
{
|
||||
_status.progress_units = "bytes";
|
||||
}
|
||||
|
||||
virtual future<tasks::task_manager::task::progress> get_progress() const override {
|
||||
return compaction_task_impl::get_progress(_compaction_data, _progress_monitor);
|
||||
}
|
||||
protected:
|
||||
virtual future<> run() override {
|
||||
return perform();
|
||||
@@ -592,18 +603,24 @@ future<> compaction_manager::perform_major_compaction(table_state& t, std::optio
|
||||
namespace compaction {
|
||||
|
||||
class custom_compaction_task_executor : public compaction_task_executor, public compaction_task_impl {
|
||||
noncopyable_function<future<>(sstables::compaction_data&)> _job;
|
||||
noncopyable_function<future<>(sstables::compaction_data&, sstables::compaction_progress_monitor&)> _job;
|
||||
|
||||
public:
|
||||
custom_compaction_task_executor(compaction_manager& mgr, throw_if_stopping do_throw_if_stopping, table_state* t, tasks::task_id parent_id, sstables::compaction_type type, sstring desc, noncopyable_function<future<>(sstables::compaction_data&)> job)
|
||||
custom_compaction_task_executor(compaction_manager& mgr, throw_if_stopping do_throw_if_stopping, table_state* t, tasks::task_id parent_id, sstables::compaction_type type, sstring desc, noncopyable_function<future<>(sstables::compaction_data&, sstables::compaction_progress_monitor&)> job)
|
||||
: compaction_task_executor(mgr, do_throw_if_stopping, t, type, std::move(desc))
|
||||
, compaction_task_impl(mgr._task_manager_module, tasks::task_id::create_random_id(), 0, "compaction group", t->schema()->ks_name(), t->schema()->cf_name(), "", parent_id)
|
||||
, _job(std::move(job))
|
||||
{}
|
||||
{
|
||||
_status.progress_units = "bytes";
|
||||
}
|
||||
|
||||
virtual std::string type() const override {
|
||||
return fmt::format("{} compaction", compaction_type());
|
||||
}
|
||||
|
||||
virtual future<tasks::task_manager::task::progress> get_progress() const override {
|
||||
return compaction_task_impl::get_progress(_compaction_data, _progress_monitor);
|
||||
}
|
||||
protected:
|
||||
virtual future<> run() override {
|
||||
return perform();
|
||||
@@ -624,7 +641,7 @@ protected:
|
||||
// NOTE:
|
||||
// no need to register shared sstables because they're excluded from non-resharding
|
||||
// compaction and some of them may not even belong to current shard.
|
||||
co_await _job(compaction_data());
|
||||
co_await _job(compaction_data(), _progress_monitor);
|
||||
finish_compaction();
|
||||
|
||||
co_return std::nullopt;
|
||||
@@ -633,7 +650,7 @@ protected:
|
||||
|
||||
}
|
||||
|
||||
future<> compaction_manager::run_custom_job(table_state& t, sstables::compaction_type type, const char* desc, noncopyable_function<future<>(sstables::compaction_data&)> job, std::optional<tasks::task_info> info, throw_if_stopping do_throw_if_stopping) {
|
||||
future<> compaction_manager::run_custom_job(table_state& t, sstables::compaction_type type, const char* desc, noncopyable_function<future<>(sstables::compaction_data&, sstables::compaction_progress_monitor&)> job, std::optional<tasks::task_info> info, throw_if_stopping do_throw_if_stopping) {
|
||||
auto gh = start_compaction(t);
|
||||
if (!gh) {
|
||||
co_return;
|
||||
@@ -1291,12 +1308,17 @@ public:
|
||||
, offstrategy_compaction_task_impl(mgr._task_manager_module, tasks::task_id::create_random_id(), parent_id ? 0 : mgr._task_manager_module->new_sequence_number(), "compaction group", t->schema()->ks_name(), t->schema()->cf_name(), "", parent_id)
|
||||
, _performed(performed)
|
||||
{
|
||||
_status.progress_units = "bytes";
|
||||
_performed = false;
|
||||
}
|
||||
|
||||
bool performed() const noexcept {
|
||||
return _performed;
|
||||
}
|
||||
|
||||
virtual future<tasks::task_manager::task::progress> get_progress() const override {
|
||||
return compaction_task_impl::get_progress(_compaction_data, _progress_monitor);
|
||||
}
|
||||
protected:
|
||||
virtual future<> run() override {
|
||||
return perform();
|
||||
@@ -1570,7 +1592,7 @@ private:
|
||||
sstables::compaction_descriptor::default_max_sstable_bytes,
|
||||
sst->run_identifier(),
|
||||
sstables::compaction_type_options::make_scrub(sstables::compaction_type_options::scrub::mode::validate));
|
||||
co_return co_await sstables::compact_sstables(std::move(desc), _compaction_data, *_compacting_table);
|
||||
co_return co_await sstables::compact_sstables(std::move(desc), _compaction_data, *_compacting_table, _progress_monitor);
|
||||
} catch (sstables::compaction_stopped_exception&) {
|
||||
// ignore, will be handled by can_proceed()
|
||||
} catch (storage_io_error& e) {
|
||||
@@ -1630,6 +1652,7 @@ public:
|
||||
// will have more space available released by previous jobs.
|
||||
std::ranges::sort(_pending_cleanup_jobs, std::ranges::greater(), std::mem_fn(&sstables::compaction_descriptor::sstables_size));
|
||||
_cm._stats.pending_tasks += _pending_cleanup_jobs.size();
|
||||
_status.progress_units = "bytes";
|
||||
}
|
||||
|
||||
virtual ~cleanup_sstables_compaction_task_executor() = default;
|
||||
@@ -1640,6 +1663,10 @@ public:
|
||||
_compacting.release_all();
|
||||
_owned_ranges_ptr = nullptr;
|
||||
}
|
||||
|
||||
virtual future<tasks::task_manager::task::progress> get_progress() const override {
|
||||
return compaction_task_impl::get_progress(_compaction_data, _progress_monitor);
|
||||
}
|
||||
protected:
|
||||
virtual future<> run() override {
|
||||
return perform();
|
||||
|
||||
@@ -345,7 +345,7 @@ public:
|
||||
// parameter type is the compaction type the operation can most closely be
|
||||
// associated with, use compaction_type::Compaction, if none apply.
|
||||
// parameter job is a function that will carry the operation
|
||||
future<> run_custom_job(compaction::table_state& s, sstables::compaction_type type, const char *desc, noncopyable_function<future<>(sstables::compaction_data&)> job, std::optional<tasks::task_info> info, throw_if_stopping do_throw_if_stopping);
|
||||
future<> run_custom_job(compaction::table_state& s, sstables::compaction_type type, const char *desc, noncopyable_function<future<>(sstables::compaction_data&, sstables::compaction_progress_monitor&)> job, std::optional<tasks::task_info> info, throw_if_stopping do_throw_if_stopping);
|
||||
|
||||
class compaction_reenabler {
|
||||
compaction_manager& _cm;
|
||||
@@ -475,6 +475,7 @@ protected:
|
||||
sstables::compaction_data _compaction_data;
|
||||
state _state = state::none;
|
||||
throw_if_stopping _do_throw_if_stopping;
|
||||
sstables::compaction_progress_monitor _progress_monitor;
|
||||
|
||||
private:
|
||||
shared_future<compaction_manager::compaction_stats_opt> _compaction_done = make_ready_future<compaction_manager::compaction_stats_opt>();
|
||||
|
||||
@@ -51,7 +51,7 @@ std::vector<compaction_descriptor> compaction_strategy_impl::get_cleanup_compact
|
||||
}));
|
||||
}
|
||||
|
||||
bool compaction_strategy_impl::worth_dropping_tombstones(const shared_sstable& sst, gc_clock::time_point compaction_time, const tombstone_gc_state& gc_state) {
|
||||
bool compaction_strategy_impl::worth_dropping_tombstones(const shared_sstable& sst, gc_clock::time_point compaction_time, const table_state& t) {
|
||||
if (_disable_tombstone_compaction) {
|
||||
return false;
|
||||
}
|
||||
@@ -62,7 +62,7 @@ bool compaction_strategy_impl::worth_dropping_tombstones(const shared_sstable& s
|
||||
if (db_clock::now()-_tombstone_compaction_interval < sst->data_file_write_time()) {
|
||||
return false;
|
||||
}
|
||||
auto gc_before = sst->get_gc_before_for_drop_estimation(compaction_time, gc_state);
|
||||
auto gc_before = sst->get_gc_before_for_drop_estimation(compaction_time, t.get_tombstone_gc_state(), t.schema());
|
||||
return sst->estimate_droppable_tombstone_ratio(gc_before) >= _tombstone_threshold;
|
||||
}
|
||||
|
||||
|
||||
@@ -64,7 +64,7 @@ public:
|
||||
|
||||
// Check if a given sstable is entitled for tombstone compaction based on its
|
||||
// droppable tombstone histogram and gc_before.
|
||||
bool worth_dropping_tombstones(const shared_sstable& sst, gc_clock::time_point compaction_time, const tombstone_gc_state& gc_state);
|
||||
bool worth_dropping_tombstones(const shared_sstable& sst, gc_clock::time_point compaction_time, const table_state& t);
|
||||
|
||||
virtual std::unique_ptr<compaction_backlog_tracker::impl> make_backlog_tracker() const = 0;
|
||||
|
||||
|
||||
@@ -51,15 +51,15 @@ compaction_descriptor leveled_compaction_strategy::get_sstables_for_compaction(t
|
||||
auto& sstables = manifest.get_level(level);
|
||||
// filter out sstables which droppable tombstone ratio isn't greater than the defined threshold.
|
||||
auto e = boost::range::remove_if(sstables, [this, compaction_time, &table_s] (const sstables::shared_sstable& sst) -> bool {
|
||||
return !worth_dropping_tombstones(sst, compaction_time, table_s.get_tombstone_gc_state());
|
||||
return !worth_dropping_tombstones(sst, compaction_time, table_s);
|
||||
});
|
||||
sstables.erase(e, sstables.end());
|
||||
if (sstables.empty()) {
|
||||
continue;
|
||||
}
|
||||
auto& sst = *std::max_element(sstables.begin(), sstables.end(), [&] (auto& i, auto& j) {
|
||||
auto gc_before1 = i->get_gc_before_for_drop_estimation(compaction_time, table_s.get_tombstone_gc_state());
|
||||
auto gc_before2 = j->get_gc_before_for_drop_estimation(compaction_time, table_s.get_tombstone_gc_state());
|
||||
auto gc_before1 = i->get_gc_before_for_drop_estimation(compaction_time, table_s.get_tombstone_gc_state(), table_s.schema());
|
||||
auto gc_before2 = j->get_gc_before_for_drop_estimation(compaction_time, table_s.get_tombstone_gc_state(), table_s.schema());
|
||||
return i->estimate_droppable_tombstone_ratio(gc_before1) < j->estimate_droppable_tombstone_ratio(gc_before2);
|
||||
});
|
||||
return sstables::compaction_descriptor({ sst }, sst->get_sstable_level());
|
||||
|
||||
@@ -243,7 +243,7 @@ size_tiered_compaction_strategy::get_sstables_for_compaction(table_state& table_
|
||||
for (auto&& sstables : buckets | boost::adaptors::reversed) {
|
||||
// filter out sstables which droppable tombstone ratio isn't greater than the defined threshold.
|
||||
auto e = boost::range::remove_if(sstables, [this, compaction_time, &table_s] (const sstables::shared_sstable& sst) -> bool {
|
||||
return !worth_dropping_tombstones(sst, compaction_time, table_s.get_tombstone_gc_state());
|
||||
return !worth_dropping_tombstones(sst, compaction_time, table_s);
|
||||
});
|
||||
sstables.erase(e, sstables.end());
|
||||
if (sstables.empty()) {
|
||||
|
||||
@@ -156,7 +156,7 @@ future<> reshard(sstables::sstable_directory& dir, sstables::sstable_directory::
|
||||
// jobs. But only one will run in parallel at a time
|
||||
auto& t = table.as_table_state();
|
||||
co_await coroutine::parallel_for_each(buckets, [&] (std::vector<sstables::shared_sstable>& sstlist) mutable {
|
||||
return table.get_compaction_manager().run_custom_job(table.as_table_state(), sstables::compaction_type::Reshard, "Reshard compaction", [&] (sstables::compaction_data& info) -> future<> {
|
||||
return table.get_compaction_manager().run_custom_job(table.as_table_state(), sstables::compaction_type::Reshard, "Reshard compaction", [&] (sstables::compaction_data& info, sstables::compaction_progress_monitor& progress_monitor) -> future<> {
|
||||
auto erm = table.get_effective_replication_map(); // keep alive around compaction.
|
||||
|
||||
sstables::compaction_descriptor desc(sstlist);
|
||||
@@ -165,7 +165,7 @@ future<> reshard(sstables::sstable_directory& dir, sstables::sstable_directory::
|
||||
desc.sharder = &erm->get_sharder(*table.schema());
|
||||
desc.owned_ranges = owned_ranges_ptr;
|
||||
|
||||
auto result = co_await sstables::compact_sstables(std::move(desc), info, t);
|
||||
auto result = co_await sstables::compact_sstables(std::move(desc), info, t, progress_monitor);
|
||||
// input sstables are moved, to guarantee their resources are released once we're done
|
||||
// resharding them.
|
||||
co_await when_all_succeed(dir.collect_output_unshared_sstables(std::move(result.new_sstables), sstables::sstable_directory::can_be_remote::yes), dir.remove_sstables(std::move(sstlist))).discard_result();
|
||||
@@ -254,6 +254,17 @@ future<> run_table_tasks(replica::database& db, std::vector<table_tasks_info> ta
|
||||
}
|
||||
}
|
||||
|
||||
future<tasks::task_manager::task::progress> compaction_task_impl::get_progress(const sstables::compaction_data& cdata, const sstables::compaction_progress_monitor& progress_monitor) const {
|
||||
if (cdata.compaction_size == 0) {
|
||||
co_return get_binary_progress();
|
||||
}
|
||||
|
||||
co_return tasks::task_manager::task::progress{
|
||||
.completed = is_done() ? cdata.compaction_size : progress_monitor.get_progress(), // Consider tasks which skip all files.
|
||||
.total = cdata.compaction_size
|
||||
};
|
||||
}
|
||||
|
||||
future<> major_keyspace_compaction_task_impl::run() {
|
||||
co_await _db.invoke_on_all([&] (replica::database& db) -> future<> {
|
||||
tasks::task_info parent_info{_status.id, _status.shard};
|
||||
@@ -452,8 +463,8 @@ future<> shard_reshaping_compaction_task_impl::run() {
|
||||
|
||||
std::exception_ptr ex;
|
||||
try {
|
||||
co_await table.get_compaction_manager().run_custom_job(table.as_table_state(), sstables::compaction_type::Reshape, "Reshape compaction", [&dir = _dir, &table, sstlist = std::move(sstlist), desc = std::move(desc)] (sstables::compaction_data& info) mutable -> future<> {
|
||||
sstables::compaction_result result = co_await sstables::compact_sstables(std::move(desc), info, table.as_table_state());
|
||||
co_await table.get_compaction_manager().run_custom_job(table.as_table_state(), sstables::compaction_type::Reshape, "Reshape compaction", [&dir = _dir, &table, sstlist = std::move(sstlist), desc = std::move(desc)] (sstables::compaction_data& info, sstables::compaction_progress_monitor& progress_monitor) mutable -> future<> {
|
||||
sstables::compaction_result result = co_await sstables::compact_sstables(std::move(desc), info, table.as_table_state(), progress_monitor);
|
||||
co_await dir.remove_unshared_sstables(std::move(sstlist));
|
||||
co_await dir.collect_output_unshared_sstables(std::move(result.new_sstables), sstables::sstable_directory::can_be_remote::no);
|
||||
}, info, throw_if_stopping::yes);
|
||||
|
||||
@@ -41,6 +41,8 @@ public:
|
||||
virtual std::string type() const override = 0;
|
||||
protected:
|
||||
virtual future<> run() override = 0;
|
||||
|
||||
future<tasks::task_manager::task::progress> get_progress(const sstables::compaction_data& cdata, const sstables::compaction_progress_monitor& progress_monitor) const;
|
||||
};
|
||||
|
||||
class major_compaction_task_impl : public compaction_task_impl {
|
||||
|
||||
@@ -366,7 +366,7 @@ time_window_compaction_strategy::get_next_non_expired_sstables(table_state& tabl
|
||||
// if there is no sstable to compact in standard way, try compacting single sstable whose droppable tombstone
|
||||
// ratio is greater than threshold.
|
||||
auto e = boost::range::remove_if(non_expiring_sstables, [this, compaction_time, &table_s] (const shared_sstable& sst) -> bool {
|
||||
return !worth_dropping_tombstones(sst, compaction_time, table_s.get_tombstone_gc_state());
|
||||
return !worth_dropping_tombstones(sst, compaction_time, table_s);
|
||||
});
|
||||
non_expiring_sstables.erase(e, non_expiring_sstables.end());
|
||||
if (non_expiring_sstables.empty()) {
|
||||
|
||||
105
configure.py
105
configure.py
@@ -27,16 +27,6 @@ tempfile.tempdir = f"{outdir}/tmp"
|
||||
|
||||
configure_args = str.join(' ', [shlex.quote(x) for x in sys.argv[1:] if not x.startswith('--out=')])
|
||||
|
||||
employ_ld_trickery = True
|
||||
|
||||
# distro-specific setup
|
||||
def distro_setup_nix():
|
||||
global employ_ld_trickery
|
||||
employ_ld_trickery = False
|
||||
|
||||
if os.environ.get('NIX_CC'):
|
||||
distro_setup_nix()
|
||||
|
||||
# distribution "internationalization", converting package names.
|
||||
# Fedora name is key, values is distro -> package name dict.
|
||||
i18n_xlat = {
|
||||
@@ -1313,8 +1303,6 @@ idls = ['idl/gossip_digest.idl.hh',
|
||||
'idl/utils.idl.hh',
|
||||
]
|
||||
|
||||
headers = find_headers('.', excluded_dirs=['idl', 'build', 'seastar', '.git'])
|
||||
|
||||
scylla_tests_generic_dependencies = [
|
||||
'test/lib/cql_test_env.cc',
|
||||
'test/lib/test_services.cc',
|
||||
@@ -1512,26 +1500,29 @@ wasm_deps['wasm/test_UDA_final.wat'] = 'test/resource/wasm/c/test_UDA_final.c'
|
||||
wasm_deps['wasm/test_UDA_scalar.wat'] = 'test/resource/wasm/c/test_UDA_scalar.c'
|
||||
wasm_deps['wasm/test_word_double.wat'] = 'test/resource/wasm/c/test_word_double.c'
|
||||
|
||||
warnings = [
|
||||
'-Wall',
|
||||
'-Werror',
|
||||
'-Wimplicit-fallthrough',
|
||||
'-Wno-mismatched-tags', # clang-only
|
||||
'-Wno-c++11-narrowing',
|
||||
'-Wno-overloaded-virtual',
|
||||
'-Wno-unused-command-line-argument',
|
||||
'-Wno-unsupported-friend',
|
||||
'-Wno-implicit-int-float-conversion',
|
||||
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=77728
|
||||
'-Wno-psabi',
|
||||
'-Wno-narrowing',
|
||||
]
|
||||
|
||||
warnings = [w
|
||||
for w in warnings
|
||||
if flag_supported(flag=w, compiler=args.cxx)]
|
||||
def get_warning_options(cxx):
|
||||
warnings = [
|
||||
'-Wall',
|
||||
'-Werror',
|
||||
'-Wimplicit-fallthrough',
|
||||
'-Wno-mismatched-tags', # clang-only
|
||||
'-Wno-c++11-narrowing',
|
||||
'-Wno-overloaded-virtual',
|
||||
'-Wno-unused-command-line-argument',
|
||||
'-Wno-unsupported-friend',
|
||||
'-Wno-implicit-int-float-conversion',
|
||||
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=77728
|
||||
'-Wno-psabi',
|
||||
'-Wno-narrowing',
|
||||
]
|
||||
|
||||
warnings = [w
|
||||
for w in warnings
|
||||
if flag_supported(flag=w, compiler=cxx)]
|
||||
|
||||
return ' '.join(warnings + ['-Wno-error=deprecated-declarations'])
|
||||
|
||||
warnings = ' '.join(warnings + ['-Wno-error=deprecated-declarations'])
|
||||
|
||||
def get_clang_inline_threshold():
|
||||
if args.clang_inline_threshold != -1:
|
||||
@@ -1589,6 +1580,7 @@ pkgs = []
|
||||
pkgs.append('lua53' if have_pkg('lua53') else 'lua')
|
||||
|
||||
pkgs.append('libsystemd')
|
||||
pkgs.append('jsoncpp')
|
||||
|
||||
has_sanitize_address_use_after_scope = try_compile(compiler=args.cxx, flags=['-fsanitize-address-use-after-scope'], source='int f() {}')
|
||||
|
||||
@@ -1648,15 +1640,22 @@ for m, mode_config in modes.items():
|
||||
# At the end of the build we check that the build-id is indeed in the
|
||||
# first page. At install time we check that patchelf doesn't modify
|
||||
# the program headers.
|
||||
def dynamic_linker_option():
|
||||
gcc_linker_output = subprocess.check_output(['gcc', '-###', '/dev/null', '-o', 't'], stderr=subprocess.STDOUT).decode('utf-8')
|
||||
original_dynamic_linker = re.search('-dynamic-linker ([^ ]*)', gcc_linker_output).groups()[0]
|
||||
|
||||
gcc_linker_output = subprocess.check_output(['gcc', '-###', '/dev/null', '-o', 't'], stderr=subprocess.STDOUT).decode('utf-8')
|
||||
original_dynamic_linker = re.search('-dynamic-linker ([^ ]*)', gcc_linker_output).groups()[0]
|
||||
if employ_ld_trickery:
|
||||
# gdb has a SO_NAME_MAX_PATH_SIZE of 512, so limit the path size to
|
||||
# that. The 512 includes the null at the end, hence the 511 bellow.
|
||||
dynamic_linker = '/' * (511 - len(original_dynamic_linker)) + original_dynamic_linker
|
||||
else:
|
||||
dynamic_linker = original_dynamic_linker
|
||||
employ_ld_trickery = True
|
||||
# distro-specific setup
|
||||
if os.environ.get('NIX_CC'):
|
||||
employ_ld_trickery = False
|
||||
|
||||
if employ_ld_trickery:
|
||||
# gdb has a SO_NAME_MAX_PATH_SIZE of 512, so limit the path size to
|
||||
# that. The 512 includes the null at the end, hence the 511 bellow.
|
||||
dynamic_linker = '/' * (511 - len(original_dynamic_linker)) + original_dynamic_linker
|
||||
else:
|
||||
dynamic_linker = original_dynamic_linker
|
||||
return f'--dynamic-linker={dynamic_linker}'
|
||||
|
||||
forced_ldflags = '-Wl,'
|
||||
|
||||
@@ -1666,7 +1665,7 @@ forced_ldflags = '-Wl,'
|
||||
# explicitly ask for SHA1 build-ids.
|
||||
forced_ldflags += '--build-id=sha1,'
|
||||
|
||||
forced_ldflags += f'--dynamic-linker={dynamic_linker}'
|
||||
forced_ldflags += dynamic_linker_option()
|
||||
|
||||
user_ldflags = forced_ldflags + ' ' + args.user_ldflags
|
||||
|
||||
@@ -1744,10 +1743,7 @@ if not args.dist_only:
|
||||
for mode, mode_config in build_modes.items():
|
||||
configure_seastar(outdir, mode, mode_config)
|
||||
|
||||
pc = {mode: f'{outdir}/{mode}/seastar/seastar.pc' for mode in build_modes}
|
||||
|
||||
def query_seastar_flags(pc_file, link_static_cxx=False):
|
||||
use_shared_libs = modes[mode]['build_seastar_shared_libs']
|
||||
def query_seastar_flags(pc_file, use_shared_libs, link_static_cxx=False):
|
||||
if use_shared_libs:
|
||||
opt = '--shared'
|
||||
else:
|
||||
@@ -1760,13 +1756,11 @@ def query_seastar_flags(pc_file, link_static_cxx=False):
|
||||
if link_static_cxx:
|
||||
libs = libs.replace('-lstdc++ ', '')
|
||||
|
||||
return cflags, libs
|
||||
testing_libs = pkg_config(pc_file.replace('seastar.pc', 'seastar-testing.pc'), '--libs', '--static')
|
||||
return {'seastar_cflags': cflags,
|
||||
'seastar_libs': libs,
|
||||
'seastar_testing_libs': testing_libs}
|
||||
|
||||
for mode in build_modes:
|
||||
seastar_pc_cflags, seastar_pc_libs = query_seastar_flags(pc[mode], link_static_cxx=args.staticcxx)
|
||||
modes[mode]['seastar_cflags'] = seastar_pc_cflags
|
||||
modes[mode]['seastar_libs'] = seastar_pc_libs
|
||||
modes[mode]['seastar_testing_libs'] = pkg_config(pc[mode].replace('seastar.pc', 'seastar-testing.pc'), '--libs', '--static')
|
||||
|
||||
abseil_pkgs = [
|
||||
'absl_raw_hash_set',
|
||||
@@ -1775,8 +1769,7 @@ abseil_pkgs = [
|
||||
|
||||
pkgs += abseil_pkgs
|
||||
|
||||
user_cflags += " " + pkg_config('jsoncpp', '--cflags')
|
||||
libs = ' '.join([maybe_static(args.staticyamlcpp, '-lyaml-cpp'), '-latomic', '-llz4', '-lz', '-lsnappy', pkg_config('jsoncpp', '--libs'),
|
||||
libs = ' '.join([maybe_static(args.staticyamlcpp, '-lyaml-cpp'), '-latomic', '-llz4', '-lz', '-lsnappy',
|
||||
' -lstdc++fs', ' -lcrypt', ' -lcryptopp', ' -lpthread',
|
||||
# Must link with static version of libzstd, since
|
||||
# experimental APIs that we use are only present there.
|
||||
@@ -1813,6 +1806,7 @@ def write_build_file(f,
|
||||
scylla_version,
|
||||
scylla_release,
|
||||
args):
|
||||
warnings = get_warning_options(args.cxx)
|
||||
f.write(textwrap.dedent('''\
|
||||
configure_args = {configure_args}
|
||||
builddir = {outdir}
|
||||
@@ -1907,8 +1901,13 @@ def write_build_file(f,
|
||||
else:
|
||||
f.write(f'build $builddir/{wasm}: c2wasm {src}\n')
|
||||
f.write(f'build $builddir/{binary}: wasm2wat $builddir/{wasm}\n')
|
||||
|
||||
for mode in build_modes:
|
||||
modeval = modes[mode]
|
||||
modeval.update(query_seastar_flags(f'{outdir}/{mode}/seastar/seastar.pc',
|
||||
modeval['build_seastar_shared_libs'],
|
||||
args.staticcxx))
|
||||
|
||||
fmt_lib = 'fmt'
|
||||
f.write(textwrap.dedent('''\
|
||||
cxx_ld_flags_{mode} = {cxx_ld_flags}
|
||||
@@ -1969,7 +1968,7 @@ def write_build_file(f,
|
||||
command = CARGO_BUILD_DEP_INFO_BASEDIR='.' cargo build --locked --manifest-path=rust/Cargo.toml --target-dir=$builddir/{mode} --profile=rust-{mode} $
|
||||
&& touch $out
|
||||
description = RUST_LIB $out
|
||||
''').format(mode=mode, antlr3_exec=args.antlr3_exec, fmt_lib=fmt_lib, test_repeat=test_repeat, test_timeout=test_timeout, **modeval))
|
||||
''').format(mode=mode, antlr3_exec=args.antlr3_exec, fmt_lib=fmt_lib, test_repeat=args.test_repeat, test_timeout=args.test_timeout, **modeval))
|
||||
f.write(
|
||||
'build {mode}-build: phony {artifacts} {wasms}\n'.format(
|
||||
mode=mode,
|
||||
@@ -2070,6 +2069,8 @@ def write_build_file(f,
|
||||
objs=' '.join(compiles)
|
||||
)
|
||||
)
|
||||
|
||||
headers = find_headers('.', excluded_dirs=['idl', 'build', 'seastar', '.git'])
|
||||
f.write(
|
||||
'build {mode}-headers: phony {header_objs}\n'.format(
|
||||
mode=mode,
|
||||
|
||||
@@ -7,7 +7,35 @@ generate_cql_grammar(
|
||||
SOURCES cql_grammar_srcs)
|
||||
set_source_files_properties(${cql_grammar_srcs}
|
||||
PROPERTIES
|
||||
COMPILE_FLAGS "-Wno-uninitialized -Wno-parentheses-equality")
|
||||
COMPILE_OPTIONS "-Wno-uninitialized;-Wno-parentheses-equality")
|
||||
|
||||
set(cql_parser_srcs ${cql_grammar_srcs})
|
||||
list(FILTER cql_parser_srcs INCLUDE REGEX "Parser.cpp$")
|
||||
|
||||
set(unoptimized_levels "0" "g" "s")
|
||||
if(Seastar_OptimizationLevel_${build_mode} IN_LIST unoptimized_levels)
|
||||
# Unoptimized parsers end up using huge amounts of stack space and
|
||||
# overflowing their stack
|
||||
list(APPEND cql_parser_compile_options
|
||||
"-O1")
|
||||
endif()
|
||||
|
||||
include(CheckCXXCompilerFlag)
|
||||
check_cxx_compiler_flag("-fsanitize-address-use-after-scope"
|
||||
_sanitize_address_use_after_scope_supported)
|
||||
if(_sanitize_address_use_after_scope_supported)
|
||||
# use-after-scope sanitizer also uses large amount of stack space
|
||||
# and overflows the stack of CqlParser
|
||||
list(APPEND cql_parser_compile_options
|
||||
"-fno-sanitize-address-use-after-scope")
|
||||
endif()
|
||||
|
||||
if(DEFINED cql_parser_compile_options)
|
||||
set_property(
|
||||
SOURCE ${cql_parser_srcs}
|
||||
APPEND
|
||||
PROPERTY COMPILE_OPTIONS ${cql_parser_compile_options})
|
||||
endif()
|
||||
|
||||
add_library(cql3 STATIC)
|
||||
target_sources(cql3
|
||||
|
||||
@@ -197,6 +197,7 @@ concept LeafExpression
|
||||
/// A column, usually encountered on the left side of a restriction.
|
||||
/// An expression like `mycol < 5` would be expressed as a binary_operator
|
||||
/// with column_value on the left hand side.
|
||||
/// The column_definition* points inside the schema_ptr used during preparation.
|
||||
struct column_value {
|
||||
const column_definition* col;
|
||||
|
||||
@@ -207,8 +208,8 @@ struct column_value {
|
||||
|
||||
/// A subscripted value, eg list_colum[2], val[sub]
|
||||
struct subscript {
|
||||
expression val;
|
||||
expression sub;
|
||||
expression val; // The value that is being subscripted
|
||||
expression sub; // The value between the square braces
|
||||
data_type type; // may be null before prepare
|
||||
|
||||
friend bool operator==(const subscript&, const subscript&) = default;
|
||||
@@ -235,7 +236,8 @@ enum class null_handling_style {
|
||||
lwt_nulls, // evaluate(NULL = NULL) -> TRUE, evaluate(NULL < x) -> exception
|
||||
};
|
||||
|
||||
/// Operator restriction: LHS op RHS.
|
||||
// An operation on two items (left hand side and right hand side).
|
||||
// For example: "col = 2", "(col1, col2) = (?, 3)"
|
||||
struct binary_operator {
|
||||
expression lhs;
|
||||
oper_t op;
|
||||
@@ -248,14 +250,18 @@ struct binary_operator {
|
||||
friend bool operator==(const binary_operator&, const binary_operator&) = default;
|
||||
};
|
||||
|
||||
/// A conjunction of restrictions.
|
||||
// A conjunction of expressions separated by the AND keyword.
|
||||
// For example: "a < 3 AND col1 = ? AND pk IN (1, 2)"
|
||||
struct conjunction {
|
||||
std::vector<expression> children;
|
||||
|
||||
friend bool operator==(const conjunction&, const conjunction&) = default;
|
||||
};
|
||||
|
||||
// Gets resolved eventually into a column_value.
|
||||
// A string that represents a column name.
|
||||
// It's not validated in any way, it's just a name that someone wrote.
|
||||
// During preparation it's resolved and converted into a validated column_value.
|
||||
// For example "my_col", "pk1"
|
||||
struct unresolved_identifier {
|
||||
::shared_ptr<column_identifier_raw> ident;
|
||||
|
||||
@@ -265,6 +271,7 @@ struct unresolved_identifier {
|
||||
};
|
||||
|
||||
// An attribute attached to a column mutation: writetime or ttl
|
||||
// For example: "WRITETIME(my_col)", "TTL(some_col)"
|
||||
struct column_mutation_attribute {
|
||||
enum class attribute_kind { writetime, ttl };
|
||||
|
||||
@@ -276,7 +283,11 @@ struct column_mutation_attribute {
|
||||
friend bool operator==(const column_mutation_attribute&, const column_mutation_attribute&) = default;
|
||||
};
|
||||
|
||||
// Function call.
|
||||
// For example: "some_func(123, 456)", "token(col1, col2)"
|
||||
struct function_call {
|
||||
// Before preparation "func" is a function_name.
|
||||
// During preparation it's converted into db::functions::function
|
||||
std::variant<functions::function_name, shared_ptr<db::functions::function>> func;
|
||||
std::vector<expression> args;
|
||||
|
||||
@@ -312,8 +323,14 @@ struct function_call {
|
||||
friend bool operator==(const function_call&, const function_call&) = default;
|
||||
};
|
||||
|
||||
// Represents casting an expression to a given type.
|
||||
// There are two types of casts - C style and SQL style.
|
||||
// For example: "(text)ascii_column", "CAST(int_column as blob)"
|
||||
struct cast {
|
||||
enum class cast_style { c, sql };
|
||||
enum class cast_style {
|
||||
c, // (int)arg
|
||||
sql // CAST(arg as int)
|
||||
};
|
||||
cast_style style;
|
||||
expression arg;
|
||||
std::variant<data_type, shared_ptr<cql3_type::raw>> type;
|
||||
@@ -321,6 +338,8 @@ struct cast {
|
||||
friend bool operator==(const cast&, const cast&) = default;
|
||||
};
|
||||
|
||||
// Represents accessing a field inside a struct (user defined type).
|
||||
// For example: "udt_val.udt_field"
|
||||
struct field_selection {
|
||||
expression structure;
|
||||
shared_ptr<column_identifier_raw> field;
|
||||
@@ -330,7 +349,12 @@ struct field_selection {
|
||||
friend bool operator==(const field_selection&, const field_selection&) = default;
|
||||
};
|
||||
|
||||
// Represents a bind marker, both named and unnamed.
|
||||
// For example: "?", ":myvar"
|
||||
// It contains only the index, for named bind markers the names are kept inside query_options.
|
||||
struct bind_variable {
|
||||
// Index of this bind marker inside the query string.
|
||||
// Consecutive bind markers are numbered 0, 1, 2, 3, ...
|
||||
int32_t bind_index;
|
||||
|
||||
// Describes where this bound value will be assigned.
|
||||
@@ -342,6 +366,8 @@ struct bind_variable {
|
||||
|
||||
// A constant which does not yet have a date type. It is partially typed
|
||||
// (we know if it's floating or int) but not sized.
|
||||
// For example: "123", "1.341", "null"
|
||||
// During preparation it's assigned an exact type and converted into expr::constant.
|
||||
struct untyped_constant {
|
||||
enum type_class { integer, floating_point, string, boolean, duration, uuid, hex, null };
|
||||
type_class partial_type;
|
||||
@@ -354,7 +380,9 @@ untyped_constant make_untyped_null();
|
||||
|
||||
// Represents a constant value with known value and type
|
||||
// For null and unset the type can sometimes be set to empty_type
|
||||
// For example: "123", "abcddef", "[1, 2, 3, 4, 5]"
|
||||
struct constant {
|
||||
// The CQL value, serialized to binary representation.
|
||||
cql3::raw_value value;
|
||||
|
||||
// Never nullptr, for NULL and UNSET might be empty_type
|
||||
@@ -374,7 +402,9 @@ struct constant {
|
||||
friend bool operator==(const constant&, const constant&) = default;
|
||||
};
|
||||
|
||||
// Denotes construction of a tuple from its elements, e.g. ('a', ?, some_column) in CQL.
|
||||
// Denotes construction of a tuple from its elements.
|
||||
// For example: "('a', ?, some_column)"
|
||||
// During preparation tuple constructors with constant values are converted to expr::constant.
|
||||
struct tuple_constructor {
|
||||
std::vector<expression> elements;
|
||||
|
||||
@@ -386,9 +416,13 @@ struct tuple_constructor {
|
||||
};
|
||||
|
||||
// Constructs a collection of same-typed elements
|
||||
// For example: "[1, 2, ?]", "{5, 6, 7}", {1: 2, 3: 4}"
|
||||
// During preparation collection constructors with constant values are converted to expr::constant.
|
||||
struct collection_constructor {
|
||||
enum class style_type { list, set, map };
|
||||
style_type style;
|
||||
|
||||
// For map constructors, elements is a list of key-pair tuples.
|
||||
std::vector<expression> elements;
|
||||
|
||||
// Might be nullptr before prepare.
|
||||
@@ -399,6 +433,8 @@ struct collection_constructor {
|
||||
};
|
||||
|
||||
// Constructs an object of a user-defined type
|
||||
// For example: "{field1: 23343, field2: ?}"
|
||||
// During preparation usertype constructors with constant values are converted to expr::constant.
|
||||
struct usertype_constructor {
|
||||
using elements_map_type = std::unordered_map<column_identifier, expression>;
|
||||
elements_map_type elements;
|
||||
|
||||
@@ -29,6 +29,7 @@
|
||||
#include <seastar/core/coroutine.hh>
|
||||
#include <seastar/core/future-util.hh>
|
||||
#include <seastar/coroutine/maybe_yield.hh>
|
||||
#include <seastar/coroutine/parallel_for_each.hh>
|
||||
|
||||
#include <boost/range/adaptor/transformed.hpp>
|
||||
|
||||
@@ -214,54 +215,25 @@ static thread_local std::pair<std::string_view, data_type> new_columns[] {
|
||||
{"workload_type", utf8_type}
|
||||
};
|
||||
|
||||
static bool has_missing_columns(data_dictionary::database db) noexcept {
|
||||
assert(this_shard_id() == 0);
|
||||
try {
|
||||
auto schema = db.find_schema(system_distributed_keyspace::NAME, system_distributed_keyspace::SERVICE_LEVELS);
|
||||
for (const auto& col : new_columns) {
|
||||
auto& [col_name, col_type] = col;
|
||||
bytes options_name = to_bytes(col_name.data());
|
||||
if (schema->get_column_definition(options_name)) {
|
||||
continue;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
} catch (...) {
|
||||
dlogger.warn("Failed to update options column in the role attributes table: {}", std::current_exception());
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
static schema_ptr get_current_service_levels(data_dictionary::database db) {
|
||||
return db.has_schema(system_distributed_keyspace::NAME, system_distributed_keyspace::SERVICE_LEVELS)
|
||||
? db.find_schema(system_distributed_keyspace::NAME, system_distributed_keyspace::SERVICE_LEVELS)
|
||||
: service_levels();
|
||||
}
|
||||
|
||||
static future<> add_new_columns_if_missing(replica::database& db, ::service::migration_manager& mm, ::service::group0_guard group0_guard) noexcept {
|
||||
static schema_ptr get_updated_service_levels(data_dictionary::database db) {
|
||||
assert(this_shard_id() == 0);
|
||||
try {
|
||||
auto schema = db.find_schema(system_distributed_keyspace::NAME, system_distributed_keyspace::SERVICE_LEVELS);
|
||||
schema_builder b(schema);
|
||||
bool updated = false;
|
||||
for (const auto& col : new_columns) {
|
||||
auto& [col_name, col_type] = col;
|
||||
bytes options_name = to_bytes(col_name.data());
|
||||
if (schema->get_column_definition(options_name)) {
|
||||
continue;
|
||||
}
|
||||
updated = true;
|
||||
b.with_column(options_name, col_type, column_kind::regular_column);
|
||||
auto schema = get_current_service_levels(db);
|
||||
schema_builder b(schema);
|
||||
for (const auto& col : new_columns) {
|
||||
auto& [col_name, col_type] = col;
|
||||
bytes options_name = to_bytes(col_name.data());
|
||||
if (schema->get_column_definition(options_name)) {
|
||||
continue;
|
||||
}
|
||||
if (updated) {
|
||||
schema_ptr table = b.build();
|
||||
try {
|
||||
auto ts = group0_guard.write_timestamp();
|
||||
co_return co_await mm.announce(co_await service::prepare_column_family_update_announcement(mm.get_storage_proxy(), table, false,
|
||||
std::vector<view_ptr>(), ts), std::move(group0_guard), "Add new columns to system_distributed.service_levels");
|
||||
} catch (...) {}
|
||||
}
|
||||
} catch (...) {
|
||||
// FIXME: do we really want to allow the node to boot if the table fails to update?
|
||||
// Will this not prevent other components from working correctly?
|
||||
dlogger.warn("Failed to update options column in the role attributes table: {}", std::current_exception());
|
||||
b.with_column(options_name, col_type, column_kind::regular_column);
|
||||
}
|
||||
return b.build();
|
||||
}
|
||||
|
||||
future<> system_distributed_keyspace::start() {
|
||||
@@ -270,79 +242,81 @@ future<> system_distributed_keyspace::start() {
|
||||
co_return;
|
||||
}
|
||||
|
||||
// FIXME: fix this code to `announce` once
|
||||
auto db = _sp.data_dictionary();
|
||||
auto tables = ensured_tables();
|
||||
|
||||
if (!_sp.get_db().local().has_keyspace(NAME)) {
|
||||
auto group0_guard = co_await _mm.start_group0_operation();
|
||||
auto ts = group0_guard.write_timestamp();
|
||||
// Check if there is any work to do before taking the group 0 guard.
|
||||
bool keyspaces_setup = db.has_keyspace(NAME) && db.has_keyspace(NAME_EVERYWHERE);
|
||||
bool tables_setup = std::all_of(tables.begin(), tables.end(), [db] (schema_ptr t) { return db.has_schema(t->ks_name(), t->cf_name()); } );
|
||||
bool service_levels_up_to_date = get_current_service_levels(db)->equal_columns(*get_updated_service_levels(db));
|
||||
if (keyspaces_setup && tables_setup && service_levels_up_to_date) {
|
||||
dlogger.info("system_distributed(_everywhere) keyspaces and tables are up-to-date. Not creating");
|
||||
_started = true;
|
||||
co_return;
|
||||
}
|
||||
|
||||
try {
|
||||
auto ksm = keyspace_metadata::new_keyspace(
|
||||
NAME,
|
||||
"org.apache.cassandra.locator.SimpleStrategy",
|
||||
{{"replication_factor", "3"}},
|
||||
true /* durable_writes */);
|
||||
co_await _mm.announce(service::prepare_new_keyspace_announcement(_sp.local_db(), ksm, ts), std::move(group0_guard),
|
||||
"Create system_distributed keyspace");
|
||||
} catch (exceptions::already_exists_exception&) {}
|
||||
auto group0_guard = co_await _mm.start_group0_operation();
|
||||
auto ts = group0_guard.write_timestamp();
|
||||
std::vector<mutation> mutations;
|
||||
sstring description;
|
||||
|
||||
auto sd_ksm = keyspace_metadata::new_keyspace(
|
||||
NAME,
|
||||
"org.apache.cassandra.locator.SimpleStrategy",
|
||||
{{"replication_factor", "3"}},
|
||||
true /* durable_writes */);
|
||||
if (!db.has_keyspace(NAME)) {
|
||||
mutations = service::prepare_new_keyspace_announcement(db.real_database(), sd_ksm, ts);
|
||||
description += format(" create {} keyspace;", NAME);
|
||||
} else {
|
||||
dlogger.info("{} keyspace is already present. Not creating", NAME);
|
||||
}
|
||||
|
||||
if (!_sp.get_db().local().has_keyspace(NAME_EVERYWHERE)) {
|
||||
auto group0_guard = co_await _mm.start_group0_operation();
|
||||
auto ts = group0_guard.write_timestamp();
|
||||
|
||||
try {
|
||||
auto ksm = keyspace_metadata::new_keyspace(
|
||||
NAME_EVERYWHERE,
|
||||
"org.apache.cassandra.locator.EverywhereStrategy",
|
||||
{},
|
||||
true /* durable_writes */);
|
||||
co_await _mm.announce(service::prepare_new_keyspace_announcement(_sp.local_db(), ksm, ts), std::move(group0_guard),
|
||||
"Create system_distributed_everywhere keyspace");
|
||||
} catch (exceptions::already_exists_exception&) {}
|
||||
auto sde_ksm = keyspace_metadata::new_keyspace(
|
||||
NAME_EVERYWHERE,
|
||||
"org.apache.cassandra.locator.EverywhereStrategy",
|
||||
{},
|
||||
true /* durable_writes */);
|
||||
if (!db.has_keyspace(NAME_EVERYWHERE)) {
|
||||
auto sde_mutations = service::prepare_new_keyspace_announcement(db.real_database(), sde_ksm, ts);
|
||||
std::move(sde_mutations.begin(), sde_mutations.end(), std::back_inserter(mutations));
|
||||
description += format(" create {} keyspace;", NAME_EVERYWHERE);
|
||||
} else {
|
||||
dlogger.info("{} keyspace is already present. Not creating", NAME_EVERYWHERE);
|
||||
}
|
||||
|
||||
auto tables = ensured_tables();
|
||||
bool exist = std::all_of(tables.begin(), tables.end(), [this] (schema_ptr s) {
|
||||
return _sp.get_db().local().has_schema(s->ks_name(), s->cf_name());
|
||||
});
|
||||
// Get mutations for creating and updating tables.
|
||||
auto num_keyspace_mutations = mutations.size();
|
||||
co_await coroutine::parallel_for_each(ensured_tables(),
|
||||
[this, &mutations, db, ts, sd_ksm, sde_ksm] (auto&& table) -> future<> {
|
||||
auto ksm = table->ks_name() == NAME ? sd_ksm : sde_ksm;
|
||||
|
||||
if (!exist) {
|
||||
auto group0_guard = co_await _mm.start_group0_operation();
|
||||
auto ts = group0_guard.write_timestamp();
|
||||
|
||||
auto m = co_await map_reduce(tables,
|
||||
/* Mapper */ [this, ts] (auto&& table) -> future<std::vector<mutation>> {
|
||||
try {
|
||||
co_return co_await service::prepare_new_column_family_announcement(_sp, std::move(table), ts);
|
||||
} catch (exceptions::already_exists_exception&) {
|
||||
co_return std::vector<mutation>();
|
||||
}
|
||||
},
|
||||
/* Initial value*/ std::vector<mutation>(),
|
||||
/* Reducer */ [] (std::vector<mutation> m1, std::vector<mutation> m2) {
|
||||
std::move(m2.begin(), m2.end(), std::back_inserter(m1));
|
||||
return m1;
|
||||
});
|
||||
if (m.size()) {
|
||||
co_await _mm.announce(std::move(m), std::move(group0_guard),
|
||||
"Create system_distributed(_everywhere) tables");
|
||||
// Ensure that the service_levels table contains new columns.
|
||||
if (table->cf_name() == SERVICE_LEVELS) {
|
||||
table = get_updated_service_levels(db);
|
||||
}
|
||||
|
||||
if (!db.has_schema(table->ks_name(), table->cf_name())) {
|
||||
co_return co_await service::prepare_new_column_family_announcement(mutations, _sp, *ksm, std::move(table), ts);
|
||||
}
|
||||
|
||||
// The service_levels table exists. Update it if it lacks new columns.
|
||||
if (table->cf_name() == SERVICE_LEVELS && !get_current_service_levels(db)->equal_columns(*table)) {
|
||||
auto update_mutations = co_await service::prepare_column_family_update_announcement(_sp, table, false, std::vector<view_ptr>(), ts);
|
||||
std::move(update_mutations.begin(), update_mutations.end(), std::back_inserter(mutations));
|
||||
}
|
||||
});
|
||||
if (mutations.size() > num_keyspace_mutations) {
|
||||
description += " create and update system_distributed(_everywhere) tables";
|
||||
} else {
|
||||
dlogger.info("All tables are present on start");
|
||||
dlogger.info("All tables are present and up-to-date on start");
|
||||
}
|
||||
|
||||
if (!mutations.empty()) {
|
||||
co_await _mm.announce(std::move(mutations), std::move(group0_guard), description);
|
||||
}
|
||||
|
||||
_started = true;
|
||||
if (has_missing_columns(_qp.db())) {
|
||||
auto group0_guard = co_await _mm.start_group0_operation();
|
||||
co_await add_new_columns_if_missing(_qp.db().real_database(), _mm, std::move(group0_guard));
|
||||
} else {
|
||||
dlogger.info("All schemas are uptodate on start");
|
||||
}
|
||||
}
|
||||
|
||||
future<> system_distributed_keyspace::stop() {
|
||||
|
||||
@@ -11,6 +11,7 @@
|
||||
#include <boost/range/adaptor/transformed.hpp>
|
||||
|
||||
#include <seastar/core/coroutine.hh>
|
||||
#include <seastar/coroutine/parallel_for_each.hh>
|
||||
#include "system_keyspace.hh"
|
||||
#include "cql3/untyped_result_set.hh"
|
||||
#include "utils/fb_utilities.hh"
|
||||
@@ -1052,7 +1053,6 @@ schema_ptr system_keyspace::sstables_registry() {
|
||||
return schema_builder(NAME, SSTABLES_REGISTRY, id)
|
||||
.with_column("location", utf8_type, column_kind::partition_key)
|
||||
.with_column("generation", timeuuid_type, column_kind::clustering_key)
|
||||
.with_column("uuid", uuid_type)
|
||||
.with_column("status", utf8_type)
|
||||
.with_column("version", utf8_type)
|
||||
.with_column("format", utf8_type)
|
||||
@@ -1465,6 +1465,27 @@ future<std::unordered_map<table_id, db_clock::time_point>> system_keyspace::load
|
||||
co_return result;
|
||||
}
|
||||
|
||||
future<> system_keyspace::drop_truncation_rp_records() {
|
||||
sstring req = format("SELECT table_uuid, shard, segment_id from system.{}", TRUNCATED);
|
||||
auto rs = co_await execute_cql(req);
|
||||
|
||||
bool any = false;
|
||||
co_await coroutine::parallel_for_each(rs->begin(), rs->end(), [&] (const cql3::untyped_result_set_row& row) -> future<> {
|
||||
auto table_uuid = table_id(row.get_as<utils::UUID>("table_uuid"));
|
||||
auto shard = row.get_as<int32_t>("shard");
|
||||
auto segment_id = row.get_as<int64_t>("segment_id");
|
||||
|
||||
if (segment_id != 0) {
|
||||
any = true;
|
||||
sstring req = format("UPDATE system.{} SET segment_id = 0, position = 0 WHERE table_uuid = {} AND shard = {}", TRUNCATED, table_uuid, shard);
|
||||
co_await execute_cql(req);
|
||||
}
|
||||
});
|
||||
if (any) {
|
||||
co_await force_blocking_flush(TRUNCATED);
|
||||
}
|
||||
}
|
||||
|
||||
future<> system_keyspace::save_truncation_record(const replica::column_family& cf, db_clock::time_point truncated_at, db::replay_position rp) {
|
||||
sstring req = format("INSERT INTO system.{} (table_uuid, shard, position, segment_id, truncated_at) VALUES(?,?,?,?,?)", TRUNCATED);
|
||||
co_await _qp.execute_internal(req, {cf.schema()->id().uuid(), int32_t(rp.shard_id()), int32_t(rp.pos), int64_t(rp.base_id()), truncated_at}, cql3::query_processor::cache_internal::yes);
|
||||
@@ -1548,6 +1569,9 @@ future<> system_keyspace::update_tokens(gms::inet_address ep, const std::unorder
|
||||
slogger.debug("INSERT INTO system.{} (peer, tokens) VALUES ({}, {})", PEERS, ep, tokens);
|
||||
auto set_type = set_type_impl::get_instance(utf8_type, true);
|
||||
co_await execute_cql(req, ep.addr(), make_set_value(set_type, prepare_tokens(tokens))).discard_result();
|
||||
if (!_db.uses_schema_commitlog()) {
|
||||
co_await force_blocking_flush(PEERS);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1653,7 +1677,7 @@ future<> system_keyspace::set_scylla_local_param_as(const sstring& key, const T&
|
||||
sstring req = format("UPDATE system.{} SET value = ? WHERE key = ?", system_keyspace::SCYLLA_LOCAL);
|
||||
auto type = data_type_for<T>();
|
||||
co_await execute_cql(req, type->to_string_impl(data_value(value)), key).discard_result();
|
||||
if (visible_before_cl_replay) {
|
||||
if (visible_before_cl_replay || !_db.uses_schema_commitlog()) {
|
||||
co_await force_blocking_flush(SCYLLA_LOCAL);
|
||||
}
|
||||
}
|
||||
@@ -1692,6 +1716,9 @@ future<> system_keyspace::remove_endpoint(gms::inet_address ep) {
|
||||
sstring req = format("DELETE FROM system.{} WHERE peer = ?", PEERS);
|
||||
slogger.debug("DELETE FROM system.{} WHERE peer = {}", PEERS, ep);
|
||||
co_await execute_cql(req, ep.addr()).discard_result();
|
||||
if (!_db.uses_schema_commitlog()) {
|
||||
co_await force_blocking_flush(PEERS);
|
||||
}
|
||||
}
|
||||
|
||||
future<> system_keyspace::update_tokens(const std::unordered_set<dht::token>& tokens) {
|
||||
@@ -1702,6 +1729,9 @@ future<> system_keyspace::update_tokens(const std::unordered_set<dht::token>& to
|
||||
sstring req = format("INSERT INTO system.{} (key, tokens) VALUES (?, ?)", LOCAL);
|
||||
auto set_type = set_type_impl::get_instance(utf8_type, true);
|
||||
co_await execute_cql(req, sstring(LOCAL), make_set_value(set_type, prepare_tokens(tokens)));
|
||||
if (!_db.uses_schema_commitlog()) {
|
||||
co_await force_blocking_flush(PEERS);
|
||||
}
|
||||
}
|
||||
|
||||
future<> system_keyspace::force_blocking_flush(sstring cfname) {
|
||||
@@ -1747,6 +1777,9 @@ future<> system_keyspace::update_cdc_generation_id(cdc::generation_id gen_id) {
|
||||
sstring(v3::CDC_LOCAL), id.ts, id.id);
|
||||
}
|
||||
), gen_id);
|
||||
if (!_db.uses_schema_commitlog()) {
|
||||
co_await force_blocking_flush(v3::CDC_LOCAL);
|
||||
}
|
||||
}
|
||||
|
||||
future<std::optional<cdc::generation_id>> system_keyspace::get_cdc_generation_id() {
|
||||
@@ -1828,6 +1861,9 @@ future<> system_keyspace::set_bootstrap_state(bootstrap_state state) {
|
||||
|
||||
sstring req = format("INSERT INTO system.{} (key, bootstrapped) VALUES (?, ?)", LOCAL);
|
||||
co_await execute_cql(req, sstring(LOCAL), state_name).discard_result();
|
||||
if (!_db.uses_schema_commitlog()) {
|
||||
co_await force_blocking_flush(LOCAL);
|
||||
}
|
||||
co_await container().invoke_on_all([state] (auto& sys_ks) {
|
||||
sys_ks._cache->_state = state;
|
||||
});
|
||||
@@ -1888,6 +1924,7 @@ future<> system_keyspace::make(
|
||||
replica::database& db) {
|
||||
for (auto&& table : system_keyspace::all_tables(db.get_config())) {
|
||||
co_await db.create_local_system_table(table, maybe_write_in_user_memory(table), erm_factory);
|
||||
co_await db.find_column_family(table).init_storage();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2027,6 +2064,9 @@ future<int> system_keyspace::increment_and_get_generation() {
|
||||
}
|
||||
req = format("INSERT INTO system.{} (key, gossip_generation) VALUES ('{}', ?)", LOCAL, LOCAL);
|
||||
co_await _qp.execute_internal(req, {generation.value()}, cql3::query_processor::cache_internal::yes);
|
||||
if (!_db.uses_schema_commitlog()) {
|
||||
co_await force_blocking_flush(LOCAL);
|
||||
}
|
||||
co_return generation;
|
||||
}
|
||||
|
||||
@@ -2691,24 +2731,10 @@ mutation system_keyspace::make_cleanup_candidate_mutation(std::optional<cdc::gen
|
||||
return m;
|
||||
}
|
||||
|
||||
future<> system_keyspace::sstables_registry_create_entry(sstring location, utils::UUID uuid, sstring status, sstables::entry_descriptor desc) {
|
||||
static const auto req = format("INSERT INTO system.{} (location, generation, uuid, status, version, format) VALUES (?, ?, ?, ?, ?, ?)", SSTABLES_REGISTRY);
|
||||
slogger.trace("Inserting {}.{}:{} into {}", location, desc.generation, uuid, SSTABLES_REGISTRY);
|
||||
co_await execute_cql(req, location, desc.generation, uuid, status, fmt::to_string(desc.version), fmt::to_string(desc.format)).discard_result();
|
||||
}
|
||||
|
||||
future<utils::UUID> system_keyspace::sstables_registry_lookup_entry(sstring location, sstables::generation_type gen) {
|
||||
static const auto req = format("SELECT uuid FROM system.{} WHERE location = ? AND generation = ?", SSTABLES_REGISTRY);
|
||||
slogger.trace("Looking up {}.{} in {}", location, gen, SSTABLES_REGISTRY);
|
||||
auto msg = co_await execute_cql(req, location, gen);
|
||||
if (msg->empty() || !msg->one().has("uuid")) {
|
||||
slogger.trace("ERROR: Cannot find {}.{} in {}", location, gen, SSTABLES_REGISTRY);
|
||||
co_await coroutine::return_exception(std::runtime_error("No entry in sstables registry"));
|
||||
}
|
||||
|
||||
auto uuid = msg->one().get_as<utils::UUID>("uuid");
|
||||
slogger.trace("Found {}.{}:{} in {}", location, gen, uuid, SSTABLES_REGISTRY);
|
||||
co_return uuid;
|
||||
future<> system_keyspace::sstables_registry_create_entry(sstring location, sstring status, sstables::entry_descriptor desc) {
|
||||
static const auto req = format("INSERT INTO system.{} (location, generation, status, version, format) VALUES (?, ?, ?, ?, ?)", SSTABLES_REGISTRY);
|
||||
slogger.trace("Inserting {}.{} into {}", location, desc.generation, SSTABLES_REGISTRY);
|
||||
co_await execute_cql(req, location, desc.generation, status, fmt::to_string(desc.version), fmt::to_string(desc.format)).discard_result();
|
||||
}
|
||||
|
||||
future<> system_keyspace::sstables_registry_update_entry_status(sstring location, sstables::generation_type gen, sstring status) {
|
||||
@@ -2724,17 +2750,16 @@ future<> system_keyspace::sstables_registry_delete_entry(sstring location, sstab
|
||||
}
|
||||
|
||||
future<> system_keyspace::sstables_registry_list(sstring location, sstable_registry_entry_consumer consumer) {
|
||||
static const auto req = format("SELECT uuid, status, generation, version, format FROM system.{} WHERE location = ?", SSTABLES_REGISTRY);
|
||||
static const auto req = format("SELECT status, generation, version, format FROM system.{} WHERE location = ?", SSTABLES_REGISTRY);
|
||||
slogger.trace("Listing {} entries from {}", location, SSTABLES_REGISTRY);
|
||||
|
||||
co_await _qp.query_internal(req, db::consistency_level::ONE, { location }, 1000, [ consumer = std::move(consumer) ] (const cql3::untyped_result_set::row& row) -> future<stop_iteration> {
|
||||
auto uuid = row.get_as<utils::UUID>("uuid");
|
||||
auto status = row.get_as<sstring>("status");
|
||||
auto gen = sstables::generation_type(row.get_as<utils::UUID>("generation"));
|
||||
auto ver = sstables::version_from_string(row.get_as<sstring>("version"));
|
||||
auto fmt = sstables::format_from_string(row.get_as<sstring>("format"));
|
||||
sstables::entry_descriptor desc(gen, ver, fmt, sstables::component_type::TOC);
|
||||
co_await consumer(std::move(uuid), std::move(status), std::move(desc));
|
||||
co_await consumer(std::move(status), std::move(desc));
|
||||
co_return stop_iteration::no;
|
||||
});
|
||||
}
|
||||
|
||||
@@ -355,6 +355,7 @@ public:
|
||||
|
||||
future<> save_truncation_record(const replica::column_family&, db_clock::time_point truncated_at, db::replay_position);
|
||||
future<replay_positions> get_truncated_positions(table_id);
|
||||
future<> drop_truncation_rp_records();
|
||||
|
||||
/**
|
||||
* Return a map of stored tokens to IP addresses
|
||||
@@ -497,11 +498,10 @@ public:
|
||||
// Assumes that the history table exists, i.e. Raft experimental feature is enabled.
|
||||
static future<mutation> get_group0_history(distributed<replica::database>&);
|
||||
|
||||
future<> sstables_registry_create_entry(sstring location, utils::UUID uuid, sstring status, sstables::entry_descriptor desc);
|
||||
future<utils::UUID> sstables_registry_lookup_entry(sstring location, sstables::generation_type gen);
|
||||
future<> sstables_registry_create_entry(sstring location, sstring status, sstables::entry_descriptor desc);
|
||||
future<> sstables_registry_update_entry_status(sstring location, sstables::generation_type gen, sstring status);
|
||||
future<> sstables_registry_delete_entry(sstring location, sstables::generation_type gen);
|
||||
using sstable_registry_entry_consumer = noncopyable_function<future<>(utils::UUID uuid, sstring state, sstables::entry_descriptor desc)>;
|
||||
using sstable_registry_entry_consumer = noncopyable_function<future<>(sstring state, sstables::entry_descriptor desc)>;
|
||||
future<> sstables_registry_list(sstring location, sstable_registry_entry_consumer consumer);
|
||||
|
||||
future<std::optional<sstring>> load_group0_upgrade_state();
|
||||
|
||||
@@ -91,7 +91,7 @@ redirects: setup
|
||||
# Preview commands
|
||||
.PHONY: preview
|
||||
preview: setup
|
||||
$(POETRY) run sphinx-autobuild -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml --host $(PREVIEW_HOST) --port 5500 --ignore '_data/*'
|
||||
$(POETRY) run sphinx-autobuild -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml --host $(PREVIEW_HOST) --port 5500 --ignore *.csv --ignore *.yaml
|
||||
|
||||
.PHONY: multiversionpreview
|
||||
multiversionpreview: multiversion
|
||||
|
||||
@@ -118,6 +118,7 @@ class AMIVersionsTemplateDirective(Directive):
|
||||
option_spec = {
|
||||
"version": directives.unchanged,
|
||||
"exclude": directives.unchanged,
|
||||
"only_latest": directives.flag,
|
||||
}
|
||||
|
||||
def _extract_version_from_filename(self, filename):
|
||||
@@ -169,6 +170,8 @@ class AMIVersionsTemplateDirective(Directive):
|
||||
LOGGER.warning(
|
||||
f"No files match in directory '{download_directory}' with version pattern '{version_pattern}'."
|
||||
)
|
||||
elif "only_latest" in self.options:
|
||||
files = [files[0]]
|
||||
|
||||
output = []
|
||||
for file in files:
|
||||
|
||||
6
docs/_static/css/custom.css
vendored
6
docs/_static/css/custom.css
vendored
@@ -17,10 +17,14 @@
|
||||
}
|
||||
|
||||
.content blockquote li p {
|
||||
margin-bottom: 10px;
|
||||
margin-bottom: 5px;
|
||||
}
|
||||
|
||||
h3 .pre {
|
||||
font-size: 16px;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
hr {
|
||||
max-width: 100%;
|
||||
}
|
||||
7
docs/_templates/db_config.tmpl
vendored
7
docs/_templates/db_config.tmpl
vendored
@@ -13,8 +13,8 @@
|
||||
|
||||
{% for item in group.properties %}
|
||||
{% if item.value_status == value_status %}
|
||||
``{{ item.name }}``
|
||||
{{ '=' * (item.name|length + 4) }}
|
||||
{{ item.name }}
|
||||
{{ '=' * (item.name|length) }}
|
||||
|
||||
.. raw:: html
|
||||
|
||||
@@ -24,6 +24,9 @@
|
||||
{% if item.default %}* **Default value:** ``{{ item.default }}``{% endif %}
|
||||
{% if item.liveness %}* **Liveness** :term:`* <Liveness>` **:** ``{{ item.liveness }}``{% endif %}
|
||||
|
||||
.. raw:: html
|
||||
|
||||
<hr/>
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
|
||||
@@ -1,2 +1,107 @@
|
||||
### a dictionary of redirections
|
||||
#old path: new path
|
||||
#old path: new path
|
||||
|
||||
|
||||
# Removed the outdated upgrade guides
|
||||
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-ubuntu-14-to-16.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.x.y-to-2.x.z/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.x.y-to-2.x.z/upgrade-guide-from-2.x.y-to-2.x.z-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.x.y-to-2.x.z/upgrade-guide-from-2.x.y-to-2.x.z-ubuntu.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.x.y-to-2.x.z/upgrade-guide-from-2.x.y-to-2.x.z-debian.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.1-to-2.2/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.1-to-2.2/upgrade-guide-from-2.1-to-2.2-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.1-to-2.2/upgrade-guide-from-2.1-to-2.2-ubuntu.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.1-to-2.2/upgrade-guide-from-2.1-to-2.2-debian.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.1-to-2.2/metric-update-2.1-to-2.2.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.2-to-2.3/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.2-to-2.3/upgrade-guide-from-2.2-to-2.3-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.2-to-2.3/upgrade-guide-from-2.2-to-2.3-ubuntu.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.2-to-2.3/upgrade-guide-from-2.2-to-2.3-ubuntu-16-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.2-to-2.3/upgrade-guide-from-2.2-to-2.3-debian.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.2-to-2.3/metric-update-2.2-to-2.3.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.3-to-3.0/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.3-to-3.0/upgrade-guide-from-2.3-to-3.0-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.3-to-3.0/upgrade-guide-from-2.3-to-3.0-ubuntu.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.3-to-3.0/upgrade-guide-from-2.3-to-3.0-ubuntu-16-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.3-to-3.0/upgrade-guide-from-2.3-to-3.0-ubuntu-18-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.3-to-3.0/upgrade-guide-from-2.3-to-3.0-debian.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-2.3-to-3.0/metric-update-2.3-to-3.0.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.0-to-3.1/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.0-to-3.1/upgrade-guide-from-3.0-to-3.1-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.0-to-3.1/upgrade-guide-from-3.0-to-3.1-ubuntu-16-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.0-to-3.1/upgrade-guide-from-3.0-to-3.1-ubuntu-18-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.0-to-3.1/upgrade-guide-from-3.0-to-3.1-debian.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.0-to-3.1/metric-update-3.0-to-3.1.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.1-to-3.2/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.1-to-3.2/upgrade-guide-from-3.1-to-3.2-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.1-to-3.2/upgrade-guide-from-3.1-to-3.2-ubuntu-16-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.1-to-3.2/upgrade-guide-from-3.1-to-3.2-ubuntu-18-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.1-to-3.2/upgrade-guide-from-3.1-to-3.2-debian.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.1-to-3.2/metric-update-3.1-to-3.2.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.2-to-3.3/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.2-to-3.3/upgrade-guide-from-3.2-to-3.3-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.2-to-3.3/upgrade-guide-from-3.2-to-3.3-ubuntu-16-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.2-to-3.3/upgrade-guide-from-3.2-to-3.3-ubuntu-18-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.2-to-3.3/upgrade-guide-from-3.2-to-3.3-debian.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.2-to-3.3/metric-update-3.2-to-3.3.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.3-to-4.0/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.3-to-4.0/upgrade-guide-from-3.3-to-4.0-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.3-to-4.0/upgrade-guide-from-3.3-to-4.0-ubuntu-16-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.3-to-4.0/upgrade-guide-from-3.3-to-4.0-ubuntu-18-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.3-to-4.0/upgrade-guide-from-3.3-to-4.0-debian.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.3-to-4.0/metric-update-3.3-to-4.0.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.x.y-to-3.x.z/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.x.y-to-3.x.z/upgrade-guide-from-3.x.y-to-3.x.z-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.x.y-to-3.x.z/upgrade-guide-from-3.x.y-to-3.x.z-ubuntu.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-3.x.y-to-3.x.z/upgrade-guide-from-3.x.y-to-3.x.z-debian.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.0-to-4.1/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.0-to-4.1/upgrade-guide-from-4.0-to-4.1-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.0-to-4.1/upgrade-guide-from-4.0-to-4.1-ubuntu-16-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.0-to-4.1/upgrade-guide-from-4.0-to-4.1-ubuntu-18-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.0-to-4.1/upgrade-guide-from-4.0-to-4.1-debian.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.0-to-4.1/alternator.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.0-to-4.1/metric-update-4.0-to-4.1.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.x.y-to-4.x.z/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.x.y-to-4.x.z/upgrade-guide-from-4.x.y-to-4.x.z-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.x.y-to-4.x.z/upgrade-guide-from-4.x.y-to-4.x.z-ubuntu.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.x.y-to-4.x.z/upgrade-guide-from-4.x.y-to-4.x.z-debian.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.1-to-4.2/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.1-to-4.2/upgrade-guide-from-4.1-to-4.2-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.1-to-4.2/upgrade-guide-from-4.1-to-4.2-ubuntu-16-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.1-to-4.2/upgrade-guide-from-4.1-to-4.2-ubuntu-18-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.1-to-4.2/upgrade-guide-from-4.1-to-4.2-debian-9.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.1-to-4.2/upgrade-guide-from-4.1-to-4.2-debian-10.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.1-to-4.2/metric-update-4.1-to-4.2.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.2-to-4.3/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.2-to-4.3/upgrade-guide-from-4.2-to-4.3-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.2-to-4.3/upgrade-guide-from-4.2-to-4.3-ubuntu-16-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.2-to-4.3/upgrade-guide-from-4.2-to-4.3-ubuntu-18-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.2-to-4.3/upgrade-guide-from-4.2-to-4.3-debian-9.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.2-to-4.3/upgrade-guide-from-4.2-to-4.3-debian-10.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.2-to-4.3/metric-update-4.2-to-4.3.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.3-to-4.4/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.3-to-4.4/upgrade-guide-from-4.3-to-4.4-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.3-to-4.4/upgrade-guide-from-4.3-to-4.4-ubuntu-16-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.3-to-4.4/upgrade-guide-from-4.3-to-4.4-ubuntu-18-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.3-to-4.4/upgrade-guide-from-4.3-to-4.4-ubuntu-20-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.3-to-4.4/upgrade-guide-from-4.3-to-4.4-debian-9.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.3-to-4.4/upgrade-guide-from-4.3-to-4.4-debian-10.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.3-to-4.4/metric-update-4.3-to-4.4.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.4-to-4.5/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.4-to-4.5/upgrade-guide-from-4.4-to-4.5-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.4-to-4.5/upgrade-guide-from-4.4-to-4.5-ubuntu-16-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.4-to-4.5/upgrade-guide-from-4.4-to-4.5-ubuntu-18-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.4-to-4.5/upgrade-guide-from-4.4-to-4.5-ubuntu-20-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.4-to-4.5/upgrade-guide-from-4.4-to-4.5-debian-9.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.4-to-4.5/upgrade-guide-from-4.4-to-4.5-debian-10.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.4-to-4.5/metric-update-4.4-to-4.5.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.5-to-4.6/index.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.5-to-4.6/upgrade-guide-from-4.5-to-4.6-rpm.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.5-to-4.6/upgrade-guide-from-4.5-to-4.6-ubuntu-16-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.5-to-4.6/upgrade-guide-from-4.5-to-4.6-ubuntu-18-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.5-to-4.6/upgrade-guide-from-4.5-to-4.6-ubuntu-20-04.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.5-to-4.6/upgrade-guide-from-4.5-to-4.6-debian-9.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.5-to-4.6/upgrade-guide-from-4.5-to-4.6-debian-10.html: /stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-opensource/upgrade-guide-from-4.5-to-4.6/metric-update-4.5-to-4.6.html: /stable/upgrade/index.html
|
||||
|
||||
|
||||
@@ -163,7 +163,7 @@ The message suggests the initial course of action:
|
||||
One of the reasons why the procedure may get stuck is a pre-existing problem in schema definitions which causes schema to be unable to synchronize in the cluster. The procedure cannot proceed unless it ensures that schema is synchronized.
|
||||
If **all nodes are alive and the network is healthy**, you performed a rolling restart, but the issue still persists, contact `ScyllaDB support <https://www.scylladb.com/product/support/>`_ for assistance.
|
||||
|
||||
If some nodes are **dead and irrecoverable**, you'll need to perform a manual recovery procedure. Consult :ref:`the section about Raft recovery <recover-raft-procedure>`.
|
||||
If some nodes are **dead and irrecoverable**, you'll need to perform a manual recovery procedure. Consult :ref:`the section about Raft recovery <recovery-procedure>`.
|
||||
|
||||
|
||||
Verifying that Raft is enabled
|
||||
@@ -189,7 +189,7 @@ on every node.
|
||||
|
||||
If the query returns 0 rows, or ``value`` is ``synchronize`` or ``use_pre_raft_procedures``, it means that the cluster is in the middle of the Raft upgrade procedure; consult the :ref:`relevant section <verify-raft-procedure>`.
|
||||
|
||||
If ``value`` is ``recovery``, it means that the cluster is in the middle of the manual recovery procedure. The procedure must be finished. Consult :ref:`the section about Raft recovery <recover-raft-procedure>`.
|
||||
If ``value`` is ``recovery``, it means that the cluster is in the middle of the manual recovery procedure. The procedure must be finished. Consult :ref:`the section about Raft recovery <recovery-procedure>`.
|
||||
|
||||
If ``value`` is anything else, it might mean data corruption or a mistake when performing the manual recovery procedure. The value will be treated as if it was equal to ``recovery`` when the node is restarted.
|
||||
|
||||
@@ -219,127 +219,8 @@ In summary, Raft makes schema changes safe, but it requires that a quorum of nod
|
||||
|
||||
Handling Failures
|
||||
------------------
|
||||
Raft requires a quorum of nodes in a cluster to be available. If one or more nodes are down, but the quorum is live, reads, writes,
|
||||
and schema updates proceed unaffected.
|
||||
When the node that was down is up again, it first contacts the cluster to fetch the latest schema and then starts serving queries.
|
||||
|
||||
The following examples show the recovery actions depending on the number of nodes and DCs in your cluster.
|
||||
|
||||
Examples
|
||||
=========
|
||||
|
||||
.. list-table:: Cluster A: 1 datacenter, 3 nodes
|
||||
:widths: 20 40 40
|
||||
:header-rows: 1
|
||||
|
||||
* - Failure
|
||||
- Consequence
|
||||
- Action to take
|
||||
* - 1 node
|
||||
- Schema updates are possible and safe.
|
||||
- Try restarting the node. If the node is dead, :doc:`replace it with a new node </operating-scylla/procedures/cluster-management/replace-dead-node/>`.
|
||||
* - 2 nodes
|
||||
- Data is available for reads and writes, schema changes are impossible.
|
||||
- Restart at least 1 of the 2 nodes that are down to regain quorum. If you can’t recover at least 1 of the 2 nodes, consult the :ref:`manual Raft recovery section <recover-raft-procedure>`.
|
||||
|
||||
.. list-table:: Cluster B: 2 datacenters, 6 nodes (3 nodes per DC)
|
||||
:widths: 20 40 40
|
||||
:header-rows: 1
|
||||
|
||||
* - Failure
|
||||
- Consequence
|
||||
- Action to take
|
||||
* - 1-2 nodes
|
||||
- Schema updates are possible and safe.
|
||||
- Try restarting the node(s). If the node is dead, :doc:`replace it with a new node </operating-scylla/procedures/cluster-management/replace-dead-node/>`.
|
||||
* - 3 nodes
|
||||
- Data is available for reads and writes, schema changes are impossible.
|
||||
- Restart 1 of the 3 nodes that are down to regain quorum. If you can’t recover at least 1 of the 3 failed nodes, consult the :ref:`manual Raft recovery section <recover-raft-procedure>`.
|
||||
* - 1 DC
|
||||
- Data is available for reads and writes, schema changes are impossible.
|
||||
- When the DCs come back online, restart the nodes. If the DC fails to come back online and the nodes are lost, consult the :ref:`manual Raft recovery section <recover-raft-procedure>`.
|
||||
|
||||
|
||||
.. list-table:: Cluster C: 3 datacenters, 9 nodes (3 nodes per DC)
|
||||
:widths: 20 40 40
|
||||
:header-rows: 1
|
||||
|
||||
* - Failure
|
||||
- Consequence
|
||||
- Action to take
|
||||
* - 1-4 nodes
|
||||
- Schema updates are possible and safe.
|
||||
- Try restarting the nodes. If the nodes are dead, :doc:`replace them with new nodes </operating-scylla/procedures/cluster-management/replace-dead-node-or-more/>`.
|
||||
* - 1 DC
|
||||
- Schema updates are possible and safe.
|
||||
- When the DC comes back online, try restarting the nodes in the cluster. If the nodes are dead, :doc:`add 3 new nodes in a new region </operating-scylla/procedures/cluster-management/add-dc-to-existing-dc/>`.
|
||||
* - 2 DCs
|
||||
- Data is available for reads and writes, schema changes are impossible.
|
||||
- When the DCs come back online, restart the nodes. If at least one DC fails to come back online and the nodes are lost, consult the :ref:`manual Raft recovery section <recover-raft-procedure>`.
|
||||
|
||||
.. _recover-raft-procedure:
|
||||
|
||||
Raft manual recovery procedure
|
||||
==============================
|
||||
|
||||
The manual Raft recovery procedure applies to the following situations:
|
||||
|
||||
* :ref:`The Raft upgrade procedure <verify-raft-procedure>` got stuck because one of your nodes failed in the middle of the procedure and is irrecoverable,
|
||||
* or the cluster was running Raft but a majority of nodes (e.g. 2 out of 3) failed and are irrecoverable. Raft cannot progress unless a majority of nodes is available.
|
||||
|
||||
.. warning::
|
||||
|
||||
Perform the manual recovery procedure **only** if you're dealing with **irrecoverable** nodes. If it is possible to restart your nodes, do that instead of manual recovery.
|
||||
|
||||
.. note::
|
||||
|
||||
Before proceeding, make sure that the irrecoverable nodes are truly dead, and not, for example, temporarily partitioned away due to a network failure. If it is possible for the 'dead' nodes to come back to life, they might communicate and interfere with the recovery procedure and cause unpredictable problems.
|
||||
|
||||
If you have no means of ensuring that these irrecoverable nodes won't come back to life and communicate with the rest of the cluster, set up firewall rules or otherwise isolate your alive nodes to reject any communication attempts from these dead nodes.
|
||||
|
||||
During the manual recovery procedure you'll enter a special ``RECOVERY`` mode, remove all faulty nodes (using the standard :doc:`node removal procedure </operating-scylla/procedures/cluster-management/remove-node/>`), delete the internal Raft data, and restart the cluster. This will cause the cluster to perform the Raft upgrade procedure again, initializing the Raft algorithm from scratch. The manual recovery procedure is applicable both to clusters which were not running Raft in the past and then had Raft enabled, and to clusters which were bootstrapped using Raft.
|
||||
|
||||
.. note::
|
||||
|
||||
Entering ``RECOVERY`` mode requires a node restart. Restarting an additional node while some nodes are already dead may lead to unavailability of data queries (assuming that you haven't lost it already). For example, if you're using the standard RF=3, CL=QUORUM setup, and you're recovering from a stuck upgrade procedure because one of your nodes is dead, restarting another node will cause temporary data query unavailability (until the node finishes restarting). Prepare your service for downtime before proceeding.
|
||||
|
||||
#. Perform the following query on **every alive node** in the cluster, using e.g. ``cqlsh``:
|
||||
|
||||
.. code-block:: cql
|
||||
|
||||
cqlsh> UPDATE system.scylla_local SET value = 'recovery' WHERE key = 'group0_upgrade_state';
|
||||
|
||||
#. Perform a :doc:`rolling restart </operating-scylla/procedures/config-change/rolling-restart/>` of your alive nodes.
|
||||
|
||||
#. Verify that all the nodes have entered ``RECOVERY`` mode when restarting; look for one of the following messages in their logs:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
group0_client - RECOVERY mode.
|
||||
raft_group0 - setup_group0: Raft RECOVERY mode, skipping group 0 setup.
|
||||
raft_group0_upgrade - RECOVERY mode. Not attempting upgrade.
|
||||
|
||||
#. Remove all your dead nodes using the :doc:`node removal procedure </operating-scylla/procedures/cluster-management/remove-node/>`.
|
||||
|
||||
#. Remove existing Raft cluster data by performing the following queries on **every alive node** in the cluster, using e.g. ``cqlsh``:
|
||||
|
||||
.. code-block:: cql
|
||||
|
||||
cqlsh> TRUNCATE TABLE system.discovery;
|
||||
cqlsh> TRUNCATE TABLE system.group0_history;
|
||||
cqlsh> DELETE value FROM system.scylla_local WHERE key = 'raft_group0_id';
|
||||
|
||||
#. Make sure that schema is synchronized in the cluster by executing :doc:`nodetool describecluster </operating-scylla/nodetool-commands/describecluster>` on each node and verifying that the schema version is the same on all nodes.
|
||||
|
||||
#. We can now leave ``RECOVERY`` mode. On **every alive node**, perform the following query:
|
||||
|
||||
.. code-block:: cql
|
||||
|
||||
cqlsh> DELETE FROM system.scylla_local WHERE key = 'group0_upgrade_state';
|
||||
|
||||
#. Perform a :doc:`rolling restart </operating-scylla/procedures/config-change/rolling-restart/>` of your alive nodes.
|
||||
|
||||
#. The Raft upgrade procedure will start anew. :ref:`Verify <verify-raft-procedure>` that it finishes successfully.
|
||||
See :doc:`Handling Node Failures </troubleshooting/handling-node-failures>`.
|
||||
|
||||
.. _raft-learn-more:
|
||||
|
||||
|
||||
@@ -16,11 +16,11 @@ sys.path.insert(0, os.path.abspath(".."))
|
||||
BASE_URL = 'https://opensource.docs.scylladb.com'
|
||||
# Build documentation for the following tags and branches.
|
||||
TAGS = []
|
||||
BRANCHES = ["master", "branch-5.1", "branch-5.2", "branch-5.3"]
|
||||
BRANCHES = ["master", "branch-5.1", "branch-5.2", "branch-5.4"]
|
||||
# Set the latest version.
|
||||
LATEST_VERSION = "branch-5.2"
|
||||
# Set which versions are not released yet.
|
||||
UNSTABLE_VERSIONS = ["master", "branch-5.3"]
|
||||
UNSTABLE_VERSIONS = ["master", "branch-5.4"]
|
||||
# Set which versions are deprecated.
|
||||
DEPRECATED_VERSIONS = [""]
|
||||
|
||||
|
||||
@@ -357,7 +357,7 @@ is not perfect and the actual rate of accepted requests may be higher up to
|
||||
a factor of keyspace's `RF`. This feature should not be used to enforce precise
|
||||
limits but rather serve as an overload protection feature.
|
||||
|
||||
_NOTE): This feature works best when shard-aware drivers are used (rejected
|
||||
_NOTE_: This feature works best when shard-aware drivers are used (rejected
|
||||
requests have the least cost).
|
||||
|
||||
Limits are configured separately for reads and writes. Some examples:
|
||||
|
||||
@@ -8,6 +8,12 @@ CQL stores data in *tables*, whose schema defines the layout of said data in the
|
||||
which is the replication strategy used by the keyspace. An application can have only one keyspace. However, it is also possible to
|
||||
have multiple keyspaces in case your application has different replication requirements.
|
||||
|
||||
.. note::
|
||||
|
||||
Schema updates require at least a quorum of nodes in a cluster to be available.
|
||||
If the quorum is lost, it must be restored before a schema is updated.
|
||||
See :doc:`Handling Node Failures </troubleshooting/handling-node-failures>` for details.
|
||||
|
||||
This section describes the statements used to create, modify, and remove keyspaces and tables.
|
||||
|
||||
:ref:`CREATE KEYSPACE <create-keyspace-statement>`
|
||||
|
||||
@@ -5,6 +5,11 @@ Adding a New Node Into an Existing ScyllaDB Cluster (Out Scale)
|
||||
When you add a new node, other nodes in the cluster stream data to the new node. This operation is called bootstrapping and may
|
||||
be time-consuming, depending on the data size and network bandwidth. If using a :ref:`multi-availability-zone <faq-best-scenario-node-multi-availability-zone>`, make sure they are balanced.
|
||||
|
||||
.. note::
|
||||
|
||||
Adding a new node requires at least a quorum of nodes in a cluster to be available.
|
||||
If the quorum is lost, it must be restored before a new node is added.
|
||||
See :doc:`Handling Node Failures </troubleshooting/handling-node-failures>` for details.
|
||||
|
||||
Prerequisites
|
||||
-------------
|
||||
|
||||
@@ -4,6 +4,12 @@ Remove a Node from a ScyllaDB Cluster (Down Scale)
|
||||
|
||||
You can remove nodes from your cluster to reduce its size.
|
||||
|
||||
.. note::
|
||||
|
||||
Removing a node requires at least a quorum of nodes in a cluster to be available.
|
||||
If the quorum is lost, it must be restored before a node is removed.
|
||||
See :doc:`Handling Node Failures </troubleshooting/handling-node-failures>` for details.
|
||||
|
||||
-----------------------
|
||||
Removing a Running Node
|
||||
-----------------------
|
||||
|
||||
@@ -2,7 +2,11 @@
|
||||
Replace More Than One Dead Node In A ScyllaDB Cluster
|
||||
******************************************************
|
||||
|
||||
Scylla is a fault-tolerant system. A cluster can be available even when more than one node is down.
|
||||
.. note::
|
||||
|
||||
Replacing a node requires at least a quorum of nodes in a cluster to be available.
|
||||
If the quorum is lost, it must be restored before a node is replaced.
|
||||
See :doc:`Handling Node Failures </troubleshooting/handling-node-failures>` for details.
|
||||
|
||||
Prerequisites
|
||||
-------------
|
||||
|
||||
@@ -5,6 +5,12 @@ Replace dead node operation will cause the other nodes in the cluster to stream
|
||||
|
||||
This procedure is for replacing one dead node. To replace more than one dead node, run the full procedure to completion one node at a time.
|
||||
|
||||
.. note::
|
||||
|
||||
Replacing a node requires at least a quorum of nodes in a cluster to be available.
|
||||
If the quorum is lost, it must be restored before a node is replaced.
|
||||
See :doc:`Handling Node Failures </troubleshooting/handling-node-failures>` for details.
|
||||
|
||||
-------------
|
||||
Prerequisites
|
||||
-------------
|
||||
|
||||
@@ -7,6 +7,12 @@ There are two methods to replace a running node in a Scylla cluster.
|
||||
#. `Add a new node to the cluster and then decommission the old node`_
|
||||
#. `Replace a running node - by taking its place in the cluster`_
|
||||
|
||||
.. note::
|
||||
|
||||
Replacing a node requires at least a quorum of nodes in a cluster to be available.
|
||||
If the quorum is lost, it must be restored before a node is replaced.
|
||||
See :doc:`Handling Node Failures </troubleshooting/handling-node-failures>` for details.
|
||||
|
||||
|
||||
Add a new node to the cluster and then decommission the old node
|
||||
=================================================================
|
||||
|
||||
@@ -5,6 +5,8 @@ Cluster and Node
|
||||
:hidden:
|
||||
:maxdepth: 2
|
||||
|
||||
Handling Node Failures </troubleshooting/handling-node-failures>
|
||||
Failure to Add, Remove, or Replace a Node </troubleshooting/failed-add-remove-replace>
|
||||
Failed Decommission Problem </troubleshooting/failed-decommission/>
|
||||
Cluster Timeouts </troubleshooting/timeouts>
|
||||
Node Joined With No Data </troubleshooting/node-joined-without-any-data>
|
||||
@@ -21,6 +23,8 @@ Cluster and Node
|
||||
</div>
|
||||
<div class="medium-9 columns">
|
||||
|
||||
* :doc:`Handling Node Failures </troubleshooting/handling-node-failures>`
|
||||
* :doc:`Failure to Add, Remove, or Replace a Node </troubleshooting/failed-add-remove-replace>`
|
||||
* :doc:`Failed Decommission Problem </troubleshooting/failed-decommission/>`
|
||||
* :doc:`Cluster Timeouts </troubleshooting/timeouts>`
|
||||
* :doc:`Node Joined With No Data </troubleshooting/node-joined-without-any-data>`
|
||||
|
||||
9
docs/troubleshooting/failed-add-remove-replace.rst
Normal file
9
docs/troubleshooting/failed-add-remove-replace.rst
Normal file
@@ -0,0 +1,9 @@
|
||||
Failure to Add, Remove, or Replace a Node
|
||||
------------------------------------------------
|
||||
|
||||
ScyllaDB relies on the Raft consensus algorithm, which requires at least a quorum
|
||||
of nodes in a cluster to be available. If some nodes are down and the quorum is
|
||||
lost, adding, removing, and replacing a node fails.
|
||||
|
||||
See :doc:`Handling Node Failures <handling-node-failures>` for information about
|
||||
recovery actions depending on the number of nodes and DCs in your cluster.
|
||||
9
docs/troubleshooting/failed-update-schema.rst
Normal file
9
docs/troubleshooting/failed-update-schema.rst
Normal file
@@ -0,0 +1,9 @@
|
||||
Failure to Update the Schema
|
||||
------------------------------------------------
|
||||
|
||||
ScyllaDB relies on the Raft consensus algorithm, which requires at least a quorum
|
||||
of nodes in a cluster to be available. If some nodes are down and the quorum is
|
||||
lost, schema updates fail.
|
||||
|
||||
See :doc:`Handling Node Failures <handling-node-failures>` for information about
|
||||
recovery actions depending on the number of nodes and DCs in your cluster.
|
||||
159
docs/troubleshooting/handling-node-failures.rst
Normal file
159
docs/troubleshooting/handling-node-failures.rst
Normal file
@@ -0,0 +1,159 @@
|
||||
Handling Node Failures
|
||||
------------------------
|
||||
|
||||
.. note::
|
||||
|
||||
This page applies to ScyllaDB clusters that use Raft to ensure consistency.
|
||||
You can verify that Raft-based consistent management is enabled for your
|
||||
cluster in the ``scylla.yaml`` file (enabled by default):
|
||||
``consistent_cluster_management: true``
|
||||
|
||||
.. REMOVE IN FUTURE VERSIONS - Remove the above note when Raft is mandatory
|
||||
and default for both new and existing clusters.
|
||||
|
||||
ScyllaDB relies on the Raft consensus algorithm, which requires at least a quorum
|
||||
of nodes in a cluster to be available. If one or more nodes are down, but the quorum
|
||||
is live, reads, writes, and schema updates proceed unaffected. When the node that
|
||||
was down is up again, it first contacts the cluster to fetch the latest schema and
|
||||
then starts serving queries.
|
||||
|
||||
The following examples show the recovery actions when one or more nodes or DCs
|
||||
are down, depending on the number of nodes and DCs in your cluster.
|
||||
|
||||
Examples
|
||||
=========
|
||||
|
||||
.. list-table:: Cluster A: 1 datacenter, 3 nodes
|
||||
:widths: 20 40 40
|
||||
:header-rows: 1
|
||||
|
||||
* - Failure
|
||||
- Consequence
|
||||
- Action to take
|
||||
* - 1 node
|
||||
- Schema updates are possible and safe.
|
||||
- Try restarting the node. If the node is dead, :doc:`replace it with a new node </operating-scylla/procedures/cluster-management/replace-dead-node/>`.
|
||||
* - 2 nodes
|
||||
- Data is available for reads and writes, schema changes are impossible.
|
||||
- Restart at least 1 of the 2 nodes that are down to regain quorum. If you can’t recover at least 1 of the 2 nodes, consult the :ref:`manual recovery section <recovery-procedure>`.
|
||||
|
||||
.. list-table:: Cluster B: 2 datacenters, 6 nodes (3 nodes per DC)
|
||||
:widths: 20 40 40
|
||||
:header-rows: 1
|
||||
|
||||
* - Failure
|
||||
- Consequence
|
||||
- Action to take
|
||||
* - 1-2 nodes
|
||||
- Schema updates are possible and safe.
|
||||
- Try restarting the node(s). If the node is dead, :doc:`replace it with a new node </operating-scylla/procedures/cluster-management/replace-dead-node/>`.
|
||||
* - 3 nodes
|
||||
- Data is available for reads and writes, schema changes are impossible.
|
||||
- Restart 1 of the 3 nodes that are down to regain quorum. If you can’t recover at least 1 of the 3 failed nodes, consult the :ref:`manual recovery <recovery-procedure>` section.
|
||||
* - 1 DC
|
||||
- Data is available for reads and writes, schema changes are impossible.
|
||||
- When the DCs come back online, restart the nodes. If the DC fails to come back online and the nodes are lost, consult the :ref:`manual recovery <recovery-procedure>` section.
|
||||
|
||||
|
||||
.. list-table:: Cluster C: 3 datacenters, 9 nodes (3 nodes per DC)
|
||||
:widths: 20 40 40
|
||||
:header-rows: 1
|
||||
|
||||
* - Failure
|
||||
- Consequence
|
||||
- Action to take
|
||||
* - 1-4 nodes
|
||||
- Schema updates are possible and safe.
|
||||
- Try restarting the nodes. If the nodes are dead, :doc:`replace them with new nodes </operating-scylla/procedures/cluster-management/replace-dead-node-or-more/>`.
|
||||
* - 1 DC
|
||||
- Schema updates are possible and safe.
|
||||
- When the DC comes back online, try restarting the nodes in the cluster. If the nodes are dead, :doc:`add 3 new nodes in a new region </operating-scylla/procedures/cluster-management/add-dc-to-existing-dc/>`.
|
||||
* - 2 DCs
|
||||
- Data is available for reads and writes, schema changes are impossible.
|
||||
- When the DCs come back online, restart the nodes. If at least one DC fails to come back online and the nodes are lost, consult the :ref:`manual recovery <recovery-procedure>` section.
|
||||
|
||||
.. _recovery-procedure:
|
||||
|
||||
Manual Recovery Procedure
|
||||
===========================
|
||||
|
||||
You can follow the manual recovery procedure when:
|
||||
|
||||
* The majority of nodes (for example, 2 out of 3) failed and are irrecoverable.
|
||||
* :ref:`The Raft upgrade procedure <verify-raft-procedure>` got stuck because one
|
||||
of the nodes failed in the middle of the procedure and is irrecoverable. This
|
||||
may occur in existing clusters where Raft was manually enabled.
|
||||
See :ref:`Enabling Raft <enabling-raft-existing-cluster>` for details.
|
||||
|
||||
.. warning::
|
||||
|
||||
Perform the manual recovery procedure **only** if you're dealing with
|
||||
**irrecoverable** nodes. If possible, restart your nodes, and use the manual
|
||||
recovery procedure as a last resort.
|
||||
|
||||
.. note::
|
||||
|
||||
Before proceeding, make sure that the irrecoverable nodes are truly dead, and not,
|
||||
for example, temporarily partitioned away due to a network failure. If it is
|
||||
possible for the 'dead' nodes to come back to life, they might communicate and
|
||||
interfere with the recovery procedure and cause unpredictable problems.
|
||||
|
||||
If you have no means of ensuring that these irrecoverable nodes won't come back
|
||||
to life and communicate with the rest of the cluster, set up firewall rules or otherwise
|
||||
isolate your alive nodes to reject any communication attempts from these dead nodes.
|
||||
|
||||
During the manual recovery procedure you'll enter a special ``RECOVERY`` mode, remove
|
||||
all faulty nodes (using the standard :doc:`node removal procedure </operating-scylla/procedures/cluster-management/remove-node/>`),
|
||||
delete the internal Raft data, and restart the cluster. This will cause the cluster to
|
||||
perform the Raft upgrade procedure again, initializing the Raft algorithm from scratch.
|
||||
|
||||
The manual recovery procedure is applicable both to clusters that were not running Raft
|
||||
in the past and then had Raft enabled, and to clusters that were bootstrapped using Raft.
|
||||
|
||||
.. note::
|
||||
|
||||
Entering ``RECOVERY`` mode requires a node restart. Restarting an additional node while
|
||||
some nodes are already dead may lead to unavailability of data queries (assuming that
|
||||
you haven't lost it already). For example, if you're using the standard RF=3,
|
||||
CL=QUORUM setup, and you're recovering from a stuck upgrade procedure because one
|
||||
of your nodes is dead, restarting another node will cause temporary data query
|
||||
unavailability (until the node finishes restarting). Prepare your service for
|
||||
downtime before proceeding.
|
||||
|
||||
#. Perform the following query on **every alive node** in the cluster, using e.g. ``cqlsh``:
|
||||
|
||||
.. code-block:: cql
|
||||
|
||||
cqlsh> UPDATE system.scylla_local SET value = 'recovery' WHERE key = 'group0_upgrade_state';
|
||||
|
||||
#. Perform a :doc:`rolling restart </operating-scylla/procedures/config-change/rolling-restart/>` of your alive nodes.
|
||||
|
||||
#. Verify that all the nodes have entered ``RECOVERY`` mode when restarting; look for one of the following messages in their logs:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
group0_client - RECOVERY mode.
|
||||
raft_group0 - setup_group0: Raft RECOVERY mode, skipping group 0 setup.
|
||||
raft_group0_upgrade - RECOVERY mode. Not attempting upgrade.
|
||||
|
||||
#. Remove all your dead nodes using the :doc:`node removal procedure </operating-scylla/procedures/cluster-management/remove-node/>`.
|
||||
|
||||
#. Remove existing Raft cluster data by performing the following queries on **every alive node** in the cluster, using e.g. ``cqlsh``:
|
||||
|
||||
.. code-block:: cql
|
||||
|
||||
cqlsh> TRUNCATE TABLE system.discovery;
|
||||
cqlsh> TRUNCATE TABLE system.group0_history;
|
||||
cqlsh> DELETE value FROM system.scylla_local WHERE key = 'raft_group0_id';
|
||||
|
||||
#. Make sure that schema is synchronized in the cluster by executing :doc:`nodetool describecluster </operating-scylla/nodetool-commands/describecluster>` on each node and verifying that the schema version is the same on all nodes.
|
||||
|
||||
#. We can now leave ``RECOVERY`` mode. On **every alive node**, perform the following query:
|
||||
|
||||
.. code-block:: cql
|
||||
|
||||
cqlsh> DELETE FROM system.scylla_local WHERE key = 'group0_upgrade_state';
|
||||
|
||||
#. Perform a :doc:`rolling restart </operating-scylla/procedures/config-change/rolling-restart/>` of your alive nodes.
|
||||
|
||||
#. The Raft upgrade procedure will start anew. :ref:`Verify <verify-raft-procedure>` that it finishes successfully.
|
||||
@@ -8,6 +8,7 @@ Data Modeling
|
||||
Scylla Large Partitions Table </troubleshooting/large-partition-table/>
|
||||
Scylla Large Rows and Cells Table </troubleshooting/large-rows-large-cells-tables/>
|
||||
Large Partitions Hunting </troubleshooting/debugging-large-partition/>
|
||||
Failure to Update the Schema </troubleshooting/failed-update-schema>
|
||||
|
||||
.. raw:: html
|
||||
|
||||
@@ -25,6 +26,8 @@ Data Modeling
|
||||
|
||||
* :doc:`Large Partitions Hunting </troubleshooting/debugging-large-partition/>`
|
||||
|
||||
* :doc:`Failure to Update the Schema </troubleshooting/failed-update-schema>`
|
||||
|
||||
`Data Modeling course <https://university.scylladb.com/courses/data-modeling/>`_ on Scylla University
|
||||
|
||||
.. raw:: html
|
||||
|
||||
@@ -1,170 +0,0 @@
|
||||
=============================================================================
|
||||
Upgrade Guide - Scylla 1.6 to 1.7 for |OS|
|
||||
=============================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 1.6 to Scylla 1.7, and rollback to 1.6 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 1.6.x to Scylla version 1.7.y on the following platform:
|
||||
|
||||
* |OS|
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* drain node and backup the data
|
||||
* check your current release
|
||||
* backup configuration file
|
||||
* stop Scylla
|
||||
* download and install new Scylla packages
|
||||
* start Scylla
|
||||
* validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new 1.7 features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-1.6
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a 1.6.x version, stop right here! This guide only covers 1.6.x to 1.7.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |APT|_ to **1.7**
|
||||
2. Upgrade java to 1.8 on Ubuntu 14.04 and Debian 8, which is requested by Scylla 1.7
|
||||
|
||||
* |ENABLE_APT_REPO|
|
||||
* sudo apt-get update
|
||||
* sudo apt-get install -y |JESSIE_BACKPORTS|
|
||||
* sudo update-java-alternatives -s java-1.8.0-openjdk-amd64
|
||||
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Check scylla-server log (check ``/var/log/upstart/scylla-server.log`` for Ubuntu 14.04, execute ``journalctl _COMM=scylla`` for Ubuntu 16.04 and Debian 8) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla release 1.7.x to 1.6.y. Apply this procedure if an upgrade from 1.6 to 1.7 failed before completing on all nodes. Use this procedure only for nodes you upgraded to 1.7.
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes rollback to 1.6, you will:
|
||||
|
||||
* drain the node and stop Scylla
|
||||
* retrieve the old Scylla packages
|
||||
* restore the configuration file
|
||||
* restart Scylla
|
||||
* validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
2. Update the |APT|_ to **1.6**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get remove scylla\* -y
|
||||
sudo apt-get install scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-1.6 /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,190 +0,0 @@
|
||||
=============================================================================
|
||||
Upgrade Guide - Scylla 1.7 to 2.0 for |OS|
|
||||
=============================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 1.7 to Scylla 2.0, and rollback to 1.7 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 1.7.x (x >= 4) to Scylla version 2.0.y on the following platform:
|
||||
|
||||
* |OS|
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* check cluster schema
|
||||
* drain node and backup the data
|
||||
* backup configuration file
|
||||
* stop Scylla
|
||||
* download and install new Scylla packages
|
||||
* start Scylla
|
||||
* validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new 2.0 features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check cluster schema
|
||||
--------------------
|
||||
Make sure that all nodes have the schema synced prior to the upgrade; an upgrade will not succeed if there is schema disagreement between nodes.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-1.7
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a 1.7.x version, stop right here! This guide only covers 1.7.x to 2.0.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |APT|_ to **2.0**
|
||||
2. Upgrade java to 1.8 on Ubuntu 14.04 and Debian 8, which is required by Scylla 2.0
|
||||
|
||||
* |ENABLE_APT_REPO|
|
||||
* sudo apt-get update
|
||||
* sudo apt-get install -y |JESSIE_BACKPORTS|
|
||||
* sudo update-java-alternatives -s java-1.8.0-openjdk-amd64
|
||||
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Check scylla-server log (check ``/var/log/upstart/scylla-server.log`` for Ubuntu 14.04, execute ``journalctl _COMM=scylla`` for Ubuntu 16.04 and Debian 8) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
* More on :doc:`Scylla Metrics Update - Scylla 1.7 to 2.0<metric-update-1.7-to-2.0>`
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla release 2.0.x to 1.7.y (y >= 4). Apply this procedure if an upgrade from 1.7 to 2.0 failed before completing on all nodes. Use this procedure only for nodes you upgraded to 2.0
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes rolled back to 1.7, you will:
|
||||
|
||||
* drain the node and stop Scylla
|
||||
* retrieve the old Scylla packages
|
||||
* restore the configuration file
|
||||
* restart Scylla
|
||||
* validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the rolled-back version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
2. Update the |APT|_ to **1.7**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get remove scylla\* -y
|
||||
sudo apt-get install scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-1.7 /etc/scylla/scylla.yaml
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from previous snapshot, 2.0 uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check the upgrade instructions above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,157 +0,0 @@
|
||||
======================================================================
|
||||
Upgrade Guide - Scylla 1.x.y to 1.x.z for |OS|
|
||||
======================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 1.x.y to Scylla 1.x.z.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 1.x.y to Scylla version 1.x.z on the following platform:
|
||||
|
||||
* |OS|
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* drain node and backup the data
|
||||
* check your current release
|
||||
* backup configuration file and deb packages
|
||||
* stop Scylla
|
||||
* download and install new Scylla packages
|
||||
* start Scylla
|
||||
* validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new 1.x.z features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file and deb packages
|
||||
------------------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-1.x.z
|
||||
|
||||
If you installed Scylla via apt, you can find the deb packages in ``/var/cache/apt/``; back them up to the ``scylla_1.x.y_backup`` directory, which will be used for rollback.
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a 1.x.y version, stop right here! This guide only covers 1.x.y to 1.x.z upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |APT|_ to **1.x**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Check scylla-server log (check ``/var/log/upstart/scylla-server.log`` for Ubuntu 14.04, execute ``journalctl _COMM=scylla`` for Ubuntu 16.04) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla release 1.x.z to 1.x.y. Apply this procedure if an upgrade from 1.x.y to 1.x.z failed before completing on all nodes. Use this procedure only for nodes you upgraded to 1.x.z
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes rolled back to 1.x.y, you will:
|
||||
|
||||
* drain the node and stop Scylla
|
||||
* retrieve the old Scylla packages
|
||||
* restore the configuration file
|
||||
* restart Scylla
|
||||
* validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the rolled-back version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Install the old release from the backed-up deb packages
|
||||
-------------------------------------------------------
|
||||
1. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get remove scylla\* -y
|
||||
sudo dpkg -i scylla_1.x.y_backup/scylla*.deb
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-1.x.z /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check the upgrade instructions above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,201 +0,0 @@
|
||||
=============================================================================
|
||||
Upgrade Guide - Scylla 2.0 to 2.1 for |OS|
|
||||
=============================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 2.0 to Scylla 2.1, and rollback to 2.0 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 2.0.x to Scylla version 2.1.y on the following platform:
|
||||
|
||||
* |OS|
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* Check cluster schema
|
||||
* Drain node and backup the data
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new 2.1 features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check cluster schema
|
||||
--------------------
|
||||
Make sure that all nodes have the schema synced prior to the upgrade; an upgrade will not succeed if there is schema disagreement between nodes.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-2.0
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a 2.0.x version, stop right here! This guide only covers 2.0.x to 2.1.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |APT|_ to **2.1**, and enable scylla/ppa repo
|
||||
|
||||
.. code:: sh
|
||||
|
||||
Debian 8:
|
||||
sudo apt-get install gnupg-curl -y
|
||||
sudo apt-key adv --fetch-keys https://download.opensuse.org/repositories/home:/scylladb:/scylla-3rdparty-jessie/Debian_8.0/Release.key
|
||||
sudo sh -c "echo 'deb http://download.opensuse.org/repositories/home:/scylladb:/scylla-3rdparty-jessie/Debian_8.0/ /' > /etc/apt/sources.list.d/scylla-3rdparty.list"
|
||||
|
||||
Ubuntu 14/16:
|
||||
sudo add-apt-repository -y ppa:scylladb/ppa
|
||||
|
||||
2. Upgrade java to 1.8 on Ubuntu 14.04 and Debian 8, which is required by Scylla 2.1
|
||||
|
||||
* |ENABLE_APT_REPO|
|
||||
* sudo apt-get update
|
||||
* sudo apt-get install -y |JESSIE_BACKPORTS|
|
||||
* sudo update-java-alternatives -s java-1.8.0-openjdk-amd64
|
||||
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Check scylla-server log (check ``/var/log/upstart/scylla-server.log`` for Ubuntu 14.04, execute ``journalctl _COMM=scylla`` for Ubuntu 16.04 and Debian 8) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
* More on :doc:`Scylla Metrics Update - Scylla 2.0 to 2.1<metric-update-2.0-to-2.1>`
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla release 2.1.x to 2.0.y. Apply this procedure if an upgrade from 2.0 to 2.1 failed before completing on all nodes. Use this procedure only for nodes you upgraded to 2.1
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes rolled back to 2.0, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old Scylla packages
|
||||
* Restore the configuration file
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the rolled-back version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
2. Update the |APT|_ to **2.0**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get remove scylla\* -y
|
||||
sudo apt-get install scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-2.0 /etc/scylla/scylla.yaml
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from previous snapshot, 2.1 uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check the upgrade instructions above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,200 +0,0 @@
|
||||
=============================================================================
|
||||
Upgrade Guide - Scylla 2.1 to 2.2 for |OS|
|
||||
=============================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 2.1 to Scylla 2.2, and rollback to 2.1 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 2.1.x to Scylla version 2.2.y on the following platform:
|
||||
|
||||
* |OS|
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* Check cluster schema
|
||||
* Drain node and backup the data
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new 2.2 features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check cluster schema
|
||||
--------------------
|
||||
Make sure that all nodes have the schema synced prior to the upgrade; an upgrade will not succeed if there is schema disagreement between nodes.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
for conf in $(cat /var/lib/dpkg/info/scylla-*server.conffiles /var/lib/dpkg/info/scylla-*conf.conffiles /var/lib/dpkg/info/scylla-*jmx.conffiles | grep -v init ); do sudo cp -v $conf $conf.backup-2.1; done
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a 2.1.x version, stop right here! This guide only covers 2.1.x to 2.2.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |APT|_ to **2.2**, and enable scylla/ppa repo
|
||||
|
||||
.. code:: sh
|
||||
|
||||
Debian 8:
|
||||
sudo apt-get install gnupg-curl -y
|
||||
sudo apt-key adv --fetch-keys https://download.opensuse.org/repositories/home:/scylladb:/scylla-3rdparty-jessie/Debian_8.0/Release.key
|
||||
sudo sh -c "echo 'deb http://download.opensuse.org/repositories/home:/scylladb:/scylla-3rdparty-jessie/Debian_8.0/ /' > /etc/apt/sources.list.d/scylla-3rdparty.list"
|
||||
|
||||
Ubuntu 14/16:
|
||||
sudo add-apt-repository -y ppa:scylladb/ppa
|
||||
|
||||
2. Upgrade java to 1.8 on Ubuntu 14.04 and Debian 8, which is required by Scylla 2.2
|
||||
|
||||
* |ENABLE_APT_REPO|
|
||||
* sudo apt-get update
|
||||
* sudo apt-get install -y |JESSIE_BACKPORTS|
|
||||
* sudo update-java-alternatives -s java-1.8.0-openjdk-amd64
|
||||
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Check scylla-server log (check ``/var/log/upstart/scylla-server.log`` for Ubuntu 14.04, execute ``journalctl _COMM=scylla`` for Ubuntu 16.04 and Debian 8) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
* More on :doc:`Scylla Metrics Update - Scylla 2.1 to 2.2<metric-update-2.1-to-2.2>`
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla release 2.2.x to 2.1.y. Apply this procedure if an upgrade from 2.1 to 2.2 failed before completing on all nodes. Use this procedure only for nodes you upgraded to 2.2
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes rolled back to 2.1, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old Scylla packages
|
||||
* Restore the configuration file
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the rolled-back version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
2. Update the |APT|_ to **2.1**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get remove scylla\* -y
|
||||
sudo apt-get install scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
.. code:: sh
|
||||
|
||||
for conf in $(cat /var/lib/dpkg/info/scylla-*server.conffiles /var/lib/dpkg/info/scylla-*conf.conffiles /var/lib/dpkg/info/scylla-*jmx.conffiles | grep -v init ); do sudo cp -v $conf.backup-2.1 $conf; done
|
||||
sudo systemctl daemon-reload (Ubuntu 16.04 and Debian 8)
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from previous snapshot, 2.2 uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check the upgrade instructions above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,203 +0,0 @@
|
||||
=============================================================================
|
||||
Upgrade Guide - Scylla 2.2 to 2.3 for |OS|
|
||||
=============================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 2.2 to Scylla 2.3, and rollback to 2.2 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 2.2.x to Scylla version 2.3.y on the following platform:
|
||||
|
||||
* |OS|
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* Check cluster schema
|
||||
* Drain node and backup the data
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new 2.3 features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check cluster schema
|
||||
--------------------
|
||||
Make sure that all nodes have the schema synced prior to the upgrade; the upgrade will not succeed if there is schema disagreement between nodes.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
for conf in $(cat /var/lib/dpkg/info/scylla-*server.conffiles /var/lib/dpkg/info/scylla-*conf.conffiles /var/lib/dpkg/info/scylla-*jmx.conffiles | grep -v init ); do sudo cp -v $conf $conf.backup-2.2; done
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a 2.2.x version, stop right here! This guide only covers 2.2.x to 2.3.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |APT|_ to **2.3**, and enable scylla/ppa repo
|
||||
|
||||
.. code:: sh
|
||||
|
||||
Debian 8:
|
||||
sudo apt-get install gnupg-curl -y
|
||||
sudo apt-key adv --fetch-keys https://download.opensuse.org/repositories/home:/scylladb:/scylla-3rdparty-jessie/Debian_8.0/Release.key
|
||||
sudo sh -c "echo 'deb http://download.opensuse.org/repositories/home:/scylladb:/scylla-3rdparty-jessie/Debian_8.0/ /' > /etc/apt/sources.list.d/scylla-3rdparty.list"
|
||||
|
||||
Ubuntu 14/16:
|
||||
sudo add-apt-repository -y ppa:scylladb/ppa
|
||||
|
||||
2. Upgrade Java to 1.8 on Ubuntu 14.04 and Debian 8, which is required by Scylla 2.3
|
||||
|
||||
* |ENABLE_APT_REPO|
|
||||
* sudo apt-get update
|
||||
* sudo apt-get install -y |JESSIE_BACKPORTS|
|
||||
* sudo update-java-alternatives -s java-1.8.0-openjdk-amd64
|
||||
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Check scylla-server log (check ``/var/log/upstart/scylla-server.log`` for Ubuntu 14.04, execute ``journalctl _COMM=scylla`` for Ubuntu 16.04 and Debian 8) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
* More on :doc:`Scylla Metrics Update - Scylla 2.2 to 2.3<metric-update-2.2-to-2.3>`
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla release 2.3.x to 2.2.y. Apply this procedure if an upgrade from 2.2 to 2.3 failed before completing on all nodes. Use this procedure only for nodes you upgraded to 2.3
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes you roll back to 2.2, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old Scylla packages
|
||||
* Restore the configuration file
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
|
||||
|
||||
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
2. Update the |APT|_ to **2.2**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get remove scylla\* -y
|
||||
sudo apt-get install scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
.. code:: sh
|
||||
|
||||
for conf in $(cat /var/lib/dpkg/info/scylla-*server.conffiles /var/lib/dpkg/info/scylla-*conf.conffiles /var/lib/dpkg/info/scylla-*jmx.conffiles | grep -v init ); do sudo cp -v $conf.backup-2.2 $conf; done
|
||||
sudo systemctl daemon-reload (Ubuntu 16.04 and Debian 8)
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from previous snapshot, 2.3 uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,218 +0,0 @@
|
||||
=============================================================================
|
||||
Upgrade Guide - Scylla 2.3 to 3.0 for |OS|
|
||||
=============================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 2.3 to Scylla 3.0, and rollback to 2.3 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 2.3.x to Scylla version 3.0.y on the following platform:
|
||||
|
||||
* |OS|
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* Check cluster schema
|
||||
* Drain node and backup the data
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new 3.0 features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check cluster schema
|
||||
--------------------
|
||||
Make sure that all nodes have the schema synced prior to the upgrade; the upgrade will not succeed if there is schema disagreement between nodes.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
for conf in $(cat /var/lib/dpkg/info/scylla-*server.conffiles /var/lib/dpkg/info/scylla-*conf.conffiles /var/lib/dpkg/info/scylla-*jmx.conffiles | grep -v init ); do sudo cp -v $conf $conf.backup-2.3; done
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a 2.3.x version, stop right here! This guide only covers 2.3.x to 3.0.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |APT|_ to **3.0**, and enable scylla/ppa repo
|
||||
|
||||
.. code:: sh
|
||||
|
||||
Debian 8:
|
||||
sudo apt-get install gnupg-curl -y
|
||||
sudo apt-key adv --fetch-keys https://download.opensuse.org/repositories/home:/scylladb:/scylla-3rdparty-jessie/Debian_8.0/Release.key
|
||||
sudo sh -c "echo 'deb http://download.opensuse.org/repositories/home:/scylladb:/scylla-3rdparty-jessie/Debian_8.0/ /' > /etc/apt/sources.list.d/scylla-3rdparty.list"
|
||||
|
||||
Ubuntu 14/16:
|
||||
sudo add-apt-repository -y ppa:scylladb/ppa
|
||||
|
||||
2. Upgrade Java to 1.8 on Ubuntu 14.04 and Debian 8, which is required by Scylla 3.0
|
||||
|
||||
* |ENABLE_APT_REPO|
|
||||
* sudo apt-get update
|
||||
* sudo apt-get install -y |JESSIE_BACKPORTS|
|
||||
* sudo update-java-alternatives -s java-1.8.0-openjdk-amd64
|
||||
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
4. Upgrade node_exporter
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service node_exporter stop
|
||||
sudo rm /usr/bin/node_exporter
|
||||
sudo node_exporter_install
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Check scylla-server log (check ``/var/log/upstart/scylla-server.log`` for Ubuntu 14.04, execute ``journalctl _COMM=scylla`` for Ubuntu 16.04 and Debian 8) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
* More on :doc:`Scylla Metrics Update - Scylla 2.3 to 3.0<metric-update-2.3-to-3.0>`
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla release 3.0.x to 2.3.y. Apply this procedure if an upgrade from 2.3 to 3.0 failed before completing on all nodes. Use this procedure only for nodes you upgraded to 3.0
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes you roll back to 2.3, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old Scylla packages
|
||||
* Restore the configuration file
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
2. Update the |APT|_ to **2.3**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get remove scylla\* -y
|
||||
sudo apt-get install scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
.. code:: sh
|
||||
|
||||
for conf in $(cat /var/lib/dpkg/info/scylla-*server.conffiles /var/lib/dpkg/info/scylla-*conf.conffiles /var/lib/dpkg/info/scylla-*jmx.conffiles | grep -v init ); do sudo cp -v $conf.backup-2.3 $conf; done
|
||||
sudo systemctl daemon-reload (Ubuntu 16.04 and Debian 8)
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from previous snapshot, 3.0 uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
sudo chown -R scylla:scylla /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Install old node_exporter
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service node_exporter stop
|
||||
sudo rm /usr/bin/node_exporter
|
||||
sudo node_exporter_install
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,155 +0,0 @@
|
||||
======================================================================
|
||||
Upgrade Guide - Scylla 2.x.y to 2.x.z for |OS|
|
||||
======================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 2.x.y to Scylla 2.x.z.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 2.x.y to Scylla version 2.x.z on the following platform:
|
||||
|
||||
* |OS|
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* Drain node and backup the data
|
||||
* Check your current release
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new 2.x.z features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-2.x.z
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a 2.x.y version, stop right here! This guide only covers 2.x.y to 2.x.z upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |APT|_ to **2.x**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Check scylla-server log (check ``/var/log/upstart/scylla-server.log`` for Ubuntu 14.04, execute ``journalctl _COMM=scylla`` for Ubuntu 16.04) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla release 2.x.z to 2.x.y. Apply this procedure if an upgrade from 2.x.y to 2.x.z failed before completing on all nodes. Use this procedure only for nodes you upgraded to 2.x.z
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes you roll back to 2.x.y, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Downgrade to previous release
|
||||
* Restore the configuration file
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Downgrade to previous release
|
||||
-----------------------------
|
||||
1. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get install scylla=2.x.y\* scylla-server=2.x.y\* scylla-jmx=2.x.y\* scylla-tools=2.x.y\* scylla-tools-core=2.x.y\* scylla-kernel-conf=2.x.y\* scylla-conf=2.x.y\*
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-2.x.z /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,161 +0,0 @@
|
||||
======================================================================
|
||||
Upgrade Guide - Scylla 3.x.y to 3.x.z for |OS|
|
||||
======================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 3.x.y to Scylla 3.x.z.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 3.x.y to Scylla version 3.x.z on the following platform:
|
||||
|
||||
* |OS|
|
||||
|
||||
.. include:: /upgrade/_common/note-ubuntu14.rst
|
||||
|
||||
.. include:: /upgrade/upgrade-opensource/upgrade-guide-from-3.x.y-to-3.x.z/_common/note_3.1.0_to_3.1.1.rst
|
||||
|
||||
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* Drain node and backup the data
|
||||
* Check your current release
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new 3.x.z features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-3.x.z
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a 3.x.y version, stop right here! This guide only covers 3.x.y to 3.x.z upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |APT|_ to **3.x**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Check scylla-server log (check ``/var/log/upstart/scylla-server.log`` for Ubuntu 14.04, execute ``journalctl _COMM=scylla`` for Ubuntu 16.04 and Ubuntu 18.04) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla release 3.x.z to 3.x.y. Apply this procedure if an upgrade from 3.x.y to 3.x.z failed before completing on all nodes. Use this procedure only for nodes you upgraded to 3.x.z
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes you roll back to 3.x.y, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Downgrade to previous release
|
||||
* Restore the configuration file
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Downgrade to previous release
|
||||
-----------------------------
|
||||
1. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get install scylla=3.x.y\* scylla-server=3.x.y\* scylla-jmx=3.x.y\* scylla-tools=3.x.y\* scylla-tools-core=3.x.y\* scylla-kernel-conf=3.x.y\* scylla-conf=3.x.y\*
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-3.x.z /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,158 +0,0 @@
|
||||
======================================================================
|
||||
Upgrade Guide - ScyllaDB |FROM| to |TO| for |OS|
|
||||
======================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla |FROM| to Scylla |TO|.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: |FROM| to Scylla version |TO| on the following platform:
|
||||
|
||||
* |OS|
|
||||
|
||||
.. include:: /upgrade/_common/note-ubuntu14.rst
|
||||
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* Drain node and backup the data
|
||||
* Check your current release
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new |TO| features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-4.x.z
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a |FROM| version, stop right here! This guide only covers |FROM| to |TO| upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |APT|_ to **4.x**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Check scylla-server log (check ``/var/log/upstart/scylla-server.log`` for Ubuntu 14.04, execute ``journalctl _COMM=scylla`` for Ubuntu 16.04 and Ubuntu 18.04) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla release |TO| to |FROM|. Apply this procedure if an upgrade from |FROM| to |TO| failed before completing on all nodes. Use this procedure only for nodes you upgraded to |TO|.
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes you roll back to |FROM|, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Downgrade to previous release
|
||||
* Restore the configuration file
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Downgrade to previous release
|
||||
-----------------------------
|
||||
1. install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get install scylla=4.x.y\* scylla-server=4.x.y\* scylla-jmx=4.x.y\* scylla-tools=4.x.y\* scylla-tools-core=4.x.y\* scylla-kernel-conf=4.x.y\* scylla-conf=4.x.y\*
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-4.x.z /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,183 +0,0 @@
|
||||
=============================================================================================================
|
||||
Upgrade Guide - |SCYLLA_NAME| |SRC_VERSION| to |NEW_VERSION| for Red Hat Enterprise Linux 7 or CentOS 7
|
||||
=============================================================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from |SCYLLA_NAME| |SRC_VERSION| to |SCYLLA_NAME| |NEW_VERSION|, and rollback to |SRC_VERSION| if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: |SRC_VERSION|.x or later to |SCYLLA_NAME| version |NEW_VERSION|.y, on the following platforms:
|
||||
|
||||
* Red Hat Enterprise Linux, version 7 and later
|
||||
* CentOS, version 7 and later
|
||||
* Packages are no longer provided for Fedora
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does not require full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
* Check cluster schema
|
||||
* Drain node and backup the data
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new |NEW_VERSION| features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes. See `sctool <https://manager.docs.scylladb.com/stable/sctool/index.html>`_ for suspending Scylla Manager (only available Scylla Enterprise) scheduled or running repairs.
|
||||
* Not to apply schema changes
|
||||
|
||||
.. note:: Before upgrading, make sure to use |SCYLLA_MONITOR|_ or newer, for the Dashboards.
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check cluster schema
|
||||
--------------------
|
||||
Make sure that all nodes have the schema synced prior to the upgrade. An upgrade cannot succeed if there is schema disagreement between nodes.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-src
|
||||
|
||||
Stop Scylla
|
||||
-----------
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``rpm -qa | grep scylla-server``. You should use the same version in case you want to :ref:`rollback <rollback-procedure>` the upgrade. If you are not running a |SRC_VERSION|.x version, stop right here! This guide only covers |SRC_VERSION|.x to |NEW_VERSION|.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |SCYLLA_REPO|_ to |NEW_VERSION|
|
||||
2. install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum update scylla\* -y
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Use ``journalctl _COMM=scylla`` to check there are no new errors in the log.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
* More on |Scylla_METRICS|_
|
||||
|
||||
.. _rollback-procedure:
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from |SCYLLA_NAME| release |NEW_VERSION|.x to |SRC_VERSION|.y. Apply this procedure if an upgrade from |SRC_VERSION| to |NEW_VERSION| failed before completing on all nodes. Use this procedure only for nodes you upgraded to |NEW_VERSION|.
|
||||
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes you roll back to |SRC_VERSION|, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old Scylla packages
|
||||
* Restore the configuration file
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/yum.repos.d/scylla.repo
|
||||
|
||||
2. Update the |SCYLLA_REPO|_ to |SRC_VERSION|
|
||||
3. Install
|
||||
|
||||
.. parsed-literal::
|
||||
\ sudo yum clean all
|
||||
\ sudo rm -rf /var/cache/yum
|
||||
\ sudo yum remove scylla\\*tools-core
|
||||
\ sudo yum downgrade scylla\\* -y
|
||||
\ sudo yum install |PKG_NAME|
|
||||
\
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-src /etc/scylla/scylla.yaml
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from previous snapshot, |NEW_VERSION| uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
sudo chown -R scylla:scylla /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,197 +0,0 @@
|
||||
=============================================================================
|
||||
Upgrade Guide - |SCYLLA_NAME| |SRC_VERSION| to |NEW_VERSION| for |OS|
|
||||
=============================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from |SCYLLA_NAME| |SRC_VERSION| to |SCYLLA_NAME| |NEW_VERSION|, and rollback to |SRC_VERSION| if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: |SRC_VERSION|.x or later to |SCYLLA_NAME| version |NEW_VERSION|.y on the following platform:
|
||||
|
||||
* |OS|
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* Check cluster schema
|
||||
* Drain node and backup the data
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new |NEW_VERSION| features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes. See `sctool <https://manager.docs.scylladb.com/stable/sctool/index.html>`_ for suspending Scylla Manager (only available Scylla Enterprise) scheduled or running repairs.
|
||||
* Not to apply schema changes
|
||||
|
||||
.. note:: Before upgrading, make sure to use |SCYLLA_MONITOR|_ or newer, for the Dashboards.
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check cluster schema
|
||||
--------------------
|
||||
Make sure that all nodes have the schema synced prior to the upgrade. An upgrade cannot succeed if there is schema disagreement between nodes.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-|SRC_VERSION|
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a |SRC_VERSION|.x version, stop right here! This guide only covers |SRC_VERSION|.x to |NEW_VERSION|.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |SCYLLA_REPO|_ to |NEW_VERSION|, and enable scylla/ppa repo
|
||||
|
||||
.. code:: sh
|
||||
|
||||
Ubuntu 16:
|
||||
sudo add-apt-repository -y ppa:scylladb/ppa
|
||||
|
||||
2. Config java to 1.8, which is requested by |SCYLLA_NAME| |NEW_VERSION|
|
||||
|
||||
* sudo apt-get update
|
||||
* sudo apt-get install -y |OPENJDK|
|
||||
* sudo update-java-alternatives -s java-1.8.0-openjdk-amd64
|
||||
|
||||
3. Install
|
||||
|
||||
.. parsed-literal::
|
||||
\ sudo apt-get update
|
||||
\ sudo apt-get dist-upgrade |PKG_NAME|
|
||||
\
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Check scylla-server log (by ``journalctl _COMM=scylla``) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
* More on |Scylla_METRICS|_
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from |SCYLLA_NAME| release |NEW_VERSION|.x to |SRC_VERSION|.y. Apply this procedure if an upgrade from |SRC_VERSION| to |NEW_VERSION| failed before completing on all nodes. Use this procedure only for nodes you upgraded to |NEW_VERSION|.
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes you roll back to |SRC_VERSION|, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old Scylla packages
|
||||
* Restore the configuration file
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
2. Update the |SCYLLA_REPO|_ to |SRC_VERSION|
|
||||
3. install
|
||||
|
||||
.. parsed-literal::
|
||||
\ sudo apt-get update
|
||||
\ sudo apt-get remove scylla\* -y
|
||||
\ sudo apt-get install |PKG_NAME|
|
||||
\
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-|SRC_VERSION| /etc/scylla/scylla.yaml
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from previous snapshot, |NEW_VERSION| uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
sudo chown -R scylla:scylla /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,196 +0,0 @@
|
||||
=============================================================================================================
|
||||
Upgrade Guide - |SCYLLA_NAME| |SRC_VERSION| to |NEW_VERSION| for Red Hat Enterprise Linux 7 or CentOS 7
|
||||
=============================================================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from |SCYLLA_NAME| |SRC_VERSION| to |SCYLLA_NAME| |NEW_VERSION|, and rollback to |SRC_VERSION| if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: |SRC_VERSION|.x or later to |SCYLLA_NAME| version |NEW_VERSION|.y, on the following platforms:
|
||||
|
||||
* Red Hat Enterprise Linux, version 7 and later
|
||||
* CentOS, version 7 and later
|
||||
* Packages are no longer provided for Fedora
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does not require full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
* Check cluster schema
|
||||
* Drain node and backup the data
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new |NEW_VERSION| features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes. See `sctool <https://manager.docs.scylladb.com/stable/sctool/index.html>`_ for suspending Scylla Manager (only available Scylla Enterprise) scheduled or running repairs.
|
||||
* Not to apply schema changes
|
||||
|
||||
.. note:: Before upgrading, make sure to use |SCYLLA_MONITOR|_ or newer, for the Dashboards.
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check cluster schema
|
||||
--------------------
|
||||
Make sure that all nodes have the schema synced prior to the upgrade. An upgrade cannot succeed if there is schema disagreement between nodes.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-src
|
||||
|
||||
Stop Scylla
|
||||
-----------
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``rpm -qa | grep scylla-server``. You should use the same version in case you want to :ref:`rollback <rollback-procedure>` the upgrade. If you are not running a |SRC_VERSION|.x version, stop right here! This guide only covers |SRC_VERSION|.x to |NEW_VERSION|.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |SCYLLA_REPO|_ to |NEW_VERSION|
|
||||
2. install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum update scylla\* -y
|
||||
|
||||
.. note::
|
||||
|
||||
Alternator users upgrading from Scylla 4.0 to 4.1, need to set :doc:`default isolation level </upgrade/upgrade-opensource/upgrade-guide-from-4.0-to-4.1/alternator>`
|
||||
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Use ``journalctl _COMM=scylla`` to check there are no new errors in the log.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
* More on |Scylla_METRICS|_
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from |SCYLLA_NAME| release |NEW_VERSION|.x to |SRC_VERSION|.y. Apply this procedure if an upgrade from |SRC_VERSION| to |NEW_VERSION| failed before completing on all nodes. Use this procedure only for nodes you upgraded to |NEW_VERSION|.
|
||||
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes you roll back to |SRC_VERSION|, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old Scylla packages
|
||||
* Restore the configuration file
|
||||
* Reload systemd configuration
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/yum.repos.d/scylla.repo
|
||||
|
||||
2. Update the |SCYLLA_REPO|_ to |SRC_VERSION|
|
||||
3. Install
|
||||
|
||||
.. parsed-literal::
|
||||
\ sudo yum clean all
|
||||
\ sudo rm -rf /var/cache/yum
|
||||
\ sudo yum remove scylla\\*tools-core
|
||||
\ sudo yum downgrade scylla\\* -y
|
||||
\ sudo yum install |PKG_NAME|
|
||||
\
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-src /etc/scylla/scylla.yaml
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from previous snapshot, |NEW_VERSION| uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
sudo chown -R scylla:scylla /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Reload systemd configuration
|
||||
---------------------------------
|
||||
|
||||
You must reload the systemd unit file if it has changed.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl daemon-reload
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,213 +0,0 @@
|
||||
=============================================================================
|
||||
Upgrade Guide - |SCYLLA_NAME| |SRC_VERSION| to |NEW_VERSION| for |OS|
|
||||
=============================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from |SCYLLA_NAME| |SRC_VERSION| to |SCYLLA_NAME| |NEW_VERSION|, and rollback to |SRC_VERSION| if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: |SRC_VERSION|.x or later to |SCYLLA_NAME| version |NEW_VERSION|.y on the following platform:
|
||||
|
||||
* |OS|
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* Check cluster schema
|
||||
* Drain node and backup the data
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new |NEW_VERSION| features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes. See `sctool <https://manager.docs.scylladb.com/stable/sctool/index.html>`_ for suspending Scylla Manager (only available Scylla Enterprise) scheduled or running repairs.
|
||||
* Not to apply schema changes
|
||||
|
||||
.. note:: Before upgrading, make sure to use |SCYLLA_MONITOR|_ or newer, for the Dashboards.
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check cluster schema
|
||||
--------------------
|
||||
Make sure that all nodes have the schema synced prior to the upgrade; the upgrade will fail if there is schema disagreement between nodes.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-|SRC_VERSION|
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a |SRC_VERSION|.x version, stop right here! This guide only covers |SRC_VERSION|.x to |NEW_VERSION|.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |SCYLLA_REPO|_ to |NEW_VERSION|, and enable scylla/ppa repo
|
||||
|
||||
.. code:: sh
|
||||
|
||||
Ubuntu 16:
|
||||
sudo add-apt-repository -y ppa:scylladb/ppa
|
||||
|
||||
2. Config java to 1.8, which is requested by |SCYLLA_NAME| |NEW_VERSION|
|
||||
|
||||
* sudo apt-get update
|
||||
* sudo apt-get install -y |OPENJDK|
|
||||
* sudo update-java-alternatives -s java-1.8.0-openjdk-amd64
|
||||
|
||||
3. Install
|
||||
|
||||
.. parsed-literal::
|
||||
\ sudo apt-get update
|
||||
\ sudo apt-get dist-upgrade |PKG_NAME|
|
||||
\
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
|
||||
.. note::
|
||||
|
||||
Alternator users upgrading from Scylla 4.0 to 4.1, need to set :doc:`default isolation level </upgrade/upgrade-opensource/upgrade-guide-from-4.0-to-4.1/alternator>`
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Check scylla-server log (by ``journalctl _COMM=scylla``) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
* More on |Scylla_METRICS|_
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from |SCYLLA_NAME| release |NEW_VERSION|.x to |SRC_VERSION|.y. Apply this procedure if an upgrade from |SRC_VERSION| to |NEW_VERSION| failed before completing on all nodes. Use this procedure only for nodes you upgraded to |NEW_VERSION|
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes rollback to |SRC_VERSION|, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old Scylla packages
|
||||
* Restore the configuration file
|
||||
* Restore system tables
|
||||
* Reload systemd configuration
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
2. Update the |SCYLLA_REPO|_ to |SRC_VERSION|
|
||||
3. Install
|
||||
|
||||
.. parsed-literal::
|
||||
\ sudo apt-get update
|
||||
\ sudo apt-get remove scylla\* -y
|
||||
\ sudo apt-get install |PKG_NAME|
|
||||
\
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-|SRC_VERSION| /etc/scylla/scylla.yaml
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from previous snapshot, |NEW_VERSION| uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
sudo chown -R scylla:scylla /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Reload systemd configuration
|
||||
----------------------------
|
||||
|
||||
You must reload the unit file if the systemd unit file has changed.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl daemon-reload
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check the upgrade instructions above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,192 +0,0 @@
|
||||
=============================================================================================================
|
||||
Upgrade Guide - |SCYLLA_NAME| |SRC_VERSION| to |NEW_VERSION| for |OS|
|
||||
=============================================================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from |SCYLLA_NAME| |SRC_VERSION| to |SCYLLA_NAME| |NEW_VERSION|, and rollback to |SRC_VERSION| if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: |SRC_VERSION|.x or later to |SCYLLA_NAME| version |NEW_VERSION|.y, on the following platforms:
|
||||
|
||||
* |OS|
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
Upgrading your Scylla version is a rolling procedure that does not require a full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
* Check the cluster's schema
|
||||
* Drain the node and backup the data
|
||||
* Backup the configuration file
|
||||
* Stop the Scylla service
|
||||
* Download and install new Scylla packages
|
||||
* Start the Scylla service
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade, it is highly recommended:
|
||||
|
||||
* Not to use new |NEW_VERSION| features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes. See `sctool <https://manager.docs.scylladb.com/stable/sctool/index.html>`_ for suspending Scylla Manager (only available Scylla Enterprise) scheduled or running repairs.
|
||||
* Not to apply schema changes
|
||||
|
||||
.. note:: Before upgrading, make sure to use |SCYLLA_MONITOR|_ or newer, for the Dashboards.
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check the cluster schema
|
||||
------------------------
|
||||
Make sure that all nodes have the schema synched prior to upgrade as any schema disagreement between the nodes causes the upgrade to fail.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is **highly recommended** to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to an external backup device.
|
||||
|
||||
When the upgrade is complete (for all nodes), remove the snapshot by running ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of disk space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-src
|
||||
|
||||
Stop Scylla
|
||||
-----------
|
||||
.. include:: /rst_include/scylla-commands-stop-index.rst
|
||||
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what Scylla version you are currently running with ``rpm -qa | grep scylla-server``. You should use the same version as this version in case you want to :ref:`rollback <rollback-procedure>` the upgrade. If you are not running a |SRC_VERSION|.x version, stop right here! This guide only covers |SRC_VERSION|.x to |NEW_VERSION|.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |SCYLLA_REPO|_ to |NEW_VERSION|
|
||||
2. Install the new Scylla version
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum clean all
|
||||
sudo yum update scylla\* -y
|
||||
|
||||
.. note::
|
||||
|
||||
Alternator users upgrading from Scylla 4.0 to 4.1, need to set :doc:`default isolation level </upgrade/upgrade-opensource/upgrade-guide-from-4.0-to-4.1/alternator>`
|
||||
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
.. include:: /rst_include/scylla-commands-start-index.rst
|
||||
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check the Scylla version. Validate that the version matches the one you upgraded to.
|
||||
3. Use ``journalctl _COMM=scylla`` to check there are no new errors in the log.
|
||||
4. Check again after two minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade was successful, move to the next node in the cluster.
|
||||
|
||||
* More on |Scylla_METRICS|_
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from |SCYLLA_NAME| release |NEW_VERSION|.x to |SRC_VERSION|.y. Apply this procedure if an upgrade from |SRC_VERSION| to |NEW_VERSION| failed before completing on all nodes. Use this procedure only for the nodes that you upgraded to |NEW_VERSION|
|
||||
|
||||
|
||||
Scylla rollback is a rolling procedure that does **not** require a full cluster shutdown.
|
||||
For each of the nodes rollback to |SRC_VERSION|, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old Scylla packages
|
||||
* Restore the configuration file
|
||||
* Reload the systemd configuration
|
||||
* Restart the Scylla service
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the rollback was successful and that the node is up and running with the old version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
.. include:: /rst_include/scylla-commands-stop-index.rst
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/yum.repos.d/scylla.repo
|
||||
|
||||
2. Update the |SCYLLA_REPO|_ to |SRC_VERSION|
|
||||
3. Install
|
||||
|
||||
.. parsed-literal::
|
||||
\ sudo yum clean all
|
||||
\ sudo rm -rf /var/cache/yum
|
||||
\ sudo yum remove scylla\\*tools-core
|
||||
\ sudo yum downgrade scylla\\* -y
|
||||
\ sudo yum install |PKG_NAME|
|
||||
\
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-src /etc/scylla/scylla.yaml
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from previous snapshot, |NEW_VERSION| uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
sudo chown -R scylla:scylla /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Reload systemd configuration
|
||||
---------------------------------
|
||||
|
||||
You must reload the unit file if the systemd unit file has changed.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl daemon-reload
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
|
||||
.. include:: /rst_include/scylla-commands-start-index.rst
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check the upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster. Keep in mind that the version you want to see on your node is the old version, which you noted at the beginning of the procedure.
|
||||
@@ -1,203 +0,0 @@
|
||||
=============================================================================
|
||||
Upgrade Guide - |SCYLLA_NAME| |SRC_VERSION| to |NEW_VERSION| for |OS|
|
||||
=============================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from |SCYLLA_NAME| |SRC_VERSION| to |SCYLLA_NAME| |NEW_VERSION|, and rollback to |SRC_VERSION| if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: |SRC_VERSION|.x or later to |SCYLLA_NAME| version |NEW_VERSION|.y on the following platform:
|
||||
|
||||
* |OS|
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* Check cluster schema
|
||||
* Drain node and backup the data
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new |NEW_VERSION| features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes. See `sctool <https://manager.docs.scylladb.com/stable/sctool/index.html>`_ for suspending Scylla Manager (only available Scylla Enterprise) scheduled or running repairs.
|
||||
* Not to apply schema changes
|
||||
|
||||
.. note:: Before upgrading, make sure to use |SCYLLA_MONITOR|_ or newer, for the Dashboards.
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check cluster schema
|
||||
--------------------
|
||||
Make sure that all nodes have the schema synced prior to the upgrade; the upgrade will fail if there is schema disagreement between nodes.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-|SRC_VERSION|
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a |SRC_VERSION|.x version, stop right here! This guide only covers |SRC_VERSION|.x to |NEW_VERSION|.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |SCYLLA_REPO|_ to |NEW_VERSION|
|
||||
|
||||
2. Install
|
||||
|
||||
.. parsed-literal::
|
||||
\ sudo apt-get clean all
|
||||
\ sudo apt-get update
|
||||
\ sudo apt-get dist-upgrade |PKG_NAME|
|
||||
\
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
|
||||
.. note::
|
||||
|
||||
Alternator users upgrading from Scylla 4.0 to 4.1, need to set :doc:`default isolation level </upgrade/upgrade-opensource/upgrade-guide-from-4.0-to-4.1/alternator>`
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Check scylla-server log (by ``journalctl _COMM=scylla``) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
* More on |Scylla_METRICS|_
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from |SCYLLA_NAME| release |NEW_VERSION|.x to |SRC_VERSION|.y. Apply this procedure if an upgrade from |SRC_VERSION| to |NEW_VERSION| failed before completing on all nodes. Use this procedure only for nodes you upgraded to |NEW_VERSION|
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes rollback to |SRC_VERSION|, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old Scylla packages
|
||||
* Restore the configuration file
|
||||
* Restore system tables
|
||||
* Reload systemd configuration
|
||||
* Restart Scylla
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
2. Update the |SCYLLA_REPO|_ to |SRC_VERSION|
|
||||
3. Install
|
||||
|
||||
.. parsed-literal::
|
||||
\ sudo apt-get update
|
||||
\ sudo apt-get remove scylla\* -y
|
||||
\ sudo apt-get install |PKG_NAME|
|
||||
\
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-|SRC_VERSION| /etc/scylla/scylla.yaml
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from previous snapshot, |NEW_VERSION| uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
sudo chown -R scylla:scylla /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Reload systemd configuration
|
||||
----------------------------
|
||||
|
||||
You must reload the unit file if the systemd unit file has changed.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl daemon-reload
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check the upgrade instructions above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -69,7 +69,7 @@ Stop ScyllaDB
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``rpm -qa | grep scylla-server``. You should use the same version as this version in case you want to :ref:`rollback <rollback-procedure>` the upgrade. If you are not running a |SRC_VERSION|.x version, stop right here! This guide only covers |SRC_VERSION|.x to |NEW_VERSION|.y upgrades.
|
||||
Before upgrading, check what version you are running now using ``rpm -qa | grep scylla-server``. You should use the same version as this version in case you want to :ref:`rollback <rollback-procedure-v4>` the upgrade. If you are not running a |SRC_VERSION|.x version, stop right here! This guide only covers |SRC_VERSION|.x to |NEW_VERSION|.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
@@ -98,6 +98,8 @@ Once you are sure the node upgrade was successful, move to the next node in the
|
||||
|
||||
See |Scylla_METRICS|_ for more information.
|
||||
|
||||
.. _rollback-procedure-v4:
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
|
||||
@@ -34,7 +34,7 @@ The following example shows the upgrade path for a 3-node cluster from version 4
|
||||
#. Upgrade all three nodes to version 4.6.
|
||||
|
||||
|
||||
Upgrading to each patch version by following the :doc:`Scylla Maintenance Release Upgrade Guide </upgrade/upgrade-opensource/upgrade-guide-from-4.x.y-to-4.x.z/index>`
|
||||
Upgrading to each patch version by following the Maintenance Release Upgrade Guide
|
||||
is optional. However, we recommend upgrading to the latest patch release for your version before upgrading to a new version.
|
||||
For example, upgrade to patch 4.4.8 before upgrading to version 4.5.
|
||||
|
||||
|
||||
@@ -10,25 +10,7 @@ Upgrade ScyllaDB Open Source
|
||||
ScyllaDB 5.0 to 5.1 <upgrade-guide-from-5.0-to-5.1/index>
|
||||
ScyllaDB 5.x maintenance release <upgrade-guide-from-5.x.y-to-5.x.z/index>
|
||||
ScyllaDB 4.6 to 5.0 <upgrade-guide-from-4.6-to-5.0/index>
|
||||
ScyllaDB 4.5 to 4.6 <upgrade-guide-from-4.5-to-4.6/index>
|
||||
ScyllaDB 4.4 to 4.5 <upgrade-guide-from-4.4-to-4.5/index>
|
||||
ScyllaDB 4.3 to 4.4 <upgrade-guide-from-4.3-to-4.4/index>
|
||||
ScyllaDB 4.2 to 4.3 <upgrade-guide-from-4.2-to-4.3/index>
|
||||
ScyllaDB 4.1 to 4.2 <upgrade-guide-from-4.1-to-4.2/index>
|
||||
ScyllaDB 4.x maintenance release <upgrade-guide-from-4.x.y-to-4.x.z/index>
|
||||
ScyllaDB 4.0 to 4.1 <upgrade-guide-from-4.0-to-4.1/index>
|
||||
ScyllaDB 3.x maintenance release <upgrade-guide-from-3.x.y-to-3.x.z/index>
|
||||
ScyllaDB 3.3 to 4.0 <upgrade-guide-from-3.3-to-4.0/index>
|
||||
ScyllaDB 3.2 to 3.3 <upgrade-guide-from-3.2-to-3.3/index>
|
||||
ScyllaDB 3.1 to 3.2 <upgrade-guide-from-3.1-to-3.2/index>
|
||||
ScyllaDB 3.0 to 3.1 <upgrade-guide-from-3.0-to-3.1/index>
|
||||
ScyllaDB 2.3 to 3.0 <upgrade-guide-from-2.3-to-3.0/index>
|
||||
ScyllaDB 2.2 to 2.3 <upgrade-guide-from-2.2-to-2.3/index>
|
||||
ScyllaDB 2.1 to 2.2 <upgrade-guide-from-2.1-to-2.2/index>
|
||||
ScyllaDB 2.x maintenance release <upgrade-guide-from-2.x.y-to-2.x.z/index>
|
||||
Older versions <upgrade-archive>
|
||||
Ubuntu 14.04 to 16.04 <upgrade-guide-from-ubuntu-14-to-16>
|
||||
|
||||
|
||||
|
||||
.. panel-box::
|
||||
:title: Upgrade ScyllaDB Open Source
|
||||
@@ -43,22 +25,4 @@ Upgrade ScyllaDB Open Source
|
||||
* :doc:`Upgrade Guide - ScyllaDB 5.0 to 5.1 <upgrade-guide-from-5.0-to-5.1/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 5.x maintenance releases <upgrade-guide-from-5.x.y-to-5.x.z/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 4.6 to 5.0 <upgrade-guide-from-4.6-to-5.0/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 4.5 to 4.6 <upgrade-guide-from-4.5-to-4.6/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 4.4 to 4.5 <upgrade-guide-from-4.4-to-4.5/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 4.3 to 4.4 <upgrade-guide-from-4.3-to-4.4/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 4.2 to 4.3 <upgrade-guide-from-4.2-to-4.3/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 4.1 to 4.2 <upgrade-guide-from-4.1-to-4.2/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 4.x maintenance release <upgrade-guide-from-4.x.y-to-4.x.z/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 4.0 to 4.1 <upgrade-guide-from-4.0-to-4.1/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 3.x maintenance release <upgrade-guide-from-3.x.y-to-3.x.z/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 3.3 to 4.0 <upgrade-guide-from-3.3-to-4.0/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 3.2 to 3.3 <upgrade-guide-from-3.2-to-3.3/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 3.1 to 3.2 <upgrade-guide-from-3.1-to-3.2/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 3.0 to 3.1 <upgrade-guide-from-3.0-to-3.1/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 2.3 to 3.0 <upgrade-guide-from-2.3-to-3.0/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 2.2 to 2.3 <upgrade-guide-from-2.2-to-2.3/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 2.1 to 2.2 <upgrade-guide-from-2.1-to-2.2/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 2.x maintenance release <upgrade-guide-from-2.x.y-to-2.x.z/index>`
|
||||
* :doc:`Upgrade Guide - older versions <upgrade-archive>`
|
||||
* :doc:`Upgrade Guide - Ubuntu 14.04 to 16.04 <upgrade-guide-from-ubuntu-14-to-16>`
|
||||
* :ref:`Upgrade Unified Installer (relocatable executable) install <unified-installed-upgrade>`
|
||||
|
||||
@@ -1,59 +0,0 @@
|
||||
==========================================
|
||||
Upgrade Scylla Opensource - older versions
|
||||
==========================================
|
||||
|
||||
.. toctree::
|
||||
:hidden:
|
||||
|
||||
Scylla 2.0 to 2.1 <upgrade-guide-from-2.0-to-2.1/index>
|
||||
Scylla 1.7 to 2.0 <upgrade-guide-from-1.7-to-2.0/index>
|
||||
Scylla 1.6 to 1.7 <upgrade-guide-from-1.6-to-1.7/index>
|
||||
Scylla 1.5 to 1.6 <upgrade-guide-from-1.5-to-1.6/index>
|
||||
Scylla 1.4 to 1.5 <upgrade-guide-from-1.4-to-1.5/index>
|
||||
Scylla 1.3 to 1.4 <upgrade-guide-from-1.3-to-1.4/index>
|
||||
Scylla 1.2 to 1.3 <upgrade-guide-from-1.2-to-1.3/index>
|
||||
Scylla 1.1 to 1.2 <upgrade-guide-from-1.1-to-1.2/index>
|
||||
Scylla 1.0 to 1.1 <upgrade-guide-from-1.0-to-1.1/index>
|
||||
Scylla 1.x maintenance release <upgrade-guide-from-1.x.y-to-1.x.z/index>
|
||||
|
||||
.. raw:: html
|
||||
|
||||
|
||||
<div class="panel callout radius animated">
|
||||
<div class="row">
|
||||
<div class="medium-3 columns">
|
||||
<h5 id="getting-started">Upgrade Scylla Open Source</h5>
|
||||
</div>
|
||||
<div class="medium-9 columns">
|
||||
|
||||
Procedures for upgrading to a newer version of Scylla Open Source. For the latest open source versions see :doc:`here </upgrade/upgrade-opensource/index>`.
|
||||
|
||||
* :doc:`Upgrade Guide - Scylla 2.0 to 2.1 </upgrade/upgrade-opensource/upgrade-guide-from-2.0-to-2.1/index>`
|
||||
|
||||
* :doc:`Upgrade Guide - Scylla 1.x maintenance release </upgrade/upgrade-opensource/upgrade-guide-from-1.x.y-to-1.x.z/index>`
|
||||
|
||||
* :doc:`Upgrade Guide - Scylla 1.7 to 2.0 </upgrade/upgrade-opensource/upgrade-guide-from-1.7-to-2.0/index>`
|
||||
|
||||
* :doc:`Upgrade Guide - Scylla 1.6 to 1.7 </upgrade/upgrade-opensource/upgrade-guide-from-1.6-to-1.7/index>`
|
||||
|
||||
* :doc:`Upgrade Guide - Scylla 1.5 to 1.6 </upgrade/upgrade-opensource/upgrade-guide-from-1.5-to-1.6/index>`
|
||||
|
||||
* :doc:`Upgrade Guide - Scylla 1.4 to 1.5 </upgrade/upgrade-opensource/upgrade-guide-from-1.4-to-1.5/index>`
|
||||
|
||||
* :doc:`Upgrade Guide - Scylla 1.3 to 1.4 </upgrade/upgrade-opensource/upgrade-guide-from-1.3-to-1.4/index>`
|
||||
|
||||
* :doc:`Upgrade Guide - Scylla 1.2 to 1.3 </upgrade/upgrade-opensource/upgrade-guide-from-1.2-to-1.3/index>`
|
||||
|
||||
* :doc:`Upgrade Guide - Scylla 1.1 to 1.2 </upgrade/upgrade-opensource/upgrade-guide-from-1.1-to-1.2/index>`
|
||||
|
||||
* :doc:`Upgrade Guide - Scylla 1.0 to 1.1 </upgrade/upgrade-opensource/upgrade-guide-from-1.0-to-1.1/index>`
|
||||
|
||||
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
@@ -1,16 +0,0 @@
|
||||
=================================
|
||||
Upgrade - Scylla 1.0 to 1.1
|
||||
=================================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:hidden:
|
||||
|
||||
Fedora, Red Hat Enterprise Linux, and CentOS <upgrade-guide-from-1.0-to-1.1-rpm>
|
||||
Ubuntu <upgrade-guide-from-1.0-to-1.1-ubuntu>
|
||||
|
||||
|
||||
Upgrade guides are available for:
|
||||
|
||||
* :doc:`Upgrade Scylla from 1.0.x to 1.1.y on Fedora, Red Hat Enterprise Linux, and CentOS <upgrade-guide-from-1.0-to-1.1-rpm>`
|
||||
* :doc:`Upgrade Scylla from 1.0.x to 1.1.y on Ubuntu <upgrade-guide-from-1.0-to-1.1-ubuntu>`
|
||||
@@ -1,183 +0,0 @@
|
||||
==================================================================
|
||||
Upgrade Guide - Scylla 1.0 to 1.1 for Red Hat Enterprise or CentOS
|
||||
==================================================================
|
||||
This document is a step by step procedure for upgrading from Scylla 1.0
|
||||
to Scylla 1.1, and rollback to 1.0 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
-------------------
|
||||
|
||||
This guide covers upgrading Scylla from the following versions: 1.0.x to
|
||||
Scylla version 1.1.y, on the following platforms:
|
||||
|
||||
- Fedora 22
|
||||
|
||||
- Red Hat Enterprise Linux, version 7 and later
|
||||
|
||||
- CentOS, version 7 and later
|
||||
|
||||
Upgrade Procedure
|
||||
-----------------
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does not require full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
- backup the data
|
||||
- check your current release
|
||||
- backup configuration file
|
||||
- download and install new Scylla packages
|
||||
- gracefully restart Scylla
|
||||
- validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to
|
||||
the next node before validating the node is up and running with the new
|
||||
version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
- Not to use new 1.1 features
|
||||
- Not to run administration functions, like repairs, refresh, rebuild
|
||||
or add or remove nodes
|
||||
- Not to apply schema changes
|
||||
|
||||
Backup the data
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
Before any major procedure, like an upgrade, it is recommended to backup
|
||||
all the data to an external device. In Scylla, backup is done using the
|
||||
``nodetool snapshot`` command. For **each** node in the cluster, run the
|
||||
following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all
|
||||
the directories having this name under ``/var/lib/scylla`` to a backup
|
||||
device.
|
||||
|
||||
Upgrade steps
|
||||
-------------
|
||||
|
||||
Backup configuration file
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-1.0
|
||||
sudo rm -rf /etc/yum.repos.d/scylla.repo
|
||||
|
||||
Download and install the new release
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Before upgrading, check what version you are running now using
|
||||
``rpm -qa | grep scylla-server``. You should use the same version in
|
||||
case you want to
|
||||
:ref:`rollback <upgrade-1.0-1.1-rpm-rollback-procedure>` the
|
||||
upgrade. If you are not running a 1.0.x version, stop right here! This
|
||||
guide only covers 1.0.x to 1.1.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the `Scylla RPM repo <http://www.scylladb.com/download/#fndtn-RPM>`_ to
|
||||
**1.1**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum clean all
|
||||
sudo yum update scylla-server scylla-jmx scylla-tools -y
|
||||
|
||||
Gracefully restart the node
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo systemctl restart scylla-server.service
|
||||
|
||||
Validate
|
||||
~~~~~~~~
|
||||
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all**
|
||||
nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``journalctl _COMM=scylla`` to check there are no new errors in
|
||||
the log.
|
||||
3. Check again after 2 minutes, to validate no new issues are
|
||||
introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node
|
||||
in the cluster.
|
||||
|
||||
.. _upgrade-1.0-1.1-rpm-rollback-procedure:
|
||||
|
||||
Rollback Procedure
|
||||
------------------
|
||||
|
||||
The following procedure describes a rollback from Scylla release 1.1.x
|
||||
to 1.0.y. Apply this procedure if an upgrade from 1.0 to 1.1 failed
|
||||
before completing on all nodes. Use this procedure only for nodes you
|
||||
upgraded to 1.1.
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full
|
||||
cluster shutdown. For each of the nodes rollback to 1.0, you will:
|
||||
|
||||
- retrieve the old Scylla packages
|
||||
- drain the node
|
||||
- restore the configuration file
|
||||
- restart Scylla
|
||||
- validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to
|
||||
the next node before validating the node is up and running with the new
|
||||
version.
|
||||
|
||||
Rollback steps
|
||||
--------------
|
||||
|
||||
Download and install the old release
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/yum.repos.d/scylla.repo
|
||||
|
||||
2. Update the `Scylla RPM repo <http://www.scylladb.com/download/#fndtn-RPM>`_ to
|
||||
**1.0**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum clean all
|
||||
sudo yum downgrade scylla-server scylla-jmx scylla-tools -y
|
||||
|
||||
Gracefully shutdown Scylla
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Restore the configuration file
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-1.0 /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server.service
|
||||
|
||||
Validate
|
||||
~~~~~~~~
|
||||
|
||||
Check upgrade instruction above for validation. Once you are sure the
|
||||
node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,185 +0,0 @@
|
||||
============================================
|
||||
Upgrade Guide - Scylla 1.0 to 1.1 for Ubuntu
|
||||
============================================
|
||||
This document is a step by step procedure for upgrading from Scylla 1.0
|
||||
to Scylla 1.1, and rollback to 1.0 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
-------------------
|
||||
|
||||
This guide covers upgrading Scylla from the following versions: 1.0.x to
|
||||
Scylla version 1.1.y on the following platform:
|
||||
|
||||
- Ubuntu 14.04
|
||||
|
||||
Upgrade Procedure
|
||||
-----------------
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does not require full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
- backup the data
|
||||
- check your current release
|
||||
- backup configuration file
|
||||
- download and install new Scylla packages
|
||||
- gracefully restart Scylla
|
||||
- validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to
|
||||
the next node before validating the node is up and running with the new
|
||||
version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
- Not to use new 1.1 features
|
||||
- Not to run administration functions, like repairs, refresh, rebuild
|
||||
or add or remove nodes
|
||||
- Not to apply schema changes
|
||||
|
||||
Backup the data
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
Before any major procedure, like an upgrade, it is recommended to backup
|
||||
all the data to an external device. In Scylla, backup is done using the
|
||||
``nodetool snapshot`` command. For **each** node in the cluster, run the
|
||||
following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all
|
||||
the directories having this name under ``/var/lib/scylla`` to a backup
|
||||
device.
|
||||
|
||||
Upgrade steps
|
||||
-------------
|
||||
|
||||
Backup configuration file
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-1.0
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
Download and install the new release
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Before upgrading, check what version you are running now using
|
||||
``dpkg -s scylla-server``. You should use the same version in case you
|
||||
want to :ref:`rollback <upgrade-1.0-1.1-ubuntu-rollback-procedure>`
|
||||
the upgrade. If you are not running a 1.0.x version, stop right here!
|
||||
This guide only covers 1.0.x to 1.1.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the `Scylla deb repo <http://www.scylladb.com/download/#fndtn-deb>`_ to
|
||||
**1.1**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get upgrade scylla-server scylla-jmx scylla-tools
|
||||
|
||||
Answer ‘y’ to the first two questions and 'n' when asked to overwrite
|
||||
``scylla.yaml``.
|
||||
|
||||
Gracefully restart the node
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server restart
|
||||
|
||||
Validate
|
||||
~~~~~~~~
|
||||
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all**
|
||||
nodes, including the one you just upgraded, are in UN status.
|
||||
2. Check ``/var/log/upstart/scylla-server.log`` and ``/var/log/syslog``
|
||||
to validate there are no errors.
|
||||
3. Check again after 2 minutes, to validate no new issues are
|
||||
introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node
|
||||
in the cluster.
|
||||
|
||||
.. _upgrade-1.0-1.1-ubuntu-rollback-procedure:
|
||||
|
||||
Rollback Procedure
|
||||
------------------
|
||||
|
||||
The following procedure describes a rollback from Scylla release 1.1.x
|
||||
to 1.0.y. Apply this procedure if an upgrade from 1.0 to 1.1 failed
|
||||
before completing on all nodes. Use this procedure only for nodes you
|
||||
upgraded to 1.1
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full
|
||||
cluster shutdown. For each of the nodes rollback to 1.0, you will:
|
||||
|
||||
- retrieve the old Scylla packages
|
||||
- drain the node
|
||||
- restore the configuration file
|
||||
- restart Scylla
|
||||
- validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to
|
||||
the next node before validating the node is up and running with the new
|
||||
version.
|
||||
|
||||
Rollback steps
|
||||
--------------
|
||||
|
||||
Download and install the old release
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
2. Update the `Scylla deb repo <http://www.scylladb.com/download/#fndtn-deb>`_ to
|
||||
**1.0**
|
||||
3. Install
|
||||
|
||||
::
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get remove --assume-yes scylla-server scylla-jmx scylla-tools
|
||||
sudo apt-get install scylla-server scylla-jmx scylla-tools
|
||||
|
||||
Answer ‘y’ to the first two questions and 'n' when asked to overwrite
|
||||
``scylla.yaml``.
|
||||
|
||||
Gracefully shutdown Scylla
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo systemctl stop scylla-server.service
|
||||
|
||||
Restore the configuration file
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-1.0 /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
~~~~~~~~
|
||||
|
||||
Check upgrade instruction above for validation. Once you are sure the
|
||||
node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,33 +0,0 @@
|
||||
=================================
|
||||
Upgrade - Scylla 1.1 to 1.2
|
||||
=================================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:hidden:
|
||||
|
||||
Fedora, Red Hat Enterprise Linux, and CentOS <upgrade-guide-from-1.1-to-1.2-rpm>
|
||||
Ubuntu <upgrade-guide-from-1.1-to-1.2-ubuntu>
|
||||
|
||||
|
||||
.. raw:: html
|
||||
|
||||
|
||||
<div class="panel callout radius animated">
|
||||
<div class="row">
|
||||
<div class="medium-3 columns">
|
||||
<h5 id="getting-started">Upgrade Scylla from 1.1 to 1.2</h5>
|
||||
</div>
|
||||
<div class="medium-9 columns">
|
||||
|
||||
Upgrade guides are available for:
|
||||
|
||||
* :doc:`Upgrade Scylla from 1.1.x to 1.2.y on Fedora, Red Hat Enterprise Linux, and CentOS <upgrade-guide-from-1.1-to-1.2-rpm>`
|
||||
* :doc:`Upgrade Scylla from 1.1.x to 1.2.y on Ubuntu <upgrade-guide-from-1.1-to-1.2-ubuntu>`
|
||||
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -1,185 +0,0 @@
|
||||
==================================================================
|
||||
Upgrade Guide - Scylla 1.1 to 1.2 for Red Hat Enterprise or CentOS
|
||||
==================================================================
|
||||
This document is a step by step procedure for upgrading from Scylla 1.1
|
||||
to Scylla 1.2, and rollback to 1.1 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
-------------------
|
||||
|
||||
This guide covers upgrading Scylla from the following versions: 1.1.x to
|
||||
Scylla version 1.2.y, on the following platforms:
|
||||
|
||||
- Fedora 22
|
||||
|
||||
- Red Hat Enterprise Linux, version 7 and later
|
||||
|
||||
- CentOS, version 7 and later
|
||||
|
||||
Upgrade Procedure
|
||||
-----------------
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does not require full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
- backup the data
|
||||
- check your current release
|
||||
- backup configuration file
|
||||
- download and install new Scylla packages
|
||||
- gracefully restart Scylla
|
||||
- validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to
|
||||
the next node before validating the node is up and running with the new
|
||||
version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
- Not to use new 1.2 features
|
||||
- Not to run administration functions, like repairs, refresh, rebuild
|
||||
or add or remove nodes
|
||||
- Not to apply schema changes
|
||||
|
||||
Backup the data
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
Before any major procedure, like an upgrade, it is recommended to backup
|
||||
all the data to an external device. In Scylla, backup is done using the
|
||||
``nodetool snapshot`` command. For **each** node in the cluster, run the
|
||||
following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all
|
||||
the directories having this name under ``/var/lib/scylla`` to a backup
|
||||
device.
|
||||
|
||||
Upgrade steps
|
||||
-------------
|
||||
|
||||
Backup configuration file
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-1.1
|
||||
sudo rm -rf /etc/yum.repos.d/scylla.repo
|
||||
|
||||
Download and install the new release
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Before upgrading, check what version you are running now using
|
||||
``rpm -qa | grep scylla-server``. You should use the same version in
|
||||
case you want to
|
||||
:ref:`rollback <upgrade-1.1-1.2-rpm-rollback-procedure>` the
|
||||
upgrade. If you are not running a 1.1.x version, stop right here! This
|
||||
guide only covers 1.1.x to 1.2.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the `Scylla RPM repo <http://www.scylladb.com/download/#fndtn-RPM>`_ to
|
||||
**1.2**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum clean all
|
||||
sudo yum update scylla-server scylla-jmx scylla-tools -y
|
||||
sudo yum install scylla
|
||||
|
||||
Gracefully restart the node
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo systemctl restart scylla-server.service
|
||||
|
||||
Validate
|
||||
~~~~~~~~
|
||||
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all**
|
||||
nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``journalctl _COMM=scylla`` to check there are no new errors in
|
||||
the log.
|
||||
3. Check again after 2 minutes, to validate no new issues are
|
||||
introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node
|
||||
in the cluster.
|
||||
|
||||
.. _upgrade-1.1-1.2-rpm-rollback-procedure:
|
||||
|
||||
Rollback Procedure
|
||||
------------------
|
||||
|
||||
The following procedure describes a rollback from Scylla release 1.2.x
|
||||
to 1.1.y. Apply this procedure if an upgrade from 1.1 to 1.2 failed
|
||||
before completing on all nodes. Use this procedure only for nodes you
|
||||
upgraded to 1.2.
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full
|
||||
cluster shutdown. For each of the nodes rollback to 1.1, you will:
|
||||
|
||||
- retrieve the old Scylla packages
|
||||
- drain the node
|
||||
- restore the configuration file
|
||||
- restart Scylla
|
||||
- validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to
|
||||
the next node before validating the node is up and running with the new
|
||||
version.
|
||||
|
||||
Rollback steps
|
||||
--------------
|
||||
|
||||
Download and install the old release
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/yum.repos.d/scylla.repo
|
||||
|
||||
2. Update the `Scylla RPM repo <http://www.scylladb.com/download/#fndtn-RPM>`_ to
|
||||
**1.1**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum clean all
|
||||
sudo yum remove scylla
|
||||
sudo yum downgrade scylla-server scylla-jmx scylla-tools -y
|
||||
|
||||
Gracefully shutdown Scylla
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Restore the configuration file
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-1.1 /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server.service
|
||||
|
||||
Validate
|
||||
~~~~~~~~
|
||||
|
||||
Check upgrade instruction above for validation. Once you are sure the
|
||||
node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,191 +0,0 @@
|
||||
============================================
|
||||
Upgrade Guide - Scylla 1.1 to 1.2 for Ubuntu
|
||||
============================================
|
||||
This document is a step by step procedure for upgrading from Scylla 1.1
|
||||
to Scylla 1.2, and rollback to 1.1 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
-------------------
|
||||
|
||||
This guide covers upgrading Scylla from the following versions: 1.1.x to
|
||||
Scylla version 1.2.y on the following platform:
|
||||
|
||||
- Ubuntu 14.04
|
||||
|
||||
Upgrade Procedure
|
||||
-----------------
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does not require full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
- backup the data
|
||||
- check your current release
|
||||
- backup configuration file
|
||||
- download and install new Scylla packages
|
||||
- gracefully restart Scylla
|
||||
- validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to
|
||||
the next node before validating the node is up and running with the new
|
||||
version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
- Not to use new 1.2 features
|
||||
- Not to run administration functions, like repairs, refresh, rebuild
|
||||
or add or remove nodes
|
||||
- Not to apply schema changes
|
||||
|
||||
Backup the data
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
Before any major procedure, like an upgrade, it is recommended to backup
|
||||
all the data to an external device. In Scylla, backup is done using the
|
||||
``nodetool snapshot`` command. For **each** node in the cluster, run the
|
||||
following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all
|
||||
the directories having this name under ``/var/lib/scylla`` to a backup
|
||||
device.
|
||||
|
||||
Upgrade steps
|
||||
-------------
|
||||
|
||||
Backup configuration file
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-1.1
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
Download and install the new release
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Before upgrading, check what version you are running now using
|
||||
``dpkg -s scylla-server``. You should use the same version in case you
|
||||
want to :ref:`rollback <upgrade-1.1-1.2-ubuntu-rollback-procedure>`
|
||||
the upgrade. If you are not running a 1.1.x version, stop right here!
|
||||
This guide only covers 1.1.x to 1.2.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the `Scylla deb repo <http://www.scylladb.com/download/#fndtn-deb>`_ to
|
||||
**1.2**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get remove scylla-server scylla-jmx scylla-tools
|
||||
|
||||
Answer 'y'
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get install scylla
|
||||
|
||||
Answer ‘y’ to the first two questions and 'n' when asked to overwrite
|
||||
``scylla.yaml``.
|
||||
|
||||
Gracefully restart the node
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server restart
|
||||
|
||||
Validate
|
||||
~~~~~~~~
|
||||
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all**
|
||||
nodes, including the one you just upgraded, are in UN status.
|
||||
2. Check ``/var/log/upstart/scylla-server.log`` and ``/var/log/syslog``
|
||||
to validate there are no errors.
|
||||
3. Check again after 2 minutes, to validate no new issues are
|
||||
introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node
|
||||
in the cluster.
|
||||
|
||||
.. _upgrade-1.1-1.2-ubuntu-rollback-procedure:
|
||||
|
||||
Rollback Procedure
|
||||
------------------
|
||||
|
||||
The following procedure describes a rollback from Scylla release 1.2.x
|
||||
to 1.1.y. Apply this procedure if an upgrade from 1.1 to 1.2 failed
|
||||
before completing on all nodes. Use this procedure only for nodes you
|
||||
upgraded to 1.2
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full
|
||||
cluster shutdown. For each of the nodes rollback to 1.1, you will:
|
||||
|
||||
- retrieve the old Scylla packages
|
||||
- drain the node
|
||||
- restore the configuration file
|
||||
- restart Scylla
|
||||
- validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to
|
||||
the next node before validating the node is up and running with the new
|
||||
version.
|
||||
|
||||
Rollback steps
|
||||
--------------
|
||||
|
||||
Download and install the old release
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
2. Update the `Scylla deb repo <http://www.scylladb.com/download/#fndtn-deb>`_ to
|
||||
**1.1**
|
||||
3. Install
|
||||
|
||||
::
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get remove --assume-yes scylla-conf scylla-server scylla-jmx scylla-tools
|
||||
sudo apt-get install scylla-server scylla-jmx scylla-tools
|
||||
|
||||
Answer ‘y’ to the first question and 'n' when asked to overwrite
|
||||
``scylla.yaml``.
|
||||
|
||||
Gracefully shutdown Scylla
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Restore the configuration file
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-1.1 /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
~~~~~~~~
|
||||
|
||||
Check upgrade instruction above for validation. Once you are sure the
|
||||
node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,32 +0,0 @@
|
||||
=================================
|
||||
Upgrade Guide - Scylla 1.2 to 1.3
|
||||
=================================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:hidden:
|
||||
|
||||
Red Hat Enterprise Linux and CentOS <upgrade-guide-from-1.2-to-1.3-rpm>
|
||||
Ubuntu <upgrade-guide-from-1.2-to-1.3-ubuntu>
|
||||
|
||||
.. raw:: html
|
||||
|
||||
<div class="panel callout radius animated">
|
||||
<div class="row">
|
||||
<div class="medium-3 columns">
|
||||
<h5 id="getting-started">Upgrade Scylla from 1.2 to 1.3</h5>
|
||||
</div>
|
||||
<div class="medium-9 columns">
|
||||
|
||||
|
||||
Upgrade guides are available for:
|
||||
|
||||
* :doc:`Upgrade Scylla from 1.2.x to 1.3.y on Red Hat Enterprise Linux and CentOS <upgrade-guide-from-1.2-to-1.3-rpm>`
|
||||
* :doc:`Upgrade Scylla from 1.2.x to 1.3.y on Ubuntu <upgrade-guide-from-1.2-to-1.3-ubuntu>`
|
||||
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -1,186 +0,0 @@
|
||||
==================================================================
|
||||
Upgrade Guide - Scylla 1.2 to 1.3 for Red Hat Enterprise or CentOS
|
||||
==================================================================
|
||||
This document is a step by step procedure for upgrading from Scylla 1.2
|
||||
to Scylla 1.3, and rollback to 1.2 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
-------------------
|
||||
|
||||
This guide covers upgrading Scylla from the following versions: 1.2.x to
|
||||
Scylla version 1.3.y, on the following platforms:
|
||||
|
||||
- Red Hat Enterprise Linux, version 7 and later
|
||||
|
||||
- CentOS, version 7 and later
|
||||
|
||||
- Fedora is no longer supported (packages are no longer provided)
|
||||
|
||||
Upgrade Procedure
|
||||
-----------------
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does not require full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
- backup the data
|
||||
- check your current release
|
||||
- backup configuration file
|
||||
- download and install new Scylla packages
|
||||
- gracefully restart Scylla
|
||||
- validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to
|
||||
the next node before validating the node is up and running with the new
|
||||
version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
- Not to use new 1.3 features
|
||||
- Not to run administration functions, like repairs, refresh, rebuild
|
||||
or add or remove nodes
|
||||
- Not to apply schema changes
|
||||
|
||||
Backup the data
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
Before any major procedure, like an upgrade, it is recommended to backup
|
||||
all the data to an external device. In Scylla, backup is done using the
|
||||
``nodetool snapshot`` command. For **each** node in the cluster, run the
|
||||
following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all
|
||||
the directories having this name under ``/var/lib/scylla`` to a backup
|
||||
device.
|
||||
|
||||
Upgrade steps
|
||||
-------------
|
||||
|
||||
Backup configuration file
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-1.2
|
||||
sudo rm -rf /etc/yum.repos.d/scylla.repo
|
||||
|
||||
Download and install the new release
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Before upgrading, check what version you are running now using
|
||||
``rpm -qa | grep scylla-server``. You should use the same version in
|
||||
case you want to
|
||||
:ref:`rollback <upgrade-1.2-1.3-rpm-rollback-procedure>` the
|
||||
upgrade. If you are not running a 1.2.x version, stop right here! This
|
||||
guide only covers 1.2.x to 1.3.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the `Scylla RPM repo <http://www.scylladb.com/download/#fndtn-RPM>`_ to
|
||||
**1.3**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum clean all
|
||||
sudo yum update scylla scylla-server scylla-jmx scylla-tools scylla-conf scylla-kernel-conf -y
|
||||
|
||||
Gracefully restart the node
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server restart
|
||||
|
||||
Validate
|
||||
~~~~~~~~
|
||||
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all**
|
||||
nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use
|
||||
``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"``
|
||||
to check scylla version.
|
||||
3. Use ``journalctl _COMM=scylla`` to check there are no new errors in
|
||||
the log.
|
||||
4. Check again after 2 minutes, to validate no new issues are
|
||||
introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node
|
||||
in the cluster.
|
||||
|
||||
.. _upgrade-1.2-1.3-rpm-rollback-procedure:
|
||||
|
||||
Rollback Procedure
|
||||
------------------
|
||||
|
||||
The following procedure describes a rollback from Scylla release 1.3.x
|
||||
to 1.2.y. Apply this procedure if an upgrade from 1.2 to 1.3 failed
|
||||
before completing on all nodes. Use this procedure only for nodes you
|
||||
upgraded to 1.3
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full
|
||||
cluster shutdown. For each of the nodes rollback to 1.2, you will:
|
||||
|
||||
- retrieve the old Scylla packages
|
||||
- drain the node
|
||||
- restore the configuration file
|
||||
- restart Scylla
|
||||
- validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to
|
||||
the next node before validating the node is up and running with the new
|
||||
version.
|
||||
|
||||
Rollback steps
|
||||
--------------
|
||||
|
||||
Download and install the old release
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/yum.repos.d/scylla.repo
|
||||
|
||||
2. Update the `Scylla RPM repo <http://www.scylladb.com/download/#fndtn-RPM>`_ to
|
||||
**1.2**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum clean all
|
||||
sudo yum downgrade scylla scylla-server scylla-jmx scylla-tools scylla-conf scylla-kernel-conf -y
|
||||
|
||||
Gracefully shutdown Scylla
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Restore the configuration file
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-1.2 /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
~~~~~~~~
|
||||
|
||||
Check upgrade instruction above for validation. Once you are sure the
|
||||
node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,187 +0,0 @@
|
||||
============================================
|
||||
Upgrade Guide - Scylla 1.2 to 1.3 for Ubuntu
|
||||
============================================
|
||||
This document is a step by step procedure for upgrading from Scylla 1.2
|
||||
to Scylla 1.3, and rollback to 1.2 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
-------------------
|
||||
|
||||
This guide covers upgrading Scylla from the following versions: 1.2.x to
|
||||
Scylla version 1.3.y on the following platform:
|
||||
|
||||
- Ubuntu 14.04
|
||||
- Ubuntu 16.04
|
||||
|
||||
Upgrade Procedure
|
||||
-----------------
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does not require full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
- backup the data
|
||||
- check your current release
|
||||
- backup configuration file
|
||||
- download and install new Scylla packages
|
||||
- gracefully restart Scylla
|
||||
- validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to
|
||||
the next node before validating the node is up and running with the new
|
||||
version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
- Not to use new 1.3 features
|
||||
- Not to run administration functions, like repairs, refresh, rebuild
|
||||
or add or remove nodes
|
||||
- Not to apply schema changes
|
||||
|
||||
Backup the data
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
Before any major procedure, like an upgrade, it is recommended to backup
|
||||
all the data to an external device. In Scylla, backup is done using the
|
||||
``nodetool snapshot`` command. For **each** node in the cluster, run the
|
||||
following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all
|
||||
the directories having this name under ``/var/lib/scylla`` to a backup
|
||||
device.
|
||||
|
||||
Upgrade steps
|
||||
-------------
|
||||
|
||||
Backup configuration file
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-1.2
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
Download and install the new release
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Before upgrading, check what version you are running now using
|
||||
``dpkg -s scylla-server``. You should use the same version in case you
|
||||
want to :ref:`rollback <upgrade-1.2-1.3-ubuntu-rollback-procedure>`
|
||||
the upgrade. If you are not running a 1.2.x version, stop right here!
|
||||
This guide only covers 1.2.x to 1.3.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the `Scylla deb repo <http://www.scylladb.com/download/#fndtn-deb>`_ to
|
||||
**1.3**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Gracefully restart the node
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server restart
|
||||
|
||||
Validate
|
||||
~~~~~~~~
|
||||
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all**
|
||||
nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use
|
||||
``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"``
|
||||
to check scylla version.
|
||||
3. Check ``/var/log/upstart/scylla-server.log`` and ``/var/log/syslog``
|
||||
to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are
|
||||
introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node
|
||||
in the cluster.
|
||||
|
||||
.. _upgrade-1.2-1.3-ubuntu-rollback-procedure:
|
||||
|
||||
Rollback Procedure
|
||||
------------------
|
||||
|
||||
The following procedure describes a rollback from Scylla release 1.3.x
|
||||
to 1.2.y. Apply this procedure if an upgrade from 1.2 to 1.3 failed
|
||||
before completing on all nodes. Use this procedure only for nodes you
|
||||
upgraded to 1.3
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full
|
||||
cluster shutdown. For each of the nodes rollback to 1.2, you will:
|
||||
|
||||
- retrieve the old Scylla packages
|
||||
- drain the node
|
||||
- restore the configuration file
|
||||
- restart Scylla
|
||||
- validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to
|
||||
the next node before validating the node is up and running with the new
|
||||
version.
|
||||
|
||||
Rollback steps
|
||||
--------------
|
||||
|
||||
Download and install the old release
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
2. Update the `Scylla deb repo <http://www.scylladb.com/download/#fndtn-deb>`_ to
|
||||
**1.2**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get remove scylla scylla-server scylla-jmx scylla-tools scylla-conf scylla-kernel-conf -y
|
||||
sudo apt-get install scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Gracefully shutdown Scylla
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Restore the configuration file
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-1.2 /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
~~~~~~~~
|
||||
|
||||
Check upgrade instruction above for validation. Once you are sure the
|
||||
node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,34 +0,0 @@
|
||||
=================================
|
||||
Upgrade Guide - Scylla 1.3 to 1.4
|
||||
=================================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:hidden:
|
||||
|
||||
Red Hat Enterprise Linux and CentOS <upgrade-guide-from-1.3-to-1.4-rpm>
|
||||
Ubuntu <upgrade-guide-from-1.3-to-1.4-ubuntu>
|
||||
|
||||
.. raw:: html
|
||||
|
||||
|
||||
<div class="panel callout radius animated">
|
||||
<div class="row">
|
||||
<div class="medium-3 columns">
|
||||
<h5 id="getting-started">Upgrade Scylla from 1.3 to 1.4</h5>
|
||||
</div>
|
||||
<div class="medium-9 columns">
|
||||
|
||||
|
||||
Upgrade guides are available for:
|
||||
|
||||
* :doc:`Upgrade Scylla from 1.3.x to 1.4.y on Red Hat Enterprise Linux and CentOS <upgrade-guide-from-1.3-to-1.4-rpm>`
|
||||
|
||||
* :doc:`Upgrade Scylla from 1.3.x to 1.4.y on Ubuntu <upgrade-guide-from-1.3-to-1.4-ubuntu>`
|
||||
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -1,202 +0,0 @@
|
||||
======================================================================
|
||||
Upgrade Guide - Scylla 1.3 to 1.4 for Red Hat Enterprise 7 or CentOS 7
|
||||
======================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 1.3
|
||||
to Scylla 1.4, and rollback to 1.3 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
|
||||
This guide covers upgrading Scylla from the following versions: 1.3.x to
|
||||
Scylla version 1.4.y, on the following platforms:
|
||||
|
||||
- Red Hat Enterprise Linux, version 7 and later
|
||||
|
||||
- CentOS, version 7 and later
|
||||
|
||||
- Packages are no longer provided for Fedora
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does not require full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
- drain node and backup the data
|
||||
- check your current release
|
||||
- backup configuration file
|
||||
- stop Scylla
|
||||
- download and install new Scylla packages
|
||||
- start Scylla
|
||||
- validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to
|
||||
the next node before validating the node is up and running with the new
|
||||
version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
- Not to use new 1.4 features
|
||||
- Not to run administration functions, like repairs, refresh, rebuild
|
||||
or add or remove nodes
|
||||
- Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
|
||||
Before any major procedure, like an upgrade, it is recommended to backup
|
||||
all the data to an external device. In Scylla, backup is done using the
|
||||
``nodetool snapshot`` command. For **each** node in the cluster, run the
|
||||
following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all
|
||||
the directories having this name under ``/var/lib/scylla`` to a backup
|
||||
device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed
|
||||
by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of
|
||||
space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-1.3
|
||||
|
||||
Stop Scylla
|
||||
-----------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
|
||||
Before upgrading, check what version you are running now using
|
||||
``rpm -qa | grep scylla-server``. You should use the same version in
|
||||
case you want to
|
||||
:ref:`rollback <upgrade-1.3-1.4-rpm-rollback-procedure>` the
|
||||
upgrade. If you are not running a 1.3.x version, stop right here! This
|
||||
guide only covers 1.3.x to 1.4.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the `Scylla RPM repo <http://www.scylladb.com/download/centos_rpm>`_ to
|
||||
**1.4**
|
||||
2. Install
|
||||
|
||||
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum update scylla\* -y
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all**
|
||||
nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use
|
||||
``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"``
|
||||
to check scylla version.
|
||||
3. Use ``journalctl _COMM=scylla`` to check there are no new errors in
|
||||
the log.
|
||||
4. Check again after 2 minutes, to validate no new issues are
|
||||
introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node
|
||||
in the cluster.
|
||||
|
||||
.. _upgrade-1.3-1.4-rpm-rollback-procedure:
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla release 1.4.x
|
||||
to 1.3.y. Apply this procedure if an upgrade from 1.3 to 1.4 failed
|
||||
before completing on all nodes. Use this procedure only for nodes you
|
||||
upgraded to 1.4
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full
|
||||
cluster shutdown. For each of the nodes you roll back to 1.3, you will:
|
||||
|
||||
- drain the node and stop Scylla
|
||||
- retrieve the old Scylla packages
|
||||
- restore the configuration file
|
||||
- restart Scylla
|
||||
- validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to
|
||||
the next node before validating the node is up and running with the new
|
||||
version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/yum.repos.d/scylla.repo
|
||||
|
||||
2. Update the `Scylla RPM repo <http://www.scylladb.com/download/centos_rpm>`_ to
|
||||
**1.3**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum clean all
|
||||
sudo yum downgrade scylla\* -y
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-1.3 /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
|
||||
Check upgrade instruction above for validation. Once you are sure the
|
||||
node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,201 +0,0 @@
|
||||
===========================================================
|
||||
Upgrade Guide - Scylla 1.3 to 1.4 for Ubuntu 14.04 or 16.04
|
||||
===========================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 1.3
|
||||
to Scylla 1.4, and rollback to 1.3 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
|
||||
This guide covers upgrading Scylla from the following versions: 1.3.x to
|
||||
Scylla version 1.4.y on the following platform:
|
||||
|
||||
- Ubuntu 14.04
|
||||
- Ubuntu 16.04
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full
|
||||
cluster shutdown. For each of the nodes in the cluster, you will:
|
||||
|
||||
- drain node and backup the data
|
||||
- check your current release
|
||||
- backup configuration file
|
||||
- stop Scylla
|
||||
- download and install new Scylla packages
|
||||
- start Scylla
|
||||
- validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to
|
||||
the next node before validating the node is up and running with the new
|
||||
version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
- Not to use new 1.4 features
|
||||
- Not to run administration functions, like repairs, refresh, rebuild
|
||||
or add or remove nodes
|
||||
- Not to apply schema changes
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
|
||||
Before any major procedure, like an upgrade, it is recommended to backup
|
||||
all the data to an external device. In Scylla, backup is done using the
|
||||
``nodetool snapshot`` command. For **each** node in the cluster, run the
|
||||
following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all
|
||||
the directories having this name under ``/var/lib/scylla`` to a backup
|
||||
device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed
|
||||
by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of
|
||||
space.
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-1.3
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
|
||||
Before upgrading, check what version you are running now using
|
||||
``dpkg -s scylla-server``. You should use the same version in case you
|
||||
want to :ref:`rollback <upgrade-1.3-1.4-ubuntu-rollback-procedure>`
|
||||
the upgrade. If you are not running a 1.3.x version, stop right here!
|
||||
This guide only covers 1.3.x to 1.4.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the `Scylla deb repo <http://www.scylladb.com/download/#fndtn-deb>`_ to
|
||||
**1.4**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all**
|
||||
nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use
|
||||
``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"``
|
||||
to check scylla version.
|
||||
3. Check scylla-server log (check ``/var/log/upstart/scylla-server.log``
|
||||
for Ubuntu 14.04, execute ``journalctl _COMM=scylla`` for Ubuntu
|
||||
16.04) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are
|
||||
introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node
|
||||
in the cluster.
|
||||
|
||||
.. _upgrade-1.3-1.4-ubuntu-rollback-procedure:
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
The following procedure describes a rollback from Scylla release 1.4.x
|
||||
to 1.3.y. Apply this procedure if an upgrade from 1.3 to 1.4 failed
|
||||
before completing on all nodes. Use this procedure only for nodes you
|
||||
upgraded to 1.4
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full
|
||||
cluster shutdown. For each of the nodes you roll back to 1.3, you will:
|
||||
|
||||
- drain the node and stop Scylla
|
||||
- retrieve the old Scylla packages
|
||||
- restore the configuration file
|
||||
- restart Scylla
|
||||
- validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to
|
||||
the next node before validating the node is up and running with the new
|
||||
version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
2. Update the `Scylla deb repo <http://www.scylladb.com/download/#fndtn-deb>`_ to
|
||||
**1.3**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get remove scylla\* -y
|
||||
sudo apt-get install scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-1.3 /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
|
||||
Check upgrade instruction above for validation. Once you are sure the
|
||||
node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,32 +0,0 @@
|
||||
=================================
|
||||
Upgrade Guide - Scylla 1.4 to 1.5
|
||||
=================================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:hidden:
|
||||
|
||||
Red Hat Enterprise Linux and CentOS <upgrade-guide-from-1.4-to-1.5-rpm>
|
||||
Ubuntu <upgrade-guide-from-1.4-to-1.5-ubuntu>
|
||||
|
||||
.. raw:: html
|
||||
|
||||
|
||||
<div class="panel callout radius animated">
|
||||
<div class="row">
|
||||
<div class="medium-3 columns">
|
||||
<h5 id="getting-started">Upgrade Scylla from 1.4 to 1.5</h5>
|
||||
</div>
|
||||
<div class="medium-9 columns">
|
||||
|
||||
Upgrade guides are available for:
|
||||
|
||||
* :doc:`Upgrade Scylla from 1.4.x to 1.5.y on Red Hat Enterprise Linux and CentOS <upgrade-guide-from-1.4-to-1.5-rpm>`
|
||||
* :doc:`Upgrade Scylla from 1.4.x to 1.5.y on Ubuntu <upgrade-guide-from-1.4-to-1.5-ubuntu>`
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -1,157 +0,0 @@
|
||||
======================================================================
|
||||
Upgrade Guide - Scylla 1.4 to 1.5 for Red Hat Enterprise 7 or CentOS 7
|
||||
======================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 1.4 to Scylla 1.5, and rollback to 1.4 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 1.4.x to Scylla version 1.5.y, on the following platforms:
|
||||
|
||||
* Red Hat Enterprise Linux, version 7 and later
|
||||
* CentOS, version 7 and later
|
||||
* Packages are no longer provided for Fedora
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* drain node and backup the data
|
||||
* check your current release
|
||||
* backup configuration file
|
||||
* stop Scylla
|
||||
* download and install new Scylla packages
|
||||
* start Scylla
|
||||
* validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new 1.5 features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-1.4
|
||||
|
||||
Stop Scylla
|
||||
-----------
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``rpm -qa | grep scylla-server``. You should use the same version in case you want to :ref:`rollback <upgrade-1.4-1.5-rpm-rollback-procedure>` the upgrade. If you are not running a 1.4.x version, stop right here! This guide only covers 1.4.x to 1.5.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the `Scylla RPM repo <http://www.scylladb.com/download/centos_rpm>`_ to **1.5**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum update scylla\* -y
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Use ``journalctl _COMM=scylla`` to check there are no new errors in the log.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
.. _upgrade-1.4-1.5-rpm-rollback-procedure:
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla release 1.5.x to 1.4.y. Apply this procedure if an upgrade from 1.4 to 1.5 failed before completing on all nodes. Use this procedure only for nodes you upgraded to 1.5
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes rollback to 1.4, you will:
|
||||
|
||||
* drain the node and stop Scylla
|
||||
* retrieve the old Scylla packages
|
||||
* restore the configuration file
|
||||
* restart Scylla
|
||||
* validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/yum.repos.d/scylla.repo
|
||||
|
||||
2. Update the `Scylla RPM repo <http://www.scylladb.com/download/centos_rpm>`_ to **1.4**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum clean all
|
||||
sudo yum downgrade scylla\* -y
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-1.4 /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,160 +0,0 @@
|
||||
=============================================================================
|
||||
Upgrade Guide - Scylla 1.4 to 1.5 for Ubuntu 14.04 or 16.04
|
||||
=============================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 1.4 to Scylla 1.5, and rollback to 1.4 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 1.4.x to Scylla version 1.5.y on the following platform:
|
||||
|
||||
* Ubuntu 14.04
|
||||
* Ubuntu 16.04
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does not require full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
* drain node and backup the data
|
||||
* check your current release
|
||||
* backup configuration file
|
||||
* stop Scylla
|
||||
* download and install new Scylla packages
|
||||
* start Scylla
|
||||
* validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new 1.5 features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-1.4
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to :ref:`rollback <upgrade-1.4-1.5-ubuntu-rollback-procedure>` the upgrade. If you are not running a 1.4.x version, stop right here! This guide only covers 1.4.x to 1.5.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the `Scylla deb repo <http://www.scylladb.com/download/#fndtn-deb>`_ to **1.5**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Check scylla-server log (check ``/var/log/upstart/scylla-server.log`` for Ubuntu 14.04, execute ``journalctl _COMM=scylla`` for Ubuntu 16.04) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
.. _upgrade-1.4-1.5-ubuntu-rollback-procedure:
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
The following procedure describes a rollback from Scylla release 1.5.x to 1.4.y. Apply this procedure if an upgrade from 1.4 to 1.5 failed before completing on all nodes. Use this procedure only for nodes you upgraded to 1.5
|
||||
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes rollback to 1.4, you will:
|
||||
|
||||
* drain the node and stop Scylla
|
||||
* retrieve the old Scylla packages
|
||||
* restore the configuration file
|
||||
* restart Scylla
|
||||
* validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
2. Update the `Scylla deb repo <http://www.scylladb.com/download/#fndtn-deb>`_ to **1.4**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get remove scylla\* -y
|
||||
sudo apt-get install scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-1.4 /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,30 +0,0 @@
|
||||
=================================
|
||||
Upgrade Guide - Scylla 1.5 to 1.6
|
||||
=================================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:hidden:
|
||||
|
||||
Red Hat Enterprise Linux and CentOS <upgrade-guide-from-1.5-to-1.6-rpm>
|
||||
Ubuntu <upgrade-guide-from-1.5-to-1.6-ubuntu>
|
||||
|
||||
.. raw:: html
|
||||
|
||||
<div class="panel callout radius animated">
|
||||
<div class="row">
|
||||
<div class="medium-3 columns">
|
||||
<h5 id="getting-started">Upgrade Scylla from 1.5 to 1.6</h5>
|
||||
</div>
|
||||
<div class="medium-9 columns">
|
||||
|
||||
Upgrade guides are available for:
|
||||
|
||||
* :doc:`Upgrade Scylla from 1.5.x to 1.6.y on Red Hat Enterprise Linux and CentOS <upgrade-guide-from-1.5-to-1.6-rpm>`
|
||||
* :doc:`Upgrade Scylla from 1.5.x to 1.6.y on Ubuntu <upgrade-guide-from-1.5-to-1.6-ubuntu>`
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -1,156 +0,0 @@
|
||||
======================================================================
|
||||
Upgrade Guide - Scylla 1.5 to 1.6 for Red Hat Enterprise 7 or CentOS 7
|
||||
======================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 1.5 to Scylla 1.6, and rollback to 1.5 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 1.5.x to Scylla version 1.6.y, on the following platforms:
|
||||
|
||||
* Red Hat Enterprise Linux, version 7 and later
|
||||
* CentOS, version 7 and later
|
||||
* Packages are no longer provided for Fedora
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does not require full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
* drain node and backup the data
|
||||
* check your current release
|
||||
* backup configuration file
|
||||
* stop Scylla
|
||||
* download and install new Scylla packages
|
||||
* start Scylla
|
||||
* validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new 1.6 features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-1.5
|
||||
|
||||
Stop Scylla
|
||||
-----------
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``rpm -qa | grep scylla-server``. You should use the same version in case you want to :ref:`rollback <upgrade-1.5-1.6-rpm-rollback-procedure>` the upgrade. If you are not running a 1.5.x version, stop right here! This guide only covers 1.5.x to 1.6.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the `Scylla RPM repo <http://www.scylladb.com/download/centos_rpm>`_ to **1.6**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum update scylla\* -y
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Use ``journalctl _COMM=scylla`` to check there are no new errors in the log.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
.. _upgrade-1.5-1.6-rpm-rollback-procedure:
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla release 1.6.x to 1.5.y. Apply this procedure if an upgrade from 1.5 to 1.6 failed before completing on all nodes. Use this procedure only for nodes you upgraded to 1.6.
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes you roll back to 1.5, you will:
|
||||
|
||||
* drain the node and stop Scylla
|
||||
* retrieve the old Scylla packages
|
||||
* restore the configuration file
|
||||
* restart Scylla
|
||||
* validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/yum.repos.d/scylla.repo
|
||||
|
||||
2. Update the `Scylla RPM repo <http://www.scylladb.com/download/centos_rpm>`_ to **1.5**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum clean all
|
||||
sudo yum downgrade scylla\* -y
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-1.5 /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,160 +0,0 @@
|
||||
=============================================================================
|
||||
Upgrade Guide - Scylla 1.5 to 1.6 for Ubuntu 14.04 or 16.04
|
||||
=============================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 1.5 to Scylla 1.6, and rollback to 1.5 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 1.5.x to Scylla version 1.6.y on the following platform:
|
||||
|
||||
* Ubuntu 14.04
|
||||
* Ubuntu 16.04
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does not require full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
* drain node and backup the data
|
||||
* check your current release
|
||||
* backup configuration file
|
||||
* stop Scylla
|
||||
* download and install new Scylla packages
|
||||
* start Scylla
|
||||
* validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new 1.6 features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-1.5
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to :ref:`rollback <upgrade-1.5-1.6-ubuntu-rollback-procedure>` the upgrade. If you are not running a 1.5.x version, stop right here! This guide only covers 1.5.x to 1.6.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the `Scylla deb repo <http://www.scylladb.com/download/#fndtn-deb>`_ to **1.6**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Check scylla-server log (check ``/var/log/upstart/scylla-server.log`` for Ubuntu 14.04, execute ``journalctl _COMM=scylla`` for Ubuntu 16.04) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
.. _upgrade-1.5-1.6-ubuntu-rollback-procedure:
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
The following procedure describes a rollback from Scylla release 1.6.x to 1.5.y. Apply this procedure if an upgrade from 1.5 to 1.6 failed before completing on all nodes. Use this procedure only for nodes you upgraded to 1.6.
|
||||
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes you roll back to 1.5, you will:
|
||||
|
||||
* drain the node and stop Scylla
|
||||
* retrieve the old Scylla packages
|
||||
* restore the configuration file
|
||||
* restart Scylla
|
||||
* validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
2. Update the `Scylla deb repo <http://www.scylladb.com/download/#fndtn-deb>`_ to **1.5**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get remove scylla\* -y
|
||||
sudo apt-get install scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-1.5 /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,37 +0,0 @@
|
||||
=================================
|
||||
Upgrade Guide - Scylla 1.6 to 1.7
|
||||
=================================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:hidden:
|
||||
|
||||
Red Hat Enterprise Linux and CentOS <upgrade-guide-from-1.6-to-1.7-rpm>
|
||||
Ubuntu <upgrade-guide-from-1.6-to-1.7-ubuntu>
|
||||
Debian <upgrade-guide-from-1.6-to-1.7-debian>
|
||||
|
||||
|
||||
.. raw:: html
|
||||
|
||||
|
||||
<div class="panel callout radius animated">
|
||||
<div class="row">
|
||||
<div class="medium-3 columns">
|
||||
<h5 id="getting-started">Upgrade Scylla from 1.6 to 1.7</h5>
|
||||
</div>
|
||||
<div class="medium-9 columns">
|
||||
|
||||
|
||||
Upgrade guides are available for:
|
||||
|
||||
* :doc:`Upgrade Scylla from 1.6.x to 1.7.y on Red Hat Enterprise Linux and CentOS <upgrade-guide-from-1.6-to-1.7-rpm>`
|
||||
* :doc:`Upgrade Scylla from 1.6.x to 1.7.y on Ubuntu <upgrade-guide-from-1.6-to-1.7-ubuntu>`
|
||||
* :doc:`Upgrade Scylla from 1.6.x to 1.7.y on Debian <upgrade-guide-from-1.6-to-1.7-debian>`
|
||||
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
.. |OS| replace:: Debian 8
|
||||
.. |ROLLBACK| replace:: rollback
|
||||
.. _ROLLBACK: /upgrade/upgrade-opensource/upgrade-guide-from-1.6-to-1.7/upgrade-guide-from-1.6-to-1.7-debian/#upgrade-1-6-1-7-rollback-procedure
|
||||
.. |APT| replace:: Scylla deb repo
|
||||
.. _APT: http://www.scylladb.com/download/#fndtn-Debian
|
||||
.. |ENABLE_APT_REPO| replace:: echo 'deb http://http.debian.net/debian jessie-backports main' > /etc/apt/sources.list.d/jessie-backports.list
|
||||
.. |JESSIE_BACKPORTS| replace:: -t jessie-backports openjdk-8-jre-headless
|
||||
.. include:: /upgrade/_common/upgrade-guide-from-1.6-to-1.7-ubuntu-and-debian.rst
|
||||
@@ -1,157 +0,0 @@
|
||||
=============================================================================
|
||||
Upgrade Guide - Scylla 1.6 to 1.7 for Red Hat Enterprise 7 or CentOS 7
|
||||
=============================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 1.6 to Scylla 1.7, and rollback to 1.6 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 1.6.x to Scylla version 1.7.y, on the following platforms:
|
||||
|
||||
* Red Hat Enterprise Linux, version 7 and later
|
||||
* CentOS, version 7 and later
|
||||
* Packages are no longer provided for Fedora
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does not require full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
* drain node and backup the data
|
||||
* check your current release
|
||||
* backup configuration file
|
||||
* stop Scylla
|
||||
* download and install new Scylla packages
|
||||
* start Scylla
|
||||
* validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new 1.7 features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-1.6
|
||||
|
||||
Stop Scylla
|
||||
-----------
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``rpm -qa | grep scylla-server``. You should use the same version in case you want to :ref:`rollback <upgrade-1.6-1.7-rpm-rollback-procedure>` the upgrade. If you are not running a 1.6.x version, stop right here! This guide only covers 1.6.x to 1.7.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the `Scylla RPM repo <http://www.scylladb.com/download/centos_rpm>`_ to **1.7**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum update scylla\* -y
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Use ``journalctl _COMM=scylla`` to check there are no new errors in the log.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
.. _upgrade-1.6-1.7-rpm-rollback-procedure:
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla release 1.7.x to 1.6.y. Apply this procedure if an upgrade from 1.6 to 1.7 failed before completing on all nodes. Use this procedure only for nodes you upgraded to 1.7.
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes you roll back to 1.6, you will:
|
||||
|
||||
* drain the node and stop Scylla
|
||||
* retrieve the old Scylla packages
|
||||
* restore the configuration file
|
||||
* restart Scylla
|
||||
* validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/yum.repos.d/scylla.repo
|
||||
|
||||
2. Update the `Scylla RPM repo <http://www.scylladb.com/download/centos_rpm>`_ to **1.6**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum clean all
|
||||
sudo yum downgrade scylla\* -y
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-1.6 /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,8 +0,0 @@
|
||||
.. |OS| replace:: Ubuntu 14.04 or 16.04
|
||||
.. |ROLLBACK| replace:: rollback
|
||||
.. _ROLLBACK: /upgrade/upgrade-opensource/upgrade-guide-from-1.6-to-1.7/upgrade-guide-from-1.6-to-1.7-ubuntu/#upgrade-1-6-1-7-rollback-procedure
|
||||
.. |APT| replace:: Scylla deb repo
|
||||
.. _APT: http://www.scylladb.com/download/#fndtn-deb
|
||||
.. |ENABLE_APT_REPO| replace:: sudo add-apt-repository -y ppa:openjdk-r/ppa
|
||||
.. |JESSIE_BACKPORTS| replace:: openjdk-8-jre-headless
|
||||
.. include:: /upgrade/_common/upgrade-guide-from-1.6-to-1.7-ubuntu-and-debian.rst
|
||||
@@ -1,43 +0,0 @@
|
||||
=================================
|
||||
Upgrade Guide - Scylla 1.7 to 2.0
|
||||
=================================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:hidden:
|
||||
|
||||
Red Hat Enterprise Linux and CentOS <upgrade-guide-from-1.7-to-2.0-rpm>
|
||||
Ubuntu <upgrade-guide-from-1.7-to-2.0-ubuntu>
|
||||
Debian <upgrade-guide-from-1.7-to-2.0-debian>
|
||||
Metrics <metric-update-1.7-to-2.0>
|
||||
|
||||
|
||||
.. raw:: html
|
||||
|
||||
|
||||
<div class="panel callout radius animated">
|
||||
<div class="row">
|
||||
<div class="medium-3 columns">
|
||||
<h5 id="getting-started">Upgrade Scylla from 1.7 to 2.0</h5>
|
||||
</div>
|
||||
<div class="medium-9 columns">
|
||||
|
||||
|
||||
|
||||
Upgrade guides are available for:
|
||||
|
||||
* :doc:`Upgrade Scylla from 1.7.x to 2.0.y on Red Hat Enterprise Linux and CentOS <upgrade-guide-from-1.7-to-2.0-rpm>`
|
||||
* :doc:`Upgrade Scylla from 1.7.x to 2.0.y on Ubuntu <upgrade-guide-from-1.7-to-2.0-ubuntu>`
|
||||
* :doc:`Upgrade Scylla from 1.7.x to 2.0.y on Debian <upgrade-guide-from-1.7-to-2.0-debian>`
|
||||
* :doc:`Scylla Metrics Update - Scylla 1.7 to 2.0<metric-update-1.7-to-2.0>`
|
||||
|
||||
|
||||
.. raw:: html
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -1,66 +0,0 @@
|
||||
|
||||
|
||||
========================================
|
||||
Scylla Metric Update - Scylla 1.7 to 2.0
|
||||
========================================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:hidden:
|
||||
|
||||
The following metric names have changed
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
* scylla_cache_evictions To scylla_cache_partition_evictions
|
||||
* scylla_cache_hits To scylla_cache_partition_hits
|
||||
* scylla_cache_insertions To scylla_cache_partition_insertions
|
||||
* scylla_cache_merges To scylla_cache_partition_merges
|
||||
* scylla_cache_misses To scylla_cache_partition_misses
|
||||
* scylla_cache_removals To scylla_cache_partition_removals
|
||||
* scylla_cache_total To scylla_cache_bytes_total
|
||||
* scylla_cache_used To scylla_cache_bytes_used
|
||||
|
||||
The following metrics are no longer available
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
* scylla_cache_uncached_wide_partitions
|
||||
* scylla_cache_wide_partition_evictions
|
||||
* scylla_cache_wide_partition_mispopulations
|
||||
|
||||
The following metrics are new in Scylla 2.0
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
* scylla_cache_mispopulations
|
||||
* scylla_cache_reads
|
||||
* scylla_cache_active_reads
|
||||
* scylla_cache_reads_with_misses
|
||||
* scylla_cache_row_hits
|
||||
* scylla_cache_row_insertions
|
||||
* scylla_cache_row_misses
|
||||
* scylla_cache_sstable_partition_skips
|
||||
* scylla_cache_sstable_reader_recreations
|
||||
* scylla_cache_sstable_row_skips
|
||||
* scylla_column_family_live_disk_space
|
||||
* scylla_column_family_live_sstable
|
||||
* scylla_column_family_memtable_switch
|
||||
* scylla_column_family_pending_compaction
|
||||
* scylla_column_family_pending_tasks
|
||||
* scylla_column_family_total_disk_space
|
||||
* scylla_database_active_reads_streaming
|
||||
* scylla_database_counter_cell_lock_acquisition
|
||||
* scylla_database_counter_cell_lock_pending
|
||||
* scylla_database_cpu_flush_quota
|
||||
* scylla_database_queued_reads_streaming
|
||||
* scylla_execution_stages_function_calls_enqueued
|
||||
* scylla_execution_stages_function_calls_executed
|
||||
* scylla_execution_stages_tasks_preempted
|
||||
* scylla_execution_stages_tasks_scheduled
|
||||
* scylla_scylladb_current_version
|
||||
* scylla_sstables_index_page_blocks
|
||||
* scylla_sstables_index_page_hits
|
||||
* scylla_sstables_index_page_misses
|
||||
* scylla_storage_proxy_coordinator_background_read_repairs
|
||||
* scylla_storage_proxy_coordinator_foreground_read_repair
|
||||
* scylla_storage_proxy_coordinator_read_latency
|
||||
* scylla_storage_proxy_coordinator_write_latency
|
||||
* scylla_storage_proxy_replica_received_counter_updates
|
||||
@@ -1,8 +0,0 @@
|
||||
.. |OS| replace:: Debian 8
|
||||
.. |ROLLBACK| replace:: rollback
|
||||
.. _ROLLBACK: /upgrade/upgrade-opensource/upgrade-guide-from-1.7-to-2.0/upgrade-guide-from-1.7-to-2.0-debian/#rollback-procedure
|
||||
.. |APT| replace:: Scylla deb repo
|
||||
.. _APT: http://www.scylladb.com/download/debian8/
|
||||
.. |ENABLE_APT_REPO| replace:: echo 'deb http://http.debian.net/debian jessie-backports main' > /etc/apt/sources.list.d/jessie-backports.list
|
||||
.. |JESSIE_BACKPORTS| replace:: -t jessie-backports openjdk-8-jre-headless
|
||||
.. include:: /upgrade/_common/upgrade-guide-from-1.7-to-2.0-ubuntu-and-debian.rst
|
||||
@@ -1,178 +0,0 @@
|
||||
=============================================================================
|
||||
Upgrade Guide - Scylla 1.7 to 2.0 for Red Hat Enterprise Linux 7 or CentOS 7
|
||||
=============================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 1.7 to Scylla 2.0, and rollback to 1.7 if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 1.7.x (x >= 4) to Scylla version 2.0.y, on the following platforms:
|
||||
|
||||
* Red Hat Enterprise Linux, version 7 and later
|
||||
* CentOS, version 7 and later
|
||||
* Packages are no longer provided for Fedora
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does not require full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
* check cluster schema
|
||||
* drain node and backup the data
|
||||
* backup configuration file
|
||||
* stop Scylla
|
||||
* download and install new Scylla packages
|
||||
* start Scylla
|
||||
* validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new 2.0 features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Check cluster schema
|
||||
--------------------
|
||||
Make sure that all nodes have the schema in sync prior to the upgrade; an upgrade will not succeed when there is schema disagreement between nodes.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-1.7
|
||||
|
||||
Stop Scylla
|
||||
-----------
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``rpm -qa | grep scylla-server``. You should use the same version in case you want to :ref:`rollback <upgrade-1.7-2.0-rpm-rollback-procedure>` the upgrade. If you are not running a 1.7.x version, stop right here! This guide only covers 1.7.x to 2.0.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the `Scylla RPM repo <http://www.scylladb.com/download/?platform=centos>`_ to **2.0**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum update scylla\* -y
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check scylla version.
|
||||
3. Use ``journalctl _COMM=scylla`` to check there are no new errors in the log.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
* More on :doc:`Scylla Metrics Update - Scylla 1.7 to 2.0<metric-update-1.7-to-2.0>`
|
||||
|
||||
.. _upgrade-1.7-2.0-rpm-rollback-procedure:
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla release 2.0.x to 1.7.y (y >= 4). Apply this procedure if an upgrade from 1.7 to 2.0 failed before completing on all nodes. Use this procedure only for nodes you upgraded to 2.0.
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes you roll back to 1.7, you will:
|
||||
|
||||
* drain the node and stop Scylla
|
||||
* retrieve the old Scylla packages
|
||||
* restore the configuration file
|
||||
* restart Scylla
|
||||
* validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/yum.repos.d/scylla.repo
|
||||
|
||||
2. Update the `Scylla RPM repo <http://www.scylladb.com/download/?platform=centos>`_ to **1.7**
|
||||
3. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum clean all
|
||||
sudo yum remove scylla-tools-core
|
||||
sudo yum downgrade scylla\* -y
|
||||
sudo yum install scylla
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-1.7 /etc/scylla/scylla.yaml
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from previous snapshot, 2.0 uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,8 +0,0 @@
|
||||
.. |OS| replace:: Ubuntu 14.04 or 16.04
|
||||
.. |ROLLBACK| replace:: rollback
|
||||
.. _ROLLBACK: /upgrade/upgrade-opensource/upgrade-guide-from-1.7-to-2.0/upgrade-guide-from-1.7-to-2.0-ubuntu/#rollback-procedure
|
||||
.. |APT| replace:: Scylla deb repo
|
||||
.. _APT: http://www.scylladb.com/download/
|
||||
.. |ENABLE_APT_REPO| replace:: sudo add-apt-repository -y ppa:openjdk-r/ppa
|
||||
.. |JESSIE_BACKPORTS| replace:: openjdk-8-jre-headless
|
||||
.. include:: /upgrade/_common/upgrade-guide-from-1.7-to-2.0-ubuntu-and-debian.rst
|
||||
@@ -1,17 +0,0 @@
|
||||
=================================================
|
||||
Upgrade Guide - Scylla Maintenance Release
|
||||
=================================================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:hidden:
|
||||
|
||||
Red Hat Enterprise Linux, and CentOS <upgrade-guide-from-1.x.y-to-1.x.z-rpm>
|
||||
Ubuntu <upgrade-guide-from-1.x.y-to-1.x.z-ubuntu>
|
||||
Debian <upgrade-guide-from-1.x.y-to-1.x.z-debian>
|
||||
|
||||
Upgrade guides are available for:
|
||||
|
||||
* :doc:`Upgrade Scylla from 1.x.y to 1.x.z on Red Hat Enterprise Linux, and CentOS <upgrade-guide-from-1.x.y-to-1.x.z-rpm>`
|
||||
* :doc:`Upgrade Scylla from 1.x.y to 1.x.z on Ubuntu <upgrade-guide-from-1.x.y-to-1.x.z-ubuntu>`
|
||||
* :doc:`Upgrade Scylla from 1.x.y to 1.x.z on Debian <upgrade-guide-from-1.x.y-to-1.x.z-debian>`
|
||||
@@ -1,6 +0,0 @@
|
||||
.. |OS| replace:: Debian 8
|
||||
.. |ROLLBACK| replace:: rollback
|
||||
.. _ROLLBACK: /upgrade/upgrade-opensource/upgrade-guide-from-1.x.y-to-1.x.z/upgrade-guide-from-1.x.y-to-1.x.z-debian/#upgrade-1.x.y-1.x.z-rollback-procedure
|
||||
.. |APT| replace:: Scylla deb repo
|
||||
.. _APT: http://www.scylladb.com/download/#fndtn-Debian
|
||||
.. include:: /upgrade/_common/upgrade-guide-from-1.x.y-to-1.x.z-ubuntu-and-debian.rst
|
||||
@@ -1,156 +0,0 @@
|
||||
==========================================================================
|
||||
Upgrade Guide - Scylla 1.x.y to 1.x.z for Red Hat Enterprise 7 or CentOS 7
|
||||
==========================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from Scylla 1.x.y to Scylla 1.x.z.
|
||||
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: 1.x.y to Scylla version 1.x.z, on the following platforms:
|
||||
|
||||
* Red Hat Enterprise Linux, version 7 and later
|
||||
* CentOS, version 7 and later
|
||||
* Fedora: packages are no longer provided
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
.. include:: /upgrade/_common/warning.rst
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does not require full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
* drain node and backup the data
|
||||
* check your current release
|
||||
* backup configuration file and rpm packages
|
||||
* stop Scylla
|
||||
* download and install new Scylla packages
|
||||
* start Scylla
|
||||
* validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new 1.x.z features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes
|
||||
* Not to apply schema changes
|
||||
|
||||
Upgrade steps
|
||||
=============
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to back up all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
Backup configuration file and rpm packages
|
||||
------------------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-1.x.z
|
||||
|
||||
If you installed Scylla with yum, you can find the RPM packages in ``/var/cache/yum/``. Back up the Scylla packages to a ``scylla_1.x.y_backup`` directory, which will be used in case of a rollback.
|
||||
|
||||
Stop Scylla
|
||||
-----------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``rpm -qa | grep scylla-server``. You should use the same version in case you want to :ref:`rollback <upgrade-1.x.y-to-1.x.z-rpm-rollback-procedure>` the upgrade. If you are not running a 1.x.y version, stop right here! This guide only covers 1.x.y to 1.x.z upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the `Scylla RPM repo <http://www.scylladb.com/download/#fndtn-RPM>`_ to **1.x**
|
||||
2. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum update scylla\* -y
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check the Scylla version.
|
||||
3. Use ``journalctl _COMM=scylla`` to check there are no new errors in the log.
|
||||
4. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
.. _upgrade-1.x.y-to-1.x.z-rpm-rollback-procedure:
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from Scylla release 1.x.z to 1.x.y. Apply this procedure if an upgrade from 1.x.y to 1.x.z failed before completing on all nodes. Use this procedure only for nodes you upgraded to 1.x.z
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes rollback to 1.x.y, you will:
|
||||
|
||||
* drain the node and stop Scylla
|
||||
* retrieve the old Scylla packages
|
||||
* restore the configuration file
|
||||
* restart Scylla
|
||||
* validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating that the node is up and running with the rolled-back version.
|
||||
|
||||
|
||||
Rollback steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Install the old release from the backed-up RPM packages
|
||||
--------------------------------------------------
|
||||
1. Install
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo yum remove scylla\* -y
|
||||
sudo yum install scylla_1.x.y_backup/scylla*.rpm
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-1.x.z /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check the upgrade instructions above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -1,6 +0,0 @@
|
||||
.. |OS| replace:: Ubuntu 14.04 or 16.04
|
||||
.. |ROLLBACK| replace:: rollback
|
||||
.. _ROLLBACK: /upgrade/upgrade-opensource/upgrade-guide-from-1.x.y-to-1.x.z/upgrade-guide-from-1.x.y-to-1.x.z-ubuntu/#upgrade-1-x-y-1-x-z-rollback-procedure
|
||||
.. |APT| replace:: Scylla deb repo
|
||||
.. _APT: http://www.scylladb.com/download/#fndtn-deb
|
||||
.. include:: /upgrade/_common/upgrade-guide-from-1.x.y-to-1.x.z-ubuntu-and-debian.rst
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user