Compare commits
197 Commits
next
...
scylla-5.1
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ba1a57bd55 | ||
|
|
1891ad2551 | ||
|
|
2ea3d5ebf0 | ||
|
|
d7d9300453 | ||
|
|
ab2817b4a6 | ||
|
|
73a340033e | ||
|
|
fec4f88744 | ||
|
|
802beb972e | ||
|
|
1101694169 | ||
|
|
d361bce0f6 | ||
|
|
7e0dcf9bc5 | ||
|
|
011c5ac37e | ||
|
|
9430465a52 | ||
|
|
6f28b77962 | ||
|
|
06d5557c42 | ||
|
|
bc31472469 | ||
|
|
fcab382e37 | ||
|
|
be6c3022ce | ||
|
|
6fa78b90b5 | ||
|
|
8b5a342a92 | ||
|
|
6905f5056f | ||
|
|
c90807d29f | ||
|
|
c468c61ddc | ||
|
|
05a3e97077 | ||
|
|
41e93b2e69 | ||
|
|
9f00af9395 | ||
|
|
7e78b29609 | ||
|
|
7007c94f5d | ||
|
|
5c5a9633ea | ||
|
|
c75359d664 | ||
|
|
24f76f40b7 | ||
|
|
3f133cfa87 | ||
|
|
d66e23b265 | ||
|
|
378e8761b9 | ||
|
|
fc26f6b850 | ||
|
|
7663dc31b8 | ||
|
|
53b6e720f6 | ||
|
|
46d6145b37 | ||
|
|
772ac59299 | ||
|
|
1f334e48b2 | ||
|
|
389050e421 | ||
|
|
2152b765a6 | ||
|
|
d43b6db152 | ||
|
|
a514e60e65 | ||
|
|
6eb70caba9 | ||
|
|
dd094f1230 | ||
|
|
530600a646 | ||
|
|
c8e5f8c66b | ||
|
|
ca7a13cad2 | ||
|
|
d401551020 | ||
|
|
d952bf4035 | ||
|
|
b346136e98 | ||
|
|
87e267213d | ||
|
|
5bda9356d5 | ||
|
|
f9fe48ad89 | ||
|
|
0c9a0faf0d | ||
|
|
47dcfd866c | ||
|
|
5c9ecd5604 | ||
|
|
1a37b85d14 | ||
|
|
5070ddb723 | ||
|
|
7480af58e5 | ||
|
|
43d46a241f | ||
|
|
3807020a7b | ||
|
|
d6fb20f30e | ||
|
|
33c20eebe6 | ||
|
|
f3a6af663d | ||
|
|
1685af9829 | ||
|
|
bb880c7658 | ||
|
|
f952d397e8 | ||
|
|
5e88421360 | ||
|
|
1945102ca0 | ||
|
|
be3f6f8c7b | ||
|
|
94735f63a3 | ||
|
|
b0d28919c0 | ||
|
|
addc4666d5 | ||
|
|
a14ffbd5e2 | ||
|
|
ea56ecace0 | ||
|
|
fe8d8f97e2 | ||
|
|
44e920cbb0 | ||
|
|
b114551d53 | ||
|
|
099145fe9a | ||
|
|
0bdbce90f4 | ||
|
|
8ffdb8546b | ||
|
|
db382697f1 | ||
|
|
3c02e5d263 | ||
|
|
4c0f7ea098 | ||
|
|
c14a0340ca | ||
|
|
aa523141f9 | ||
|
|
86240d6344 | ||
|
|
95a94a2687 | ||
|
|
9173a3d808 | ||
|
|
7942041b95 | ||
|
|
1cfedc5b59 | ||
|
|
606ed61263 | ||
|
|
796e4c39f8 | ||
|
|
960434f784 | ||
|
|
05aed0417a | ||
|
|
9a1fc200e1 | ||
|
|
c7e9bbc377 | ||
|
|
a78dac7ae9 | ||
|
|
0debb419f7 | ||
|
|
0cfb950569 | ||
|
|
eebe77b5b8 | ||
|
|
aa206a6b6a | ||
|
|
9baf72b049 | ||
|
|
8e62405117 | ||
|
|
15421e45a0 | ||
|
|
cf6bcffc1b | ||
|
|
a50b7f3d6a | ||
|
|
0e06025487 | ||
|
|
820b79d56e | ||
|
|
fde4a6e92d | ||
|
|
34b8d4306c | ||
|
|
69e8fb997c | ||
|
|
fbfd91e02f | ||
|
|
4c19b48495 | ||
|
|
9deca4250f | ||
|
|
97ab2a4eb3 | ||
|
|
16a941db3b | ||
|
|
8f60a464a7 | ||
|
|
1e72f9cb5e | ||
|
|
d91da87313 | ||
|
|
c73d59c1cb | ||
|
|
9a6c0a89a0 | ||
|
|
5b7dd00b14 | ||
|
|
237df3b935 | ||
|
|
993d0371d9 | ||
|
|
04167eba68 | ||
|
|
ddf8eaba04 | ||
|
|
d5e5d27929 | ||
|
|
bb69ece13d | ||
|
|
d4268863cd | ||
|
|
8b05c67226 | ||
|
|
3afc58de7d | ||
|
|
496696140b | ||
|
|
3a070380a7 | ||
|
|
7b551d8ce4 | ||
|
|
2018b8fcfd | ||
|
|
20b5aa938e | ||
|
|
f519580252 | ||
|
|
46c0a1cc0a | ||
|
|
a822282fde | ||
|
|
bc5af9fdea | ||
|
|
c8bb147f84 | ||
|
|
dc92ec4c8b | ||
|
|
8f7e3275a2 | ||
|
|
40a1905a2d | ||
|
|
4e2c436222 | ||
|
|
68be369f93 | ||
|
|
0f7adb5f47 | ||
|
|
82dc8357ef | ||
|
|
12a58957e2 | ||
|
|
3423ad6e38 | ||
|
|
64001719fa | ||
|
|
cc3d368bc8 | ||
|
|
d957b0044b | ||
|
|
d4ed67bd47 | ||
|
|
0cd6341cae | ||
|
|
23d8852a82 | ||
|
|
88016de43e | ||
|
|
bdecf4318a | ||
|
|
72bf244ad1 | ||
|
|
ee82323599 | ||
|
|
2f78df92ab | ||
|
|
e2809674d2 | ||
|
|
0295d0c5c8 | ||
|
|
fa94222662 | ||
|
|
dff7f3c5ba | ||
|
|
3723713130 | ||
|
|
03f8411e38 | ||
|
|
0e391d67d1 | ||
|
|
f76989285e | ||
|
|
9deeeb4db1 | ||
|
|
1f3196735f | ||
|
|
abb6817261 | ||
|
|
d3fd090429 | ||
|
|
3e7c57d162 | ||
|
|
f878a34da3 | ||
|
|
eaded57b2e | ||
|
|
25d2da08d1 | ||
|
|
9b1a570f6f | ||
|
|
426d045249 | ||
|
|
86dbbf12cc | ||
|
|
b05903eddd | ||
|
|
26ead53304 | ||
|
|
f60bab9471 | ||
|
|
66f34245fc | ||
|
|
4047528bd9 | ||
|
|
1a82c61452 | ||
|
|
3d9800eb1c | ||
|
|
c48e9b47dd | ||
|
|
2eadaad9f7 | ||
|
|
d10aee15e7 | ||
|
|
9e017cb1e6 | ||
|
|
b8504cc9b2 | ||
|
|
856703a85e | ||
|
|
86a6c1fb2b |
2
.gitmodules
vendored
2
.gitmodules
vendored
@@ -1,6 +1,6 @@
|
||||
[submodule "seastar"]
|
||||
path = seastar
|
||||
url = ../seastar
|
||||
url = ../scylla-seastar
|
||||
ignore = dirty
|
||||
[submodule "swagger-ui"]
|
||||
path = swagger-ui
|
||||
|
||||
@@ -60,7 +60,7 @@ fi
|
||||
|
||||
# Default scylla product/version tags
|
||||
PRODUCT=scylla
|
||||
VERSION=5.1.0-dev
|
||||
VERSION=5.1.9
|
||||
|
||||
if test -f version
|
||||
then
|
||||
|
||||
@@ -34,6 +34,7 @@
|
||||
#include "expressions.hh"
|
||||
#include "conditions.hh"
|
||||
#include "cql3/constants.hh"
|
||||
#include "cql3/util.hh"
|
||||
#include <optional>
|
||||
#include "utils/overloaded_functor.hh"
|
||||
#include <seastar/json/json_elements.hh>
|
||||
@@ -438,6 +439,11 @@ future<executor::request_return_type> executor::describe_table(client_state& cli
|
||||
rjson::add(table_description, "BillingModeSummary", rjson::empty_object());
|
||||
rjson::add(table_description["BillingModeSummary"], "BillingMode", "PAY_PER_REQUEST");
|
||||
rjson::add(table_description["BillingModeSummary"], "LastUpdateToPayPerRequestDateTime", rjson::value(creation_date_seconds));
|
||||
// In PAY_PER_REQUEST billing mode, provisioned capacity should return 0
|
||||
rjson::add(table_description, "ProvisionedThroughput", rjson::empty_object());
|
||||
rjson::add(table_description["ProvisionedThroughput"], "ReadCapacityUnits", 0);
|
||||
rjson::add(table_description["ProvisionedThroughput"], "WriteCapacityUnits", 0);
|
||||
rjson::add(table_description["ProvisionedThroughput"], "NumberOfDecreasesToday", 0);
|
||||
|
||||
std::unordered_map<std::string,std::string> key_attribute_types;
|
||||
// Add base table's KeySchema and collect types for AttributeDefinitions:
|
||||
@@ -460,6 +466,11 @@ future<executor::request_return_type> executor::describe_table(client_state& cli
|
||||
rjson::add(view_entry, "IndexArn", generate_arn_for_index(*schema, index_name));
|
||||
// Add indexes's KeySchema and collect types for AttributeDefinitions:
|
||||
describe_key_schema(view_entry, *vptr, key_attribute_types);
|
||||
// Add projection type
|
||||
rjson::value projection = rjson::empty_object();
|
||||
rjson::add(projection, "ProjectionType", "ALL");
|
||||
// FIXME: we have to get ProjectionType from the schema when it is added
|
||||
rjson::add(view_entry, "Projection", std::move(projection));
|
||||
// Local secondary indexes are marked by an extra '!' sign occurring before the ':' delimiter
|
||||
rjson::value& index_array = (delim_it > 1 && cf_name[delim_it-1] == '!') ? lsi_array : gsi_array;
|
||||
rjson::push_back(index_array, std::move(view_entry));
|
||||
@@ -917,9 +928,10 @@ static future<executor::request_return_type> create_table_on_shard0(tracing::tra
|
||||
if (!range_key.empty() && range_key != view_hash_key && range_key != view_range_key) {
|
||||
add_column(view_builder, range_key, attribute_definitions, column_kind::clustering_key);
|
||||
}
|
||||
sstring where_clause = "\"" + view_hash_key + "\" IS NOT NULL";
|
||||
sstring where_clause = format("{} IS NOT NULL", cql3::util::maybe_quote(view_hash_key));
|
||||
if (!view_range_key.empty()) {
|
||||
where_clause = where_clause + " AND \"" + view_hash_key + "\" IS NOT NULL";
|
||||
where_clause = format("{} AND {} IS NOT NULL", where_clause,
|
||||
cql3::util::maybe_quote(view_range_key));
|
||||
}
|
||||
where_clauses.push_back(std::move(where_clause));
|
||||
view_builders.emplace_back(std::move(view_builder));
|
||||
@@ -974,9 +986,10 @@ static future<executor::request_return_type> create_table_on_shard0(tracing::tra
|
||||
// Note above we don't need to add virtual columns, as all
|
||||
// base columns were copied to view. TODO: reconsider the need
|
||||
// for virtual columns when we support Projection.
|
||||
sstring where_clause = "\"" + view_hash_key + "\" IS NOT NULL";
|
||||
sstring where_clause = format("{} IS NOT NULL", cql3::util::maybe_quote(view_hash_key));
|
||||
if (!view_range_key.empty()) {
|
||||
where_clause = where_clause + " AND \"" + view_range_key + "\" IS NOT NULL";
|
||||
where_clause = format("{} AND {} IS NOT NULL", where_clause,
|
||||
cql3::util::maybe_quote(view_range_key));
|
||||
}
|
||||
where_clauses.push_back(std::move(where_clause));
|
||||
view_builders.emplace_back(std::move(view_builder));
|
||||
|
||||
@@ -142,19 +142,24 @@ future<alternator::executor::request_return_type> alternator::executor::list_str
|
||||
auto table = find_table(_proxy, request);
|
||||
auto db = _proxy.data_dictionary();
|
||||
auto cfs = db.get_tables();
|
||||
auto i = cfs.begin();
|
||||
auto e = cfs.end();
|
||||
|
||||
if (limit < 1) {
|
||||
throw api_error::validation("Limit must be 1 or more");
|
||||
}
|
||||
|
||||
// TODO: the unordered_map here is not really well suited for partial
|
||||
// querying - we're sorting on local hash order, and creating a table
|
||||
// between queries may or may not miss info. But that should be rare,
|
||||
// and we can probably expect this to be a single call.
|
||||
// # 12601 (maybe?) - sort the set of tables on ID. This should ensure we never
|
||||
// generate duplicates in a paged listing here. Can obviously miss things if they
|
||||
// are added between paged calls and end up with a "smaller" UUID/ARN, but that
|
||||
// is to be expected.
|
||||
std::sort(cfs.begin(), cfs.end(), [](const data_dictionary::table& t1, const data_dictionary::table& t2) {
|
||||
return t1.schema()->id() < t2.schema()->id();
|
||||
});
|
||||
|
||||
auto i = cfs.begin();
|
||||
auto e = cfs.end();
|
||||
|
||||
if (streams_start) {
|
||||
i = std::find_if(i, e, [&](data_dictionary::table t) {
|
||||
i = std::find_if(i, e, [&](const data_dictionary::table& t) {
|
||||
return t.schema()->id() == streams_start
|
||||
&& cdc::get_base_table(db.real_database(), *t.schema())
|
||||
&& is_alternator_keyspace(t.schema()->ks_name())
|
||||
|
||||
@@ -610,6 +610,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
if (column_families.empty()) {
|
||||
column_families = map_keys(ctx.db.local().find_keyspace(keyspace).metadata().get()->cf_meta_data());
|
||||
}
|
||||
apilog.debug("force_keyspace_compaction: keyspace={} tables={}", keyspace, column_families);
|
||||
return ctx.db.invoke_on_all([keyspace, column_families] (replica::database& db) -> future<> {
|
||||
auto table_ids = boost::copy_range<std::vector<utils::UUID>>(column_families | boost::adaptors::transformed([&] (auto& cf_name) {
|
||||
return db.find_uuid(keyspace, cf_name);
|
||||
@@ -634,6 +635,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
if (column_families.empty()) {
|
||||
column_families = map_keys(ctx.db.local().find_keyspace(keyspace).metadata().get()->cf_meta_data());
|
||||
}
|
||||
apilog.info("force_keyspace_cleanup: keyspace={} tables={}", keyspace, column_families);
|
||||
return ss.local().is_cleanup_allowed(keyspace).then([&ctx, keyspace,
|
||||
column_families = std::move(column_families)] (bool is_cleanup_allowed) mutable {
|
||||
if (!is_cleanup_allowed) {
|
||||
@@ -653,7 +655,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
// as a table can be dropped during loop below, let's find it before issuing the cleanup request.
|
||||
for (auto& id : table_ids) {
|
||||
replica::table& t = db.find_column_family(id);
|
||||
co_await cm.perform_cleanup(owned_ranges_ptr, t.as_table_state());
|
||||
co_await t.perform_cleanup_compaction(owned_ranges_ptr);
|
||||
}
|
||||
co_return;
|
||||
}).then([]{
|
||||
@@ -663,6 +665,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
});
|
||||
|
||||
ss::perform_keyspace_offstrategy_compaction.set(r, wrap_ks_cf(ctx, [] (http_context& ctx, std::unique_ptr<request> req, sstring keyspace, std::vector<sstring> tables) -> future<json::json_return_type> {
|
||||
apilog.info("perform_keyspace_offstrategy_compaction: keyspace={} tables={}", keyspace, tables);
|
||||
co_return co_await ctx.db.map_reduce0([&keyspace, &tables] (replica::database& db) -> future<bool> {
|
||||
bool needed = false;
|
||||
for (const auto& table : tables) {
|
||||
@@ -676,6 +679,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
ss::upgrade_sstables.set(r, wrap_ks_cf(ctx, [] (http_context& ctx, std::unique_ptr<request> req, sstring keyspace, std::vector<sstring> column_families) {
|
||||
bool exclude_current_version = req_param<bool>(*req, "exclude_current_version", false);
|
||||
|
||||
apilog.info("upgrade_sstables: keyspace={} tables={} exclude_current_version={}", keyspace, column_families, exclude_current_version);
|
||||
return ctx.db.invoke_on_all([=] (replica::database& db) {
|
||||
auto owned_ranges_ptr = compaction::make_owned_ranges_ptr(db.get_keyspace_local_ranges(keyspace));
|
||||
return do_for_each(column_families, [=, &db](sstring cfname) {
|
||||
@@ -691,6 +695,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
ss::force_keyspace_flush.set(r, [&ctx](std::unique_ptr<request> req) -> future<json::json_return_type> {
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto column_families = parse_tables(keyspace, ctx, req->query_parameters, "cf");
|
||||
apilog.info("perform_keyspace_flush: keyspace={} tables={}", keyspace, column_families);
|
||||
auto &db = ctx.db.local();
|
||||
if (column_families.empty()) {
|
||||
co_await db.flush_on_all(keyspace);
|
||||
@@ -702,6 +707,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
|
||||
|
||||
ss::decommission.set(r, [&ss](std::unique_ptr<request> req) {
|
||||
apilog.info("decommission");
|
||||
return ss.local().decommission().then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
@@ -717,6 +723,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
ss::remove_node.set(r, [&ss](std::unique_ptr<request> req) {
|
||||
auto host_id = req->get_query_param("host_id");
|
||||
std::vector<sstring> ignore_nodes_strs= split(req->get_query_param("ignore_nodes"), ",");
|
||||
apilog.info("remove_node: host_id={} ignore_nodes={}", host_id, ignore_nodes_strs);
|
||||
auto ignore_nodes = std::list<gms::inet_address>();
|
||||
for (std::string n : ignore_nodes_strs) {
|
||||
try {
|
||||
@@ -789,6 +796,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
});
|
||||
|
||||
ss::drain.set(r, [&ss](std::unique_ptr<request> req) {
|
||||
apilog.info("drain");
|
||||
return ss.local().drain().then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
@@ -822,12 +830,14 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
});
|
||||
|
||||
ss::stop_gossiping.set(r, [&ss](std::unique_ptr<request> req) {
|
||||
apilog.info("stop_gossiping");
|
||||
return ss.local().stop_gossiping().then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
ss::start_gossiping.set(r, [&ss](std::unique_ptr<request> req) {
|
||||
apilog.info("start_gossiping");
|
||||
return ss.local().start_gossiping().then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
@@ -930,6 +940,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
|
||||
ss::rebuild.set(r, [&ss](std::unique_ptr<request> req) {
|
||||
auto source_dc = req->get_query_param("source_dc");
|
||||
apilog.info("rebuild: source_dc={}", source_dc);
|
||||
return ss.local().rebuild(std::move(source_dc)).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
@@ -966,6 +977,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
// FIXME: We should truncate schema tables if more than one node in the cluster.
|
||||
auto& sp = service::get_storage_proxy();
|
||||
auto& fs = sp.local().features();
|
||||
apilog.info("reset_local_schema");
|
||||
return db::schema_tables::recalculate_schema_version(sys_ks, sp, fs).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
@@ -973,6 +985,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
|
||||
ss::set_trace_probability.set(r, [](std::unique_ptr<request> req) {
|
||||
auto probability = req->get_query_param("probability");
|
||||
apilog.info("set_trace_probability: probability={}", probability);
|
||||
return futurize_invoke([probability] {
|
||||
double real_prob = std::stod(probability.c_str());
|
||||
return tracing::tracing::tracing_instance().invoke_on_all([real_prob] (auto& local_tracing) {
|
||||
@@ -1010,6 +1023,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
auto ttl = req->get_query_param("ttl");
|
||||
auto threshold = req->get_query_param("threshold");
|
||||
auto fast = req->get_query_param("fast");
|
||||
apilog.info("set_slow_query: enable={} ttl={} threshold={} fast={}", enable, ttl, threshold, fast);
|
||||
try {
|
||||
return tracing::tracing::tracing_instance().invoke_on_all([enable, ttl, threshold, fast] (auto& local_tracing) {
|
||||
if (threshold != "") {
|
||||
@@ -1036,6 +1050,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto tables = parse_tables(keyspace, ctx, req->query_parameters, "cf");
|
||||
|
||||
apilog.info("enable_auto_compaction: keyspace={} tables={}", keyspace, tables);
|
||||
return set_tables_autocompaction(ctx, keyspace, tables, true);
|
||||
});
|
||||
|
||||
@@ -1043,6 +1058,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto tables = parse_tables(keyspace, ctx, req->query_parameters, "cf");
|
||||
|
||||
apilog.info("disable_auto_compaction: keyspace={} tables={}", keyspace, tables);
|
||||
return set_tables_autocompaction(ctx, keyspace, tables, false);
|
||||
});
|
||||
|
||||
|
||||
@@ -80,8 +80,10 @@ struct compaction_data {
|
||||
}
|
||||
|
||||
void stop(sstring reason) {
|
||||
stop_requested = std::move(reason);
|
||||
abort.request_abort();
|
||||
if (!abort.abort_requested()) {
|
||||
stop_requested = std::move(reason);
|
||||
abort.request_abort();
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -842,6 +842,20 @@ future<> compaction_manager::really_do_stop() {
|
||||
cmlog.info("Stopped");
|
||||
}
|
||||
|
||||
template <typename Ex>
|
||||
requires std::is_base_of_v<std::exception, Ex> &&
|
||||
requires (const Ex& ex) {
|
||||
{ ex.code() } noexcept -> std::same_as<const std::error_code&>;
|
||||
}
|
||||
auto swallow_enospc(const Ex& ex) noexcept {
|
||||
if (ex.code().value() != ENOSPC) {
|
||||
return make_exception_future<>(std::make_exception_ptr(ex));
|
||||
}
|
||||
|
||||
cmlog.warn("Got ENOSPC on stop, ignoring...");
|
||||
return make_ready_future<>();
|
||||
}
|
||||
|
||||
void compaction_manager::do_stop() noexcept {
|
||||
if (_state == state::none || _state == state::stopped) {
|
||||
return;
|
||||
@@ -849,7 +863,10 @@ void compaction_manager::do_stop() noexcept {
|
||||
|
||||
try {
|
||||
_state = state::stopped;
|
||||
_stop_future = really_do_stop();
|
||||
_stop_future = really_do_stop()
|
||||
.handle_exception_type([] (const std::system_error& ex) { return swallow_enospc(ex); })
|
||||
.handle_exception_type([] (const storage_io_error& ex) { return swallow_enospc(ex); })
|
||||
;
|
||||
} catch (...) {
|
||||
cmlog.error("Failed to stop the manager: {}", std::current_exception());
|
||||
}
|
||||
@@ -1050,7 +1067,7 @@ public:
|
||||
bool performed() const noexcept {
|
||||
return _performed;
|
||||
}
|
||||
|
||||
private:
|
||||
future<> run_offstrategy_compaction(sstables::compaction_data& cdata) {
|
||||
// This procedure will reshape sstables in maintenance set until it's ready for
|
||||
// integration into main set.
|
||||
@@ -1083,6 +1100,7 @@ public:
|
||||
return desc.sstables.size() ? std::make_optional(std::move(desc)) : std::nullopt;
|
||||
};
|
||||
|
||||
std::exception_ptr err;
|
||||
while (auto desc = get_next_job()) {
|
||||
desc->creator = [this, &new_unused_sstables, &t] (shard_id dummy) {
|
||||
auto sst = t.make_sstable();
|
||||
@@ -1091,7 +1109,16 @@ public:
|
||||
};
|
||||
auto input = boost::copy_range<std::unordered_set<sstables::shared_sstable>>(desc->sstables);
|
||||
|
||||
auto ret = co_await sstables::compact_sstables(std::move(*desc), cdata, t);
|
||||
sstables::compaction_result ret;
|
||||
try {
|
||||
ret = co_await sstables::compact_sstables(std::move(*desc), cdata, t);
|
||||
} catch (sstables::compaction_stopped_exception&) {
|
||||
// If off-strategy compaction stopped on user request, let's not discard the partial work.
|
||||
// Therefore, both un-reshaped and reshaped data will be integrated into main set, allowing
|
||||
// regular compaction to continue from where off-strategy left off.
|
||||
err = std::current_exception();
|
||||
break;
|
||||
}
|
||||
_performed = true;
|
||||
|
||||
// update list of reshape candidates without input but with output added to it
|
||||
@@ -1128,6 +1155,9 @@ public:
|
||||
for (auto& sst : sstables_to_remove) {
|
||||
sst->mark_for_deletion();
|
||||
}
|
||||
if (err) {
|
||||
co_await coroutine::return_exception_ptr(std::move(err));
|
||||
}
|
||||
}
|
||||
protected:
|
||||
virtual future<compaction_stats_opt> do_run() override {
|
||||
|
||||
@@ -409,7 +409,9 @@ public:
|
||||
l0_old_ssts.push_back(std::move(sst));
|
||||
}
|
||||
}
|
||||
_l0_scts.replace_sstables(std::move(l0_old_ssts), std::move(l0_new_ssts));
|
||||
if (l0_old_ssts.size() || l0_new_ssts.size()) {
|
||||
_l0_scts.replace_sstables(std::move(l0_old_ssts), std::move(l0_new_ssts));
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -200,10 +200,8 @@ leveled_compaction_strategy::get_reshaping_job(std::vector<shared_sstable> input
|
||||
|
||||
auto [disjoint, overlapping_sstables] = is_disjoint(level_info[level], tolerance(level));
|
||||
if (!disjoint) {
|
||||
auto ideal_level = ideal_level_for_input(input, max_sstable_size_in_bytes);
|
||||
leveled_manifest::logger.warn("Turns out that level {} is not disjoint, found {} overlapping SSTables, so compacting everything on behalf of {}.{}", level, overlapping_sstables, schema->ks_name(), schema->cf_name());
|
||||
// Unfortunately no good limit to limit input size to max_sstables for LCS major
|
||||
compaction_descriptor desc(std::move(input), iop, ideal_level, max_sstable_size_in_bytes);
|
||||
leveled_manifest::logger.warn("Turns out that level {} is not disjoint, found {} overlapping SSTables, so the level will be entirely compacted on behalf of {}.{}", level, overlapping_sstables, schema->ks_name(), schema->cf_name());
|
||||
compaction_descriptor desc(std::move(level_info[level]), iop, level, max_sstable_size_in_bytes);
|
||||
desc.options = compaction_type_options::make_reshape();
|
||||
return desc;
|
||||
}
|
||||
|
||||
@@ -630,6 +630,8 @@ arg_parser.add_argument('--static-yaml-cpp', dest='staticyamlcpp', action='store
|
||||
help='Link libyaml-cpp statically')
|
||||
arg_parser.add_argument('--tests-debuginfo', action='store', dest='tests_debuginfo', type=int, default=0,
|
||||
help='Enable(1)/disable(0)compiler debug information generation for tests')
|
||||
arg_parser.add_argument('--perf-tests-debuginfo', action='store', dest='perf_tests_debuginfo', type=int, default=0,
|
||||
help='Enable(1)/disable(0)compiler debug information generation for perf tests')
|
||||
arg_parser.add_argument('--python', action='store', dest='python', default='python3',
|
||||
help='Python3 path')
|
||||
arg_parser.add_argument('--split-dwarf', dest='split_dwarf', action='store_true', default=False,
|
||||
@@ -1423,6 +1425,7 @@ linker_flags = linker_flags(compiler=args.cxx)
|
||||
|
||||
dbgflag = '-g -gz' if args.debuginfo else ''
|
||||
tests_link_rule = 'link' if args.tests_debuginfo else 'link_stripped'
|
||||
perf_tests_link_rule = 'link' if args.perf_tests_debuginfo else 'link_stripped'
|
||||
|
||||
# Strip if debuginfo is disabled, otherwise we end up with partial
|
||||
# debug info from the libraries we static link with
|
||||
@@ -1954,7 +1957,8 @@ with open(buildfile, 'w') as f:
|
||||
# So we strip the tests by default; The user can very
|
||||
# quickly re-link the test unstripped by adding a "_g"
|
||||
# to the test name, e.g., "ninja build/release/testname_g"
|
||||
f.write('build $builddir/{}/{}: {}.{} {} | {} {}\n'.format(mode, binary, tests_link_rule, mode, str.join(' ', objs), seastar_dep, seastar_testing_dep))
|
||||
link_rule = perf_tests_link_rule if binary.startswith('test/perf/') else tests_link_rule
|
||||
f.write('build $builddir/{}/{}: {}.{} {} | {} {}\n'.format(mode, binary, link_rule, mode, str.join(' ', objs), seastar_dep, seastar_testing_dep))
|
||||
f.write(' libs = {}\n'.format(local_libs))
|
||||
f.write('build $builddir/{}/{}_g: {}.{} {} | {} {}\n'.format(mode, binary, regular_link_rule, mode, str.join(' ', objs), seastar_dep, seastar_testing_dep))
|
||||
f.write(' libs = {}\n'.format(local_libs))
|
||||
@@ -2070,7 +2074,8 @@ with open(buildfile, 'w') as f:
|
||||
f.write('build {}: cxx.{} {} || {}\n'.format(obj, mode, cc, ' '.join(serializers)))
|
||||
if cc.endswith('Parser.cpp'):
|
||||
# Unoptimized parsers end up using huge amounts of stack space and overflowing their stack
|
||||
flags = '-O1'
|
||||
flags = '-O1' if modes[mode]['optimization-level'] in ['0', 'g', 's'] else ''
|
||||
|
||||
if has_sanitize_address_use_after_scope:
|
||||
flags += ' -fno-sanitize-address-use-after-scope'
|
||||
f.write(' obj_cxxflags = %s\n' % flags)
|
||||
|
||||
@@ -1396,7 +1396,7 @@ serviceLevelOrRoleName returns [sstring name]
|
||||
std::transform($name.begin(), $name.end(), $name.begin(), ::tolower); }
|
||||
| t=STRING_LITERAL { $name = sstring($t.text); }
|
||||
| t=QUOTED_NAME { $name = sstring($t.text); }
|
||||
| k=unreserved_keyword { $name = sstring($t.text);
|
||||
| k=unreserved_keyword { $name = k;
|
||||
std::transform($name.begin(), $name.end(), $name.begin(), ::tolower);}
|
||||
| QMARK {add_recognition_error("Bind variables cannot be used for service levels or role names");}
|
||||
;
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
|
||||
#include "cql3/attributes.hh"
|
||||
#include "cql3/column_identifier.hh"
|
||||
#include <optional>
|
||||
|
||||
namespace cql3 {
|
||||
|
||||
@@ -56,16 +57,16 @@ int64_t attributes::get_timestamp(int64_t now, const query_options& options) {
|
||||
}
|
||||
}
|
||||
|
||||
int32_t attributes::get_time_to_live(const query_options& options) {
|
||||
std::optional<int32_t> attributes::get_time_to_live(const query_options& options) {
|
||||
if (!_time_to_live.has_value())
|
||||
return 0;
|
||||
return std::nullopt;
|
||||
|
||||
cql3::raw_value tval = expr::evaluate(*_time_to_live, options);
|
||||
if (tval.is_null()) {
|
||||
throw exceptions::invalid_request_exception("Invalid null value of TTL");
|
||||
}
|
||||
if (tval.is_unset_value()) {
|
||||
return 0;
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
int32_t ttl;
|
||||
|
||||
@@ -42,7 +42,7 @@ public:
|
||||
|
||||
int64_t get_timestamp(int64_t now, const query_options& options);
|
||||
|
||||
int32_t get_time_to_live(const query_options& options);
|
||||
std::optional<int32_t> get_time_to_live(const query_options& options);
|
||||
|
||||
db::timeout_clock::duration get_timeout(const query_options& options) const;
|
||||
|
||||
|
||||
@@ -1457,7 +1457,7 @@ expression search_and_replace(const expression& e,
|
||||
};
|
||||
},
|
||||
[&] (const binary_operator& oper) -> expression {
|
||||
return binary_operator(recurse(oper.lhs), oper.op, recurse(oper.rhs));
|
||||
return binary_operator(recurse(oper.lhs), oper.op, recurse(oper.rhs), oper.order);
|
||||
},
|
||||
[&] (const column_mutation_attribute& cma) -> expression {
|
||||
return column_mutation_attribute{cma.kind, recurse(cma.column)};
|
||||
|
||||
@@ -22,6 +22,7 @@
|
||||
#include "db/config.hh"
|
||||
#include "data_dictionary/data_dictionary.hh"
|
||||
#include "hashers.hh"
|
||||
#include "utils/error_injection.hh"
|
||||
|
||||
namespace cql3 {
|
||||
|
||||
@@ -599,6 +600,14 @@ query_processor::get_statement(const sstring_view& query, const service::client_
|
||||
std::unique_ptr<raw::parsed_statement>
|
||||
query_processor::parse_statement(const sstring_view& query) {
|
||||
try {
|
||||
{
|
||||
const char* error_injection_key = "query_processor-parse_statement-test_failure";
|
||||
utils::get_local_injector().inject(error_injection_key, [&]() {
|
||||
if (query.find(error_injection_key) != sstring_view::npos) {
|
||||
throw std::runtime_error(error_injection_key);
|
||||
}
|
||||
});
|
||||
}
|
||||
auto statement = util::do_with_parser(query, std::mem_fn(&cql3_parser::CqlParser::query));
|
||||
if (!statement) {
|
||||
throw exceptions::syntax_exception("Parsing failed");
|
||||
|
||||
@@ -81,7 +81,7 @@ public:
|
||||
|
||||
virtual sstring assignment_testable_source_context() const override {
|
||||
auto&& name = _type->field_name(_field);
|
||||
auto sname = sstring(reinterpret_cast<const char*>(name.begin(), name.size()));
|
||||
auto sname = std::string_view(reinterpret_cast<const char*>(name.data()), name.size());
|
||||
return format("{}.{}", _selected, sname);
|
||||
}
|
||||
|
||||
|
||||
@@ -435,7 +435,7 @@ bool result_set_builder::restrictions_filter::do_filter(const selection& selecti
|
||||
clustering_key_prefix ckey = clustering_key_prefix::from_exploded(clustering_key);
|
||||
// FIXME: push to upper layer so it happens once per row
|
||||
auto static_and_regular_columns = expr::get_non_pk_values(selection, static_row, row);
|
||||
return expr::is_satisfied_by(
|
||||
bool multi_col_clustering_satisfied = expr::is_satisfied_by(
|
||||
clustering_columns_restrictions,
|
||||
expr::evaluation_inputs{
|
||||
.partition_key = &partition_key,
|
||||
@@ -444,6 +444,9 @@ bool result_set_builder::restrictions_filter::do_filter(const selection& selecti
|
||||
.selection = &selection,
|
||||
.options = &_options,
|
||||
});
|
||||
if (!multi_col_clustering_satisfied) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
auto static_row_iterator = static_row.iterator();
|
||||
|
||||
@@ -261,6 +261,10 @@ future<shared_ptr<cql_transport::messages::result_message>> batch_statement::do_
|
||||
if (options.getSerialConsistency() == null)
|
||||
throw new InvalidRequestException("Invalid empty serial consistency level");
|
||||
#endif
|
||||
for (size_t i = 0; i < _statements.size(); ++i) {
|
||||
_statements[i].statement->validate_primary_key_restrictions(options.for_statement(i));
|
||||
}
|
||||
|
||||
if (_has_conditions) {
|
||||
++_stats.cas_batches;
|
||||
_stats.statements_in_cas_batches += _statements.size();
|
||||
|
||||
@@ -119,6 +119,9 @@ std::optional<mutation> cas_request::apply(foreign_ptr<lw_shared_ptr<query::resu
|
||||
|
||||
const update_parameters::prefetch_data::row* cas_request::find_old_row(const cas_row_update& op) const {
|
||||
static const clustering_key empty_ckey = clustering_key::make_empty();
|
||||
if (_key.empty()) {
|
||||
throw exceptions::invalid_request_exception("partition key ranges empty - probably caused by an unset value");
|
||||
}
|
||||
const partition_key& pkey = _key.front().start()->value().key().value();
|
||||
// We must ignore statement clustering column restriction when
|
||||
// choosing a row to check the conditions. If there is no
|
||||
@@ -130,6 +133,9 @@ const update_parameters::prefetch_data::row* cas_request::find_old_row(const cas
|
||||
// CREATE TABLE t(p int, c int, s int static, v int, PRIMARY KEY(p, c));
|
||||
// INSERT INTO t(p, s) VALUES(1, 1);
|
||||
// UPDATE t SET v=1 WHERE p=1 AND c=1 IF s=1;
|
||||
if (op.ranges.empty()) {
|
||||
throw exceptions::invalid_request_exception("clustering key ranges empty - probably caused by an unset value");
|
||||
}
|
||||
const clustering_key& ckey = op.ranges.front().start() ? op.ranges.front().start()->value() : empty_ckey;
|
||||
auto row = _rows.find_row(pkey, ckey);
|
||||
if (row == nullptr && !ckey.is_empty() &&
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#include "tombstone_gc.hh"
|
||||
#include "db/per_partition_rate_limit_extension.hh"
|
||||
#include "db/per_partition_rate_limit_options.hh"
|
||||
#include "utils/bloom_calculations.hh"
|
||||
|
||||
#include <boost/algorithm/string/predicate.hpp>
|
||||
|
||||
@@ -152,6 +153,16 @@ void cf_prop_defs::validate(const data_dictionary::database db, sstring ks_name,
|
||||
throw exceptions::configuration_exception(KW_MAX_INDEX_INTERVAL + " must be greater than " + KW_MIN_INDEX_INTERVAL);
|
||||
}
|
||||
|
||||
if (get_simple(KW_BF_FP_CHANCE)) {
|
||||
double bloom_filter_fp_chance = get_double(KW_BF_FP_CHANCE, 0/*not used*/);
|
||||
double min_bloom_filter_fp_chance = utils::bloom_calculations::min_supported_bloom_filter_fp_chance();
|
||||
if (bloom_filter_fp_chance <= min_bloom_filter_fp_chance || bloom_filter_fp_chance > 1.0) {
|
||||
throw exceptions::configuration_exception(format(
|
||||
"{} must be larger than {} and less than or equal to 1.0 (got {})",
|
||||
KW_BF_FP_CHANCE, min_bloom_filter_fp_chance, bloom_filter_fp_chance));
|
||||
}
|
||||
}
|
||||
|
||||
speculative_retry::from_sstring(get_string(KW_SPECULATIVE_RETRY, speculative_retry(speculative_retry::type::NONE, 0).to_sstring()));
|
||||
}
|
||||
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
#include "cql3/util.hh"
|
||||
#include "validation.hh"
|
||||
#include "db/consistency_level_validations.hh"
|
||||
#include <optional>
|
||||
#include <seastar/core/shared_ptr.hh>
|
||||
#include <boost/range/adaptor/transformed.hpp>
|
||||
#include <boost/range/adaptor/map.hpp>
|
||||
@@ -92,8 +93,9 @@ bool modification_statement::is_timestamp_set() const {
|
||||
return attrs->is_timestamp_set();
|
||||
}
|
||||
|
||||
gc_clock::duration modification_statement::get_time_to_live(const query_options& options) const {
|
||||
return gc_clock::duration(attrs->get_time_to_live(options));
|
||||
std::optional<gc_clock::duration> modification_statement::get_time_to_live(const query_options& options) const {
|
||||
std::optional<int32_t> ttl = attrs->get_time_to_live(options);
|
||||
return ttl ? std::make_optional<gc_clock::duration>(*ttl) : std::nullopt;
|
||||
}
|
||||
|
||||
future<> modification_statement::check_access(query_processor& qp, const service::client_state& state) const {
|
||||
@@ -109,9 +111,6 @@ future<> modification_statement::check_access(query_processor& qp, const service
|
||||
|
||||
future<std::vector<mutation>>
|
||||
modification_statement::get_mutations(query_processor& qp, const query_options& options, db::timeout_clock::time_point timeout, bool local, int64_t now, service::query_state& qs) const {
|
||||
if (_restrictions->range_or_slice_eq_null(options)) { // See #7852 and #9290.
|
||||
throw exceptions::invalid_request_exception("Invalid null value in condition for a key column");
|
||||
}
|
||||
auto cl = options.get_consistency();
|
||||
auto json_cache = maybe_prepare_json_cache(options);
|
||||
auto keys = build_partition_keys(options, json_cache);
|
||||
@@ -250,6 +249,12 @@ modification_statement::execute_without_checking_exception_message(query_process
|
||||
return modify_stage(this, seastar::ref(qp), seastar::ref(qs), seastar::cref(options));
|
||||
}
|
||||
|
||||
void modification_statement::validate_primary_key_restrictions(const query_options& options) const {
|
||||
if (_restrictions->range_or_slice_eq_null(options)) { // See #7852 and #9290.
|
||||
throw exceptions::invalid_request_exception("Invalid null value in condition for a key column");
|
||||
}
|
||||
}
|
||||
|
||||
future<::shared_ptr<cql_transport::messages::result_message>>
|
||||
modification_statement::do_execute(query_processor& qp, service::query_state& qs, const query_options& options) const {
|
||||
if (has_conditions() && options.get_protocol_version() == 1) {
|
||||
@@ -260,6 +265,8 @@ modification_statement::do_execute(query_processor& qp, service::query_state& qs
|
||||
|
||||
inc_cql_stats(qs.get_client_state().is_internal());
|
||||
|
||||
validate_primary_key_restrictions(options);
|
||||
|
||||
if (has_conditions()) {
|
||||
return execute_with_condition(qp, qs, options);
|
||||
}
|
||||
|
||||
@@ -128,7 +128,7 @@ public:
|
||||
|
||||
bool is_timestamp_set() const;
|
||||
|
||||
gc_clock::duration get_time_to_live(const query_options& options) const;
|
||||
std::optional<gc_clock::duration> get_time_to_live(const query_options& options) const;
|
||||
|
||||
virtual future<> check_access(query_processor& qp, const service::client_state& state) const override;
|
||||
|
||||
@@ -229,6 +229,8 @@ public:
|
||||
// True if this statement needs to read only static column values to check if it can be applied.
|
||||
bool has_only_static_column_conditions() const { return !_has_regular_column_conditions && _has_static_column_conditions; }
|
||||
|
||||
void validate_primary_key_restrictions(const query_options& options) const;
|
||||
|
||||
virtual future<::shared_ptr<cql_transport::messages::result_message>>
|
||||
execute(query_processor& qp, service::query_state& qs, const query_options& options) const override;
|
||||
|
||||
|
||||
@@ -1499,7 +1499,7 @@ parallelized_select_statement::do_execute(
|
||||
|
||||
command->slice.options.set<query::partition_slice::option::allow_short_read>();
|
||||
auto timeout_duration = get_timeout(state.get_client_state(), options);
|
||||
auto timeout = db::timeout_clock::now() + timeout_duration;
|
||||
auto timeout = lowres_system_clock::now() + timeout_duration;
|
||||
auto reductions = _selection->get_reductions();
|
||||
|
||||
query::forward_request req = {
|
||||
@@ -1571,11 +1571,16 @@ void select_statement::maybe_jsonize_select_clause(data_dictionary::database db,
|
||||
std::vector<data_type> selector_types;
|
||||
std::vector<const column_definition*> defs;
|
||||
selector_names.reserve(_select_clause.size());
|
||||
selector_types.reserve(_select_clause.size());
|
||||
auto selectables = selection::raw_selector::to_selectables(_select_clause, *schema);
|
||||
selection::selector_factories factories(selection::raw_selector::to_selectables(_select_clause, *schema), db, schema, defs);
|
||||
auto selectors = factories.new_instances();
|
||||
for (size_t i = 0; i < selectors.size(); ++i) {
|
||||
selector_names.push_back(selectables[i]->to_string());
|
||||
if (_select_clause[i]->alias) {
|
||||
selector_names.push_back(_select_clause[i]->alias->to_string());
|
||||
} else {
|
||||
selector_names.push_back(selectables[i]->to_string());
|
||||
}
|
||||
selector_types.push_back(selectors[i]->get_type());
|
||||
}
|
||||
|
||||
|
||||
@@ -93,7 +93,7 @@ public:
|
||||
};
|
||||
// Note: value (mutation) only required to contain the rows we are interested in
|
||||
private:
|
||||
const gc_clock::duration _ttl;
|
||||
const std::optional<gc_clock::duration> _ttl;
|
||||
// For operations that require a read-before-write, stores prefetched cell values.
|
||||
// For CAS statements, stores values of conditioned columns.
|
||||
// Is a reference to an outside prefetch_data container since a CAS BATCH statement
|
||||
@@ -106,7 +106,7 @@ public:
|
||||
const query_options& _options;
|
||||
|
||||
update_parameters(const schema_ptr schema_, const query_options& options,
|
||||
api::timestamp_type timestamp, gc_clock::duration ttl, const prefetch_data& prefetched)
|
||||
api::timestamp_type timestamp, std::optional<gc_clock::duration> ttl, const prefetch_data& prefetched)
|
||||
: _ttl(ttl)
|
||||
, _prefetched(prefetched)
|
||||
, _timestamp(timestamp)
|
||||
@@ -127,11 +127,7 @@ public:
|
||||
}
|
||||
|
||||
atomic_cell make_cell(const abstract_type& type, const raw_value_view& value, atomic_cell::collection_member cm = atomic_cell::collection_member::no) const {
|
||||
auto ttl = _ttl;
|
||||
|
||||
if (ttl.count() <= 0) {
|
||||
ttl = _schema->default_time_to_live();
|
||||
}
|
||||
auto ttl = this->ttl();
|
||||
|
||||
return value.with_value([&] (const FragmentedView auto& v) {
|
||||
if (ttl.count() > 0) {
|
||||
@@ -143,11 +139,7 @@ public:
|
||||
};
|
||||
|
||||
atomic_cell make_cell(const abstract_type& type, const managed_bytes_view& value, atomic_cell::collection_member cm = atomic_cell::collection_member::no) const {
|
||||
auto ttl = _ttl;
|
||||
|
||||
if (ttl.count() <= 0) {
|
||||
ttl = _schema->default_time_to_live();
|
||||
}
|
||||
auto ttl = this->ttl();
|
||||
|
||||
if (ttl.count() > 0) {
|
||||
return atomic_cell::make_live(type, _timestamp, value, _local_deletion_time + ttl, ttl, cm);
|
||||
@@ -169,7 +161,7 @@ public:
|
||||
}
|
||||
|
||||
gc_clock::duration ttl() const {
|
||||
return _ttl.count() > 0 ? _ttl : _schema->default_time_to_live();
|
||||
return _ttl.value_or(_schema->default_time_to_live());
|
||||
}
|
||||
|
||||
gc_clock::time_point expiry() const {
|
||||
|
||||
@@ -2031,7 +2031,7 @@ future<> db::commitlog::segment_manager::shutdown() {
|
||||
}
|
||||
}
|
||||
co_await _shutdown_promise->get_shared_future();
|
||||
clogger.info("Commitlog shutdown complete");
|
||||
clogger.debug("Commitlog shutdown complete");
|
||||
}
|
||||
|
||||
void db::commitlog::segment_manager::add_file_to_dispose(named_file f, dispose_mode mode) {
|
||||
@@ -2094,6 +2094,9 @@ future<> db::commitlog::segment_manager::do_pending_deletes() {
|
||||
clogger.debug("Discarding segments {}", ftd);
|
||||
|
||||
for (auto& [f, mode] : ftd) {
|
||||
// `f.remove_file()` resets known_size to 0, so remember the size here,
|
||||
// in order to subtract it from total_size_on_disk accurately.
|
||||
size_t size = f.known_size();
|
||||
try {
|
||||
if (f) {
|
||||
co_await f.close();
|
||||
@@ -2110,7 +2113,6 @@ future<> db::commitlog::segment_manager::do_pending_deletes() {
|
||||
}
|
||||
}
|
||||
|
||||
auto size = f.known_size();
|
||||
auto usage = totals.total_size_on_disk;
|
||||
auto next_usage = usage - size;
|
||||
|
||||
@@ -2144,7 +2146,7 @@ future<> db::commitlog::segment_manager::do_pending_deletes() {
|
||||
// or had such an exception that we consider the file dead
|
||||
// anyway. In either case we _remove_ the file size from
|
||||
// footprint, because it is no longer our problem.
|
||||
totals.total_size_on_disk -= f.known_size();
|
||||
totals.total_size_on_disk -= size;
|
||||
}
|
||||
|
||||
// #8376 - if we had an error in recycling (disk rename?), and no elements
|
||||
|
||||
@@ -899,6 +899,8 @@ db::config::config(std::shared_ptr<db::extensions> exts)
|
||||
"Ignore truncation record stored in system tables as if tables were never truncated.")
|
||||
, force_schema_commit_log(this, "force_schema_commit_log", value_status::Used, false,
|
||||
"Use separate schema commit log unconditionally rater than after restart following discovery of cluster-wide support for it.")
|
||||
, cache_index_pages(this, "cache_index_pages", liveness::LiveUpdate, value_status::Used, true,
|
||||
"Keep SSTable index pages in the global cache after a SSTable read. Expected to improve performance for workloads with big partitions, but may degrade performance for workloads with small partitions.")
|
||||
, default_log_level(this, "default_log_level", value_status::Used)
|
||||
, logger_log_level(this, "logger_log_level", value_status::Used)
|
||||
, log_to_stdout(this, "log_to_stdout", value_status::Used)
|
||||
|
||||
@@ -379,6 +379,8 @@ public:
|
||||
named_value<bool> ignore_truncation_record;
|
||||
named_value<bool> force_schema_commit_log;
|
||||
|
||||
named_value<bool> cache_index_pages;
|
||||
|
||||
seastar::logging_settings logging_settings(const log_cli::options&) const;
|
||||
|
||||
const db::extensions& extensions() const;
|
||||
|
||||
@@ -2020,6 +2020,33 @@ std::vector<shared_ptr<cql3::functions::user_function>> create_functions_from_sc
|
||||
return ret;
|
||||
}
|
||||
|
||||
std::vector<shared_ptr<cql3::functions::user_aggregate>> create_aggregates_from_schema_partition(
|
||||
replica::database& db, lw_shared_ptr<query::result_set> result, lw_shared_ptr<query::result_set> scylla_result) {
|
||||
std::unordered_multimap<sstring, const query::result_set_row*> scylla_aggs;
|
||||
if (scylla_result) {
|
||||
for (const auto& scylla_row : scylla_result->rows()) {
|
||||
auto scylla_agg_name = scylla_row.get_nonnull<sstring>("aggregate_name");
|
||||
scylla_aggs.emplace(scylla_agg_name, &scylla_row);
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<shared_ptr<cql3::functions::user_aggregate>> ret;
|
||||
for (const auto& row : result->rows()) {
|
||||
auto agg_name = row.get_nonnull<sstring>("aggregate_name");
|
||||
auto agg_args = read_arg_types(db, row, row.get_nonnull<sstring>("keyspace_name"));
|
||||
const query::result_set_row *scylla_row_ptr = nullptr;
|
||||
for (auto [it, end] = scylla_aggs.equal_range(agg_name); it != end; ++it) {
|
||||
auto scylla_agg_args = read_arg_types(db, *it->second, it->second->get_nonnull<sstring>("keyspace_name"));
|
||||
if (agg_args == scylla_agg_args) {
|
||||
scylla_row_ptr = it->second;
|
||||
break;
|
||||
}
|
||||
}
|
||||
ret.emplace_back(create_aggregate(db, row, scylla_row_ptr));
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* User type metadata serialization/deserialization
|
||||
*/
|
||||
|
||||
@@ -209,6 +209,8 @@ std::vector<user_type> create_types_from_schema_partition(keyspace_metadata& ks,
|
||||
|
||||
std::vector<shared_ptr<cql3::functions::user_function>> create_functions_from_schema_partition(replica::database& db, lw_shared_ptr<query::result_set> result);
|
||||
|
||||
std::vector<shared_ptr<cql3::functions::user_aggregate>> create_aggregates_from_schema_partition(replica::database& db, lw_shared_ptr<query::result_set> result, lw_shared_ptr<query::result_set> scylla_result);
|
||||
|
||||
std::vector<mutation> make_create_function_mutations(shared_ptr<cql3::functions::user_function> func, api::timestamp_type timestamp);
|
||||
|
||||
std::vector<mutation> make_drop_function_mutations(shared_ptr<cql3::functions::user_function> func, api::timestamp_type timestamp);
|
||||
|
||||
@@ -10,8 +10,6 @@
|
||||
#include "log.hh"
|
||||
#include "utils/latency.hh"
|
||||
|
||||
#include <seastar/core/when_all.hh>
|
||||
|
||||
static logging::logger mylog("row_locking");
|
||||
|
||||
row_locker::row_locker(schema_ptr s)
|
||||
@@ -76,35 +74,32 @@ row_locker::lock_pk(const dht::decorated_key& pk, bool exclusive, db::timeout_cl
|
||||
future<row_locker::lock_holder>
|
||||
row_locker::lock_ck(const dht::decorated_key& pk, const clustering_key_prefix& cpk, bool exclusive, db::timeout_clock::time_point timeout, stats& stats) {
|
||||
mylog.debug("taking shared lock on partition {}, and {} lock on row {} in it", pk, (exclusive ? "exclusive" : "shared"), cpk);
|
||||
auto ck = cpk;
|
||||
// Create a two-level lock entry for the partition if it doesn't exist already.
|
||||
auto i = _two_level_locks.try_emplace(pk, this).first;
|
||||
// The two-level lock entry we've just created is guaranteed to be kept alive as long as it's locked.
|
||||
// Initiating read locking in the background below ensures that even if the two-level lock is currently
|
||||
// write-locked, releasing the write-lock will synchronously engage any waiting
|
||||
// locks and will keep the entry alive.
|
||||
future<lock_type::holder> lock_partition = i->second._partition_lock.hold_read_lock(timeout);
|
||||
auto j = i->second._row_locks.find(cpk);
|
||||
if (j == i->second._row_locks.end()) {
|
||||
// Not yet locked, need to create the lock. This makes a copy of cpk.
|
||||
try {
|
||||
j = i->second._row_locks.emplace(cpk, lock_type()).first;
|
||||
} catch(...) {
|
||||
// If this emplace() failed, e.g., out of memory, we fail. We
|
||||
// could do nothing - the partition lock we already started
|
||||
// taking will be unlocked automatically after being locked.
|
||||
// But it's better form to wait for the work we started, and it
|
||||
// will also allow us to remove the hash-table row we added.
|
||||
return lock_partition.then([ex = std::current_exception()] (auto lock) {
|
||||
// The lock is automatically released when "lock" goes out of scope.
|
||||
// TODO: unlock (lock = {}) now, search for the partition in the
|
||||
// hash table (we know it's still there, because we held the lock until
|
||||
// now) and remove the unused lock from the hash table if still unused.
|
||||
return make_exception_future<row_locker::lock_holder>(std::current_exception());
|
||||
});
|
||||
}
|
||||
}
|
||||
single_lock_stats &single_lock_stats = exclusive ? stats.exclusive_row : stats.shared_row;
|
||||
single_lock_stats.operations_currently_waiting_for_lock++;
|
||||
utils::latency_counter waiting_latency;
|
||||
waiting_latency.start();
|
||||
future<lock_type::holder> lock_row = exclusive ? j->second.hold_write_lock(timeout) : j->second.hold_read_lock(timeout);
|
||||
return when_all_succeed(std::move(lock_partition), std::move(lock_row))
|
||||
.then_unpack([this, pk = &i->first, cpk = &j->first, exclusive, &single_lock_stats, waiting_latency = std::move(waiting_latency)] (auto lock1, auto lock2) mutable {
|
||||
return lock_partition.then([this, pk = &i->first, row_locks = &i->second._row_locks, ck = std::move(ck), exclusive, &single_lock_stats, waiting_latency = std::move(waiting_latency), timeout] (auto lock1) mutable {
|
||||
auto j = row_locks->find(ck);
|
||||
if (j == row_locks->end()) {
|
||||
// Not yet locked, need to create the lock.
|
||||
j = row_locks->emplace(std::move(ck), lock_type()).first;
|
||||
}
|
||||
auto* cpk = &j->first;
|
||||
auto& row_lock = j->second;
|
||||
// Like to the two-level lock entry above, the row_lock entry we've just created
|
||||
// is guaranteed to be kept alive as long as it's locked.
|
||||
// Initiating read/write locking in the background below ensures that.
|
||||
auto lock_row = exclusive ? row_lock.hold_write_lock(timeout) : row_lock.hold_read_lock(timeout);
|
||||
return lock_row.then([this, pk, cpk, exclusive, &single_lock_stats, waiting_latency = std::move(waiting_latency), lock1 = std::move(lock1)] (auto lock2) mutable {
|
||||
// FIXME: indentation
|
||||
lock1.release();
|
||||
lock2.release();
|
||||
waiting_latency.stop();
|
||||
@@ -112,6 +107,7 @@ row_locker::lock_ck(const dht::decorated_key& pk, const clustering_key_prefix& c
|
||||
single_lock_stats.lock_acquisitions++;
|
||||
single_lock_stats.operations_currently_waiting_for_lock--;
|
||||
return lock_holder(this, pk, cpk, exclusive);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -123,6 +123,9 @@ const column_definition* view_info::view_column(const column_definition& base_de
|
||||
|
||||
void view_info::set_base_info(db::view::base_info_ptr base_info) {
|
||||
_base_info = std::move(base_info);
|
||||
// Forget the cached objects which may refer to the base schema.
|
||||
_select_statement = nullptr;
|
||||
_partition_slice = std::nullopt;
|
||||
}
|
||||
|
||||
// A constructor for a base info that can facilitate reads and writes from the materialized view.
|
||||
@@ -868,13 +871,18 @@ void view_updates::generate_update(
|
||||
bool same_row = true;
|
||||
for (auto col_id : col_ids) {
|
||||
auto* after = update.cells().find_cell(col_id);
|
||||
// Note: multi-cell columns can't be part of the primary key.
|
||||
auto& cdef = _base->regular_column_at(col_id);
|
||||
if (existing) {
|
||||
auto* before = existing->cells().find_cell(col_id);
|
||||
// Note that this cell is necessarily atomic, because col_ids are
|
||||
// view key columns, and keys must be atomic.
|
||||
if (before && before->as_atomic_cell(cdef).is_live()) {
|
||||
if (after && after->as_atomic_cell(cdef).is_live()) {
|
||||
auto cmp = compare_atomic_cell_for_merge(before->as_atomic_cell(cdef), after->as_atomic_cell(cdef));
|
||||
// We need to compare just the values of the keys, not
|
||||
// metadata like the timestamp. This is because below,
|
||||
// if the old and new view row have the same key, we need
|
||||
// to be sure to reach the update_entry() case.
|
||||
auto cmp = compare_unsigned(before->as_atomic_cell(cdef).value(), after->as_atomic_cell(cdef).value());
|
||||
if (cmp != 0) {
|
||||
same_row = false;
|
||||
}
|
||||
@@ -894,7 +902,13 @@ void view_updates::generate_update(
|
||||
if (same_row) {
|
||||
update_entry(base_key, update, *existing, now);
|
||||
} else {
|
||||
replace_entry(base_key, update, *existing, now);
|
||||
// This code doesn't work if the old and new view row have the
|
||||
// same key, because if they do we get both data and tombstone
|
||||
// for the same timestamp (now) and the tombstone wins. This
|
||||
// is why we need the "same_row" case above - it's not just a
|
||||
// performance optimization.
|
||||
delete_old_entry(base_key, *existing, update, now);
|
||||
create_entry(base_key, update, now);
|
||||
}
|
||||
} else {
|
||||
delete_old_entry(base_key, *existing, update, now);
|
||||
@@ -938,8 +952,12 @@ future<stop_iteration> view_update_builder::stop() const {
|
||||
return make_ready_future<stop_iteration>(stop_iteration::yes);
|
||||
}
|
||||
|
||||
future<utils::chunked_vector<frozen_mutation_and_schema>> view_update_builder::build_some() {
|
||||
future<std::optional<utils::chunked_vector<frozen_mutation_and_schema>>> view_update_builder::build_some() {
|
||||
(void)co_await advance_all();
|
||||
if (!_update && !_existing) {
|
||||
// Tell the caller there is no more data to build.
|
||||
co_return std::nullopt;
|
||||
}
|
||||
bool do_advance_updates = false;
|
||||
bool do_advance_existings = false;
|
||||
if (_update && _update->is_partition_start()) {
|
||||
@@ -2056,15 +2074,20 @@ public:
|
||||
// Called in the context of a seastar::thread.
|
||||
void view_builder::execute(build_step& step, exponential_backoff_retry r) {
|
||||
gc_clock::time_point now = gc_clock::now();
|
||||
auto consumer = compact_for_query_v2<view_builder::consumer>(
|
||||
auto compaction_state = make_lw_shared<compact_for_query_state_v2>(
|
||||
*step.reader.schema(),
|
||||
now,
|
||||
step.pslice,
|
||||
batch_size,
|
||||
query::max_partitions,
|
||||
view_builder::consumer{*this, step, now});
|
||||
consumer.consume_new_partition(step.current_key); // Initialize the state in case we're resuming a partition
|
||||
query::max_partitions);
|
||||
auto consumer = compact_for_query_v2<view_builder::consumer>(compaction_state, view_builder::consumer{*this, step, now});
|
||||
auto built = step.reader.consume_in_thread(std::move(consumer));
|
||||
if (auto ds = std::move(*compaction_state).detach_state()) {
|
||||
if (ds->current_tombstone) {
|
||||
step.reader.unpop_mutation_fragment(mutation_fragment_v2(*step.reader.schema(), step.reader.permit(), std::move(*ds->current_tombstone)));
|
||||
}
|
||||
step.reader.unpop_mutation_fragment(mutation_fragment_v2(*step.reader.schema(), step.reader.permit(), std::move(ds->partition_start)));
|
||||
}
|
||||
|
||||
_as.check();
|
||||
|
||||
@@ -2146,24 +2169,28 @@ update_backlog node_update_backlog::add_fetch(unsigned shard, update_backlog bac
|
||||
return std::max(backlog, _max.load(std::memory_order_relaxed));
|
||||
}
|
||||
|
||||
future<bool> check_view_build_ongoing(db::system_distributed_keyspace& sys_dist_ks, const sstring& ks_name, const sstring& cf_name) {
|
||||
return sys_dist_ks.view_status(ks_name, cf_name).then([] (std::unordered_map<utils::UUID, sstring>&& view_statuses) {
|
||||
return boost::algorithm::any_of(view_statuses | boost::adaptors::map_values, [] (const sstring& view_status) {
|
||||
return view_status == "STARTED";
|
||||
future<bool> check_view_build_ongoing(db::system_distributed_keyspace& sys_dist_ks, const locator::token_metadata& tm, const sstring& ks_name,
|
||||
const sstring& cf_name) {
|
||||
using view_statuses_type = std::unordered_map<utils::UUID, sstring>;
|
||||
return sys_dist_ks.view_status(ks_name, cf_name).then([&tm] (view_statuses_type&& view_statuses) {
|
||||
return boost::algorithm::any_of(view_statuses, [&tm] (const view_statuses_type::value_type& view_status) {
|
||||
// Only consider status of known hosts.
|
||||
return view_status.second == "STARTED" && tm.get_endpoint_for_host_id(view_status.first);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
future<bool> check_needs_view_update_path(db::system_distributed_keyspace& sys_dist_ks, const replica::table& t, streaming::stream_reason reason) {
|
||||
future<bool> check_needs_view_update_path(db::system_distributed_keyspace& sys_dist_ks, const locator::token_metadata& tm, const replica::table& t,
|
||||
streaming::stream_reason reason) {
|
||||
if (is_internal_keyspace(t.schema()->ks_name())) {
|
||||
return make_ready_future<bool>(false);
|
||||
}
|
||||
if (reason == streaming::stream_reason::repair && !t.views().empty()) {
|
||||
return make_ready_future<bool>(true);
|
||||
}
|
||||
return do_with(t.views(), [&sys_dist_ks] (auto& views) {
|
||||
return do_with(t.views(), [&sys_dist_ks, &tm] (auto& views) {
|
||||
return map_reduce(views,
|
||||
[&sys_dist_ks] (const view_ptr& view) { return check_view_build_ongoing(sys_dist_ks, view->ks_name(), view->cf_name()); },
|
||||
[&sys_dist_ks, &tm] (const view_ptr& view) { return check_view_build_ongoing(sys_dist_ks, tm, view->ks_name(), view->cf_name()); },
|
||||
false,
|
||||
std::logical_or<bool>());
|
||||
});
|
||||
|
||||
@@ -154,10 +154,7 @@ private:
|
||||
void delete_old_entry(const partition_key& base_key, const clustering_row& existing, const clustering_row& update, gc_clock::time_point now);
|
||||
void do_delete_old_entry(const partition_key& base_key, const clustering_row& existing, const clustering_row& update, gc_clock::time_point now);
|
||||
void update_entry(const partition_key& base_key, const clustering_row& update, const clustering_row& existing, gc_clock::time_point now);
|
||||
void replace_entry(const partition_key& base_key, const clustering_row& update, const clustering_row& existing, gc_clock::time_point now) {
|
||||
create_entry(base_key, update, now);
|
||||
delete_old_entry(base_key, existing, update, now);
|
||||
}
|
||||
void update_entry_for_computed_column(const partition_key& base_key, const clustering_row& update, const std::optional<clustering_row>& existing, gc_clock::time_point now);
|
||||
};
|
||||
|
||||
class view_update_builder {
|
||||
@@ -188,7 +185,15 @@ public:
|
||||
}
|
||||
view_update_builder(view_update_builder&& other) noexcept = default;
|
||||
|
||||
future<utils::chunked_vector<frozen_mutation_and_schema>> build_some();
|
||||
|
||||
// build_some() works on batches of 100 (max_rows_for_view_updates)
|
||||
// updated rows, but can_skip_view_updates() can decide that some of
|
||||
// these rows do not effect the view, and as a result build_some() can
|
||||
// fewer than 100 rows - in extreme cases even zero (see issue #12297).
|
||||
// So we can't use an empty returned vector to signify that the view
|
||||
// update building is done - and we wrap the return value in an
|
||||
// std::optional, which is disengaged when the iteration is done.
|
||||
future<std::optional<utils::chunked_vector<frozen_mutation_and_schema>>> build_some();
|
||||
|
||||
future<> close() noexcept;
|
||||
|
||||
|
||||
@@ -22,9 +22,13 @@ class system_distributed_keyspace;
|
||||
|
||||
}
|
||||
|
||||
namespace locator {
|
||||
class token_metadata;
|
||||
}
|
||||
|
||||
namespace db::view {
|
||||
|
||||
future<bool> check_view_build_ongoing(db::system_distributed_keyspace& sys_dist_ks, const sstring& ks_name, const sstring& cf_name);
|
||||
future<bool> check_needs_view_update_path(db::system_distributed_keyspace& sys_dist_ks, const replica::table& t, streaming::stream_reason reason);
|
||||
future<bool> check_needs_view_update_path(db::system_distributed_keyspace& sys_dist_ks, const locator::token_metadata& tm, const replica::table& t,
|
||||
streaming::stream_reason reason);
|
||||
|
||||
}
|
||||
|
||||
3
dist/common/scripts/scylla_coredump_setup
vendored
3
dist/common/scripts/scylla_coredump_setup
vendored
@@ -42,7 +42,8 @@ if __name__ == '__main__':
|
||||
if systemd_unit.available('systemd-coredump@.service'):
|
||||
dropin = '''
|
||||
[Service]
|
||||
TimeoutStartSec=infinity
|
||||
RuntimeMaxSec=infinity
|
||||
TimeoutSec=infinity
|
||||
'''[1:-1]
|
||||
os.makedirs('/etc/systemd/system/systemd-coredump@.service.d', exist_ok=True)
|
||||
with open('/etc/systemd/system/systemd-coredump@.service.d/timeout.conf', 'w') as f:
|
||||
|
||||
23
dist/common/scripts/scylla_raid_setup
vendored
23
dist/common/scripts/scylla_raid_setup
vendored
@@ -16,7 +16,7 @@ import stat
|
||||
import distro
|
||||
from pathlib import Path
|
||||
from scylla_util import *
|
||||
from subprocess import run
|
||||
from subprocess import run, SubprocessError
|
||||
|
||||
if __name__ == '__main__':
|
||||
if os.getuid() > 0:
|
||||
@@ -137,7 +137,9 @@ if __name__ == '__main__':
|
||||
# stalling. The minimum block size for crc enabled filesystems is 1024,
|
||||
# and it also cannot be smaller than the sector size.
|
||||
block_size = max(1024, sector_size)
|
||||
run('udevadm settle', shell=True, check=True)
|
||||
run(f'mkfs.xfs -b size={block_size} {fsdev} -f -K', shell=True, check=True)
|
||||
run('udevadm settle', shell=True, check=True)
|
||||
|
||||
if is_debian_variant():
|
||||
confpath = '/etc/mdadm/mdadm.conf'
|
||||
@@ -153,6 +155,11 @@ if __name__ == '__main__':
|
||||
os.makedirs(mount_at, exist_ok=True)
|
||||
|
||||
uuid = out(f'blkid -s UUID -o value {fsdev}')
|
||||
if not uuid:
|
||||
raise Exception(f'Failed to get UUID of {fsdev}')
|
||||
|
||||
uuidpath = f'/dev/disk/by-uuid/{uuid}'
|
||||
|
||||
after = 'local-fs.target'
|
||||
wants = ''
|
||||
if raid and args.raid_level != '0':
|
||||
@@ -169,7 +176,7 @@ After={after}{wants}
|
||||
DefaultDependencies=no
|
||||
|
||||
[Mount]
|
||||
What=/dev/disk/by-uuid/{uuid}
|
||||
What={uuidpath}
|
||||
Where={mount_at}
|
||||
Type=xfs
|
||||
Options=noatime{opt_discard}
|
||||
@@ -191,8 +198,16 @@ WantedBy=multi-user.target
|
||||
systemd_unit.reload()
|
||||
if args.raid_level != '0':
|
||||
md_service.start()
|
||||
mount = systemd_unit(mntunit_bn)
|
||||
mount.start()
|
||||
try:
|
||||
mount = systemd_unit(mntunit_bn)
|
||||
mount.start()
|
||||
except SubprocessError as e:
|
||||
if not os.path.exists(uuidpath):
|
||||
print(f'\nERROR: {uuidpath} is not found\n')
|
||||
elif not stat.S_ISBLK(os.stat(uuidpath).st_mode):
|
||||
print(f'\nERROR: {uuidpath} is not block device\n')
|
||||
raise e
|
||||
|
||||
if args.enable_on_nextboot:
|
||||
mount.enable()
|
||||
uid = pwd.getpwnam('scylla').pw_uid
|
||||
|
||||
4
dist/common/scripts/scylla_setup
vendored
4
dist/common/scripts/scylla_setup
vendored
@@ -214,7 +214,7 @@ if __name__ == '__main__':
|
||||
help='skip raid setup')
|
||||
parser.add_argument('--raid-level-5', action='store_true', default=False,
|
||||
help='use RAID5 for RAID volume')
|
||||
parser.add_argument('--online-discard', default=True,
|
||||
parser.add_argument('--online-discard', default=1, choices=[0, 1], type=int,
|
||||
help='Configure XFS to discard unused blocks as soon as files are deleted')
|
||||
parser.add_argument('--nic',
|
||||
help='specify NIC')
|
||||
@@ -458,7 +458,7 @@ if __name__ == '__main__':
|
||||
args.no_raid_setup = not raid_setup
|
||||
if raid_setup:
|
||||
level = '5' if raid_level_5 else '0'
|
||||
run_setup_script('RAID', f'scylla_raid_setup --disks {disks} --enable-on-nextboot --raid-level={level} --online-discard={int(online_discard)}')
|
||||
run_setup_script('RAID', f'scylla_raid_setup --disks {disks} --enable-on-nextboot --raid-level={level} --online-discard={online_discard}')
|
||||
|
||||
coredump_setup = interactive_ask_service('Do you want to enable coredumps?', 'Yes - sets up coredump to allow a post-mortem analysis of the Scylla state just prior to a crash. No - skips this step.', coredump_setup)
|
||||
args.no_coredump_setup = not coredump_setup
|
||||
|
||||
7
dist/docker/scyllasetup.py
vendored
7
dist/docker/scyllasetup.py
vendored
@@ -68,7 +68,12 @@ class ScyllaSetup:
|
||||
|
||||
def cqlshrc(self):
|
||||
home = os.environ['HOME']
|
||||
hostname = subprocess.check_output(['hostname', '-i']).decode('ascii').strip()
|
||||
if self._rpcAddress:
|
||||
hostname = self._rpcAddress
|
||||
elif self._listenAddress:
|
||||
hostname = self._listenAddress
|
||||
else:
|
||||
hostname = subprocess.check_output(['hostname', '-i']).decode('ascii').strip()
|
||||
with open("%s/.cqlshrc" % home, "w") as cqlshrc:
|
||||
cqlshrc.write("[connection]\nhostname = %s\n" % hostname)
|
||||
|
||||
|
||||
@@ -1,58 +1,10 @@
|
||||
### a dictionary of redirections
|
||||
#old path: new path
|
||||
|
||||
# removing the old Monitoring Stack documentation from the ScyllaDB docs
|
||||
|
||||
/stable/operating-scylla/monitoring/index.html: https://monitoring.docs.scylladb.com/stable/
|
||||
/stable/upgrade/upgrade-monitor/index.html: https://monitoring.docs.scylladb.com/stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-monitor/upgrade-guide-from-monitoring-1.x-to-monitoring-2.x.html: https://monitoring.docs.scylladb.com/stable/upgrade/upgrade-guide-from-monitoring-1.x-to-monitoring-2.x.html
|
||||
/stable/upgrade/upgrade-monitor/upgrade-guide-from-monitoring-2.x-to-monitoring-2.y.html: https://monitoring.docs.scylladb.com/stable/upgrade/upgrade-guide-from-monitoring-2.x-to-monitoring-2.y.html
|
||||
/stable/upgrade/upgrade-monitor/upgrade-guide-from-monitoring-2.x-to-monitoring-3.y.html: https://monitoring.docs.scylladb.com/stable/upgrade/upgrade-guide-from-monitoring-2.x-to-monitoring-3.y.html
|
||||
/stable/upgrade/upgrade-monitor/upgrade-guide-from-monitoring-3.x-to-monitoring-3.y.html: https://monitoring.docs.scylladb.com/stable/upgrade/upgrade-guide-from-monitoring-3.x-to-monitoring-3.y.html
|
||||
|
||||
# removing the old Operator documentation from the ScyllaDB docs
|
||||
|
||||
/stable/operating-scylla/scylla-operator/index.html: https://operator.docs.scylladb.com/stable/
|
||||
|
||||
### removing the old Scylla Manager documentation from the ScyllaDB docs
|
||||
|
||||
/stable/operating-scylla/manager/index.html: https://manager.docs.scylladb.com/
|
||||
/stable/upgrade/upgrade-manager/index.html: https://manager.docs.scylladb.com/stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-manager/upgrade-guide-maintenance-1.x.y-to-1.x.z/index.html: https://manager.docs.scylladb.com/stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-manager/upgrade-guide-maintenance-1.x.y-to-1.x.z/upgrade-guide-from-manager-1.x.y-to-1.x.z-CentOS.html: https://manager.docs.scylladb.com/stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-manager/upgrade-guide-maintenance-1.x.y-to-1.x.z/upgrade-guide-from-manager-1.x.y-to-1.x.z-ubuntu.html: https://manager.docs.scylladb.com/stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-manager/upgrade-guide-from-manager-1.0.x-to-1.1.x.html: https://manager.docs.scylladb.com/stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-manager/upgrade-guide-from-manager-1.1.x-to-1.2.x.html: https://manager.docs.scylladb.com/stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-manager/upgrade-guide-from-1.2-to-1.3/index.html: https://manager.docs.scylladb.com/stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-manager/upgrade-guide-from-1.2-to-1.3/upgrade-guide-from-manager-1.2.x-to-1.3.x-CentOS.html: https://manager.docs.scylladb.com/stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-manager/upgrade-guide-from-1.2-to-1.3/upgrade-guide-from-manager-1.2.x-to-1.3.x-ubuntu.html: https://manager.docs.scylladb.com/stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-manager/upgrade-guide-from-1.2-to-1.3/manager-metric-update-1.2-to-1.3.html: https://manager.docs.scylladb.com/stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-manager/upgrade-guide-from-1.3-to-1.4/index.html: https://manager.docs.scylladb.com/stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-manager/upgrade-guide-from-1.3-to-1.4/upgrade-guide-from-manager-1.3.x-to-1.4.x-CentOS.html: https://manager.docs.scylladb.com/stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-manager/upgrade-guide-from-1.3-to-1.4/upgrade-guide-from-manager-1.3.x-to-1.4.x-ubuntu.html: https://manager.docs.scylladb.com/stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-manager/upgrade-guide-from-1.3-to-1.4/manager-metric-update-1.3-to-1.4.html: https://manager.docs.scylladb.com/stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-manager/upgrade-guide-from-1.4-to-2.0/index.html: https://manager.docs.scylladb.com/stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-manager/upgrade-guide-from-1.4-to-2.0/upgrade-guide-from-manager-1.4.x-to-2.0.x.html: https://manager.docs.scylladb.com/stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-manager/upgrade-guide-from-1.4-to-2.0/manager-metric-update-1.4-to-2.0.html: https://manager.docs.scylladb.com/stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-manager/upgrade-guide-from-2.x.a-to-2.y.b/index.html: https://manager.docs.scylladb.com/stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-manager/upgrade-guide-from-2.x.a-to-2.y.b/upgrade-2.x.a-to-2.y.b.html: https://manager.docs.scylladb.com/stable/upgrade/index.html
|
||||
/stable/upgrade/upgrade-manager/upgrade-guide-from-2.x.a-to-2.y.b/upgrade-row-level-repair.html: https://www.scylladb.com/2019/08/13/scylla-open-source-3-1-efficiently-maintaining-consistency-with-row-level-repair/
|
||||
stable/operating-scylla/manager/2.1/index.html: https://manager.docs.scylladb.com/
|
||||
/stable/operating-scylla/manager/2.1/architecture.html: https://manager.docs.scylladb.com/
|
||||
/stable/operating-scylla/manager/2.1/install.html: https://manager.docs.scylladb.com/stable/install-scylla-manager.html
|
||||
/stable/operating-scylla/manager/2.1/install-agent.html: https://manager.docs.scylladb.com/stable/install-scylla-manager-agent.html
|
||||
/stable/operating-scylla/manager/2.1/add-a-cluster.html: https://manager.docs.scylladb.com/stable/add-a-cluster.html
|
||||
/stable/operating-scylla/manager/2.1/repair.html: https://manager.docs.scylladb.com/stable/repair/index.html
|
||||
/stable/operating-scylla/manager/2.1/backup.html: https://manager.docs.scylladb.com/stable/backup/index.html
|
||||
/stable/operating-scylla/manager/2.1/extract-schema-from-backup.html: https://manager.docs.scylladb.com/stable/sctool/backup.html
|
||||
/stable/operating-scylla/manager/2.1/restore-a-backup.html: https://manager.docs.scylladb.com/stable/restore/index.html
|
||||
/stable/operating-scylla/manager/2.1/health-check.html: https://manager.docs.scylladb.com/stable/health-check.html
|
||||
/stable/operating-scylla/manager/2.1/sctool.html: https://manager.docs.scylladb.com/stable/sctool/index.html
|
||||
/stable/operating-scylla/manager/2.1/monitoring-manager-integration.html: https://manager.docs.scylladb.com/stable/scylla-monitoring.html
|
||||
/stable/operating-scylla/manager/2.1/use-a-remote-db.html: https://manager.docs.scylladb.com/
|
||||
/stable/operating-scylla/manager/2.1/configuration-file.html: https://manager.docs.scylladb.com/stable/config/scylla-manager-config.html
|
||||
/stable/operating-scylla/manager/2.1/agent-configuration-file.html: https://manager.docs.scylladb.com/stable/config/scylla-manager-agent-config.html
|
||||
|
||||
### moving the CQL reference files to the new cql folder
|
||||
|
||||
/stable/getting-started/ddl.html: /stable/cql/ddl.html
|
||||
@@ -1108,14 +1060,14 @@ tls-ssl/index.html: /stable/operating-scylla/security
|
||||
/using-scylla/integrations/integration_kairos/index.html: /stable/using-scylla/integrations/integration-kairos
|
||||
/upgrade/ami_upgrade/index.html: /stable/upgrade/ami-upgrade
|
||||
|
||||
/scylla-cloud/cloud-setup/gcp-vpc-peering/index.html: /stable/scylla-cloud/cloud-setup/GCP/gcp-vpc-peering
|
||||
/scylla-cloud/cloud-setup/GCP/gcp-vcp-peering/index.html: /stable/scylla-cloud/cloud-setup/GCP/gcp-vpc-peering
|
||||
/scylla-cloud/cloud-setup/gcp-vpc-peering/index.html: https://cloud.docs.scylladb.com/stable/cloud-setup/gcp-vpc-peering.html
|
||||
/scylla-cloud/cloud-setup/GCP/gcp-vcp-peering/index.html: https://cloud.docs.scylladb.com/stable/cloud-setup/gcp-vpc-peering.html
|
||||
|
||||
# move scylla cloud for AWS to dedicated directory
|
||||
/scylla-cloud/cloud-setup/aws-vpc-peering/index.html: /stable/scylla-cloud/cloud-setup/AWS/aws-vpc-peering
|
||||
/scylla-cloud/cloud-setup/cloud-prom-proxy/index.html: /stable/scylla-cloud/cloud-setup/AWS/cloud-prom-proxy
|
||||
/scylla-cloud/cloud-setup/outposts/index.html: /stable/scylla-cloud/cloud-setup/AWS/outposts
|
||||
/scylla-cloud/cloud-setup/scylla-cloud-byoa/index.html: /stable/scylla-cloud/cloud-setup/AWS/scylla-cloud-byoa
|
||||
/scylla-cloud/cloud-setup/aws-vpc-peering/index.html: https://cloud.docs.scylladb.com/stable/cloud-setup/aws-vpc-peering.html
|
||||
/scylla-cloud/cloud-setup/cloud-prom-proxy/index.html: https://cloud.docs.scylladb.com/stable/monitoring/cloud-prom-proxy.html
|
||||
/scylla-cloud/cloud-setup/outposts/index.html: https://cloud.docs.scylladb.com/stable/cloud-setup/outposts.html
|
||||
/scylla-cloud/cloud-setup/scylla-cloud-byoa/index.html: https://cloud.docs.scylladb.com/stable/cloud-setup/scylla-cloud-byoa.html
|
||||
/scylla-cloud/cloud-services/scylla_cloud_costs/index.html: /stable/scylla-cloud/cloud-services/scylla-cloud-costs
|
||||
/scylla-cloud/cloud-services/scylla_cloud_managin_versions/index.html: /stable/scylla-cloud/cloud-services/scylla-cloud-managin-versions
|
||||
/scylla-cloud/cloud-services/scylla_cloud_support_alerts_sla/index.html: /stable/scylla-cloud/cloud-services/scylla-cloud-support-alerts-sla
|
||||
|
||||
@@ -134,7 +134,7 @@ isolation policy for a specific table can be overridden by tagging the table
|
||||
This section provides only a very brief introduction to Alternator's
|
||||
design. A much more detailed document about the features of the DynamoDB
|
||||
API and how they are, or could be, implemented in Scylla can be found in:
|
||||
https://docs.google.com/document/d/1i4yjF5OSAazAY_-T8CBce9-2ykW4twx_E_Nt2zDoOVs
|
||||
<https://docs.google.com/document/d/1i4yjF5OSAazAY_-T8CBce9-2ykW4twx_E_Nt2zDoOVs>
|
||||
|
||||
Almost all of Alternator's source code (except some initialization code)
|
||||
can be found in the alternator/ subdirectory of Scylla's source code.
|
||||
|
||||
@@ -26,7 +26,7 @@ request for this single URL to many different backend nodes. Such a
|
||||
load-balancing setup is *not* included inside Alternator. You should either
|
||||
set one up, or configure the client library to do the load balancing itself.
|
||||
Instructions for doing this can be found in:
|
||||
https://github.com/scylladb/alternator-load-balancing/
|
||||
<https://github.com/scylladb/alternator-load-balancing/>
|
||||
|
||||
## Write isolation policies
|
||||
|
||||
@@ -125,7 +125,7 @@ All of this is not yet implemented in Alternator.
|
||||
Scylla has an advanced and extensive monitoring framework for inspecting
|
||||
and graphing hundreds of different metrics of Scylla's usage and performance.
|
||||
Scylla's monitoring stack, based on Grafana and Prometheus, is described in
|
||||
https://docs.scylladb.com/operating-scylla/monitoring/.
|
||||
<https://docs.scylladb.com/operating-scylla/monitoring/>.
|
||||
This monitoring stack is different from DynamoDB's offering - but Scylla's
|
||||
is significantly more powerful and gives the user better insights on
|
||||
the internals of the database and its performance.
|
||||
@@ -160,7 +160,7 @@ experimental:
|
||||
One thing that this implementation is still missing is that expiration
|
||||
events appear in the Streams API as normal deletions - without the
|
||||
distinctive marker on deletions which are really expirations.
|
||||
https://github.com/scylladb/scylla/issues/5060
|
||||
<https://github.com/scylladb/scylla/issues/5060>
|
||||
|
||||
* The DynamoDB Streams API for capturing change is supported, but still
|
||||
considered experimental so needs to be enabled explicitly with the
|
||||
@@ -172,12 +172,12 @@ experimental:
|
||||
* While in DynamoDB data usually appears in the stream less than a second
|
||||
after it was written, in Alternator Streams there is currently a 10
|
||||
second delay by default.
|
||||
https://github.com/scylladb/scylla/issues/6929
|
||||
<https://github.com/scylladb/scylla/issues/6929>
|
||||
* Some events are represented differently in Alternator Streams. For
|
||||
example, a single PutItem is represented by a REMOVE + MODIFY event,
|
||||
instead of just a single MODIFY or INSERT.
|
||||
https://github.com/scylladb/scylla/issues/6930
|
||||
https://github.com/scylladb/scylla/issues/6918
|
||||
<https://github.com/scylladb/scylla/issues/6930>
|
||||
<https://github.com/scylladb/scylla/issues/6918>
|
||||
|
||||
## Unimplemented API features
|
||||
|
||||
@@ -189,18 +189,18 @@ they should be easy to detect. Here is a list of these unimplemented features:
|
||||
* Currently in Alternator, a GSI (Global Secondary Index) can only be added
|
||||
to a table at table creation time. Unlike DynamoDB which also allows adding
|
||||
a GSI (but not an LSI) to an existing table using an UpdateTable operation.
|
||||
https://github.com/scylladb/scylla/issues/5022
|
||||
<https://github.com/scylladb/scylla/issues/5022>
|
||||
|
||||
* GSI (Global Secondary Index) and LSI (Local Secondary Index) may be
|
||||
configured to project only a subset of the base-table attributes to the
|
||||
index. This option is not yet respected by Alternator - all attributes
|
||||
are projected. This wastes some disk space when it is not needed.
|
||||
https://github.com/scylladb/scylla/issues/5036
|
||||
<https://github.com/scylladb/scylla/issues/5036>
|
||||
|
||||
* DynamoDB's new multi-item transaction feature (TransactWriteItems,
|
||||
TransactGetItems) is not supported. Note that the older single-item
|
||||
conditional updates feature are fully supported.
|
||||
https://github.com/scylladb/scylla/issues/5064
|
||||
<https://github.com/scylladb/scylla/issues/5064>
|
||||
|
||||
* Alternator does not yet support the DynamoDB API calls that control which
|
||||
table is available in which data center (DC): CreateGlobalTable,
|
||||
@@ -211,19 +211,19 @@ they should be easy to detect. Here is a list of these unimplemented features:
|
||||
If a DC is added after a table is created, the table won't be visible from
|
||||
the new DC and changing that requires a CQL "ALTER TABLE" statement to
|
||||
modify the table's replication strategy.
|
||||
https://github.com/scylladb/scylla/issues/5062
|
||||
<https://github.com/scylladb/scylla/issues/5062>
|
||||
|
||||
* Recently DynamoDB added support, in addition to the DynamoDB Streams API,
|
||||
also for the similar Kinesis Streams. Alternator doesn't support this yet,
|
||||
and the related operations DescribeKinesisStreamingDestination,
|
||||
DisableKinesisStreamingDestination, and EnableKinesisStreamingDestination.
|
||||
https://github.com/scylladb/scylla/issues/8786
|
||||
<https://github.com/scylladb/scylla/issues/8786>
|
||||
|
||||
* The on-demand backup APIs are not supported: CreateBackup, DescribeBackup,
|
||||
DeleteBackup, ListBackups, RestoreTableFromBackup.
|
||||
For now, users can use Scylla's existing backup solutions such as snapshots
|
||||
or Scylla Manager.
|
||||
https://github.com/scylladb/scylla/issues/5063
|
||||
<https://github.com/scylladb/scylla/issues/5063>
|
||||
|
||||
* Continuous backup (the ability to restore any point in time) is also not
|
||||
supported: UpdateContinuousBackups, DescribeContinuousBackups,
|
||||
@@ -237,28 +237,28 @@ they should be easy to detect. Here is a list of these unimplemented features:
|
||||
BillingMode option is ignored by Alternator, and if a provisioned throughput
|
||||
is specified, it is ignored. Requests which are asked to return the amount
|
||||
of provisioned throughput used by the request do not return it in Alternator.
|
||||
https://github.com/scylladb/scylla/issues/5068
|
||||
<https://github.com/scylladb/scylla/issues/5068>
|
||||
|
||||
* DAX (DynamoDB Accelerator), an in-memory cache for DynamoDB, is not
|
||||
available in for Alternator. Anyway, it should not be necessary - Scylla's
|
||||
internal cache is already rather advanced and there is no need to place
|
||||
another cache in front of the it. We wrote more about this here:
|
||||
https://www.scylladb.com/2017/07/31/database-caches-not-good/
|
||||
<https://www.scylladb.com/2017/07/31/database-caches-not-good/>
|
||||
|
||||
* The DescribeTable is missing information about creation data and size
|
||||
estimates, and also part of the information about indexes enabled on
|
||||
the table.
|
||||
https://github.com/scylladb/scylla/issues/5013
|
||||
https://github.com/scylladb/scylla/issues/5026
|
||||
https://github.com/scylladb/scylla/issues/7550
|
||||
https://github.com/scylladb/scylla/issues/7551
|
||||
<https://github.com/scylladb/scylla/issues/5013>
|
||||
<https://github.com/scylladb/scylla/issues/5026>
|
||||
<https://github.com/scylladb/scylla/issues/7550>
|
||||
<https://github.com/scylladb/scylla/issues/7551 >
|
||||
|
||||
* The recently-added PartiQL syntax (SQL-like SELECT/UPDATE/INSERT/DELETE
|
||||
expressions) and the new operations ExecuteStatement, BatchExecuteStatement
|
||||
and ExecuteTransaction is not yet supported.
|
||||
A user that is interested in an SQL-like syntax can consider using Scylla's
|
||||
CQL protocol instead.
|
||||
https://github.com/scylladb/scylla/issues/8787
|
||||
<https://github.com/scylladb/scylla/issues/8787>
|
||||
|
||||
* As mentioned above, Alternator has its own powerful monitoring framework,
|
||||
which is different from AWS's. In particular, the operations
|
||||
@@ -266,8 +266,8 @@ they should be easy to detect. Here is a list of these unimplemented features:
|
||||
UpdateContributorInsights that configure Amazon's "CloudWatch Contributor
|
||||
Insights" are not yet supported. Scylla has different ways to retrieve the
|
||||
same information, such as which items were accessed most often.
|
||||
https://github.com/scylladb/scylla/issues/8788
|
||||
<https://github.com/scylladb/scylla/issues/8788>
|
||||
|
||||
* Alternator does not support the new DynamoDB feature "export to S3",
|
||||
and its operations DescribeExport, ExportTableToPointInTime, ListExports.
|
||||
https://github.com/scylladb/scylla/issues/8789
|
||||
<https://github.com/scylladb/scylla/issues/8789>
|
||||
|
||||
@@ -4,70 +4,65 @@ Raft Consensus Algorithm in ScyllaDB
|
||||
|
||||
Introduction
|
||||
--------------
|
||||
ScyllaDB was originally designed, following Apache Cassandra, to use gossip for topology and schema updates and the Paxos consensus algorithm for
|
||||
strong data consistency (:doc:`LWT </using-scylla/lwt>`). To achieve stronger consistency without performance penalty, ScyllaDB 5.0 is turning to Raft - a consensus algorithm designed as an alternative to both gossip and Paxos.
|
||||
ScyllaDB was originally designed, following Apache Cassandra, to use gossip for topology and schema updates and the Paxos consensus algorithm for
|
||||
strong data consistency (:doc:`LWT </using-scylla/lwt>`). To achieve stronger consistency without performance penalty, ScyllaDB 5.x has turned to Raft - a consensus algorithm designed as an alternative to both gossip and Paxos.
|
||||
|
||||
Raft is a consensus algorithm that implements a distributed, consistent, replicated log across members (nodes). Raft implements consensus by first electing a distinguished leader, then giving the leader complete responsibility for managing the replicated log. The leader accepts log entries from clients, replicates them on other servers, and tells servers when it is safe to apply log entries to their state machines.
|
||||
|
||||
Raft uses a heartbeat mechanism to trigger a leader election. All servers start as followers and remain in the follower state as long as they receive valid RPCs (heartbeat) from a leader or candidate. A leader sends periodic heartbeats to all followers to maintain his authority (leadership). Suppose a follower receives no communication over a period called the election timeout. In that case, it assumes no viable leader and begins an election to choose a new leader.
|
||||
|
||||
Leader selection is described in detail in the `raft paper <https://raft.github.io/raft.pdf>`_.
|
||||
Leader selection is described in detail in the `Raft paper <https://raft.github.io/raft.pdf>`_.
|
||||
|
||||
Scylla 5.0 uses Raft to maintain schema updates in every node (see below). Any schema update, like ALTER, CREATE or DROP TABLE, is first committed as an entry in the replicated Raft log, and, once stored on most replicas, applied to all nodes **in the same order**, even in the face of a node or network failures.
|
||||
ScyllaDB 5.x may use Raft to maintain schema updates in every node (see below). Any schema update, like ALTER, CREATE or DROP TABLE, is first committed as an entry in the replicated Raft log, and, once stored on most replicas, applied to all nodes **in the same order**, even in the face of a node or network failures.
|
||||
|
||||
Following Scylla 5.x releases will use Raft to guarantee consistent topology updates similarly.
|
||||
Following ScyllaDB 5.x releases will use Raft to guarantee consistent topology updates similarly.
|
||||
|
||||
.. _raft-quorum-requirement:
|
||||
|
||||
Quorum Requirement
|
||||
-------------------
|
||||
|
||||
Raft requires at least a quorum of nodes in a cluster to be available. If multiple nodes fail
|
||||
and the quorum is lost, the cluster is unavailable for schema updates. See :ref:`Handling Failures <raft-handliing-failures>`
|
||||
Raft requires at least a quorum of nodes in a cluster to be available. If multiple nodes fail
|
||||
and the quorum is lost, the cluster is unavailable for schema updates. See :ref:`Handling Failures <raft-handling-failures>`
|
||||
for information on how to handle failures.
|
||||
|
||||
|
||||
Upgrade Considerations for SyllaDB 5.0 and Later
|
||||
==================================================
|
||||
|
||||
Note that when you have a two-DC cluster with the same number of nodes in each DC, the cluster will lose the quorum if one
|
||||
Note that when you have a two-DC cluster with the same number of nodes in each DC, the cluster will lose the quorum if one
|
||||
of the DCs is down.
|
||||
**We recommend configuring three DCs per cluster to ensure that the cluster remains available and operational when one DC is down.**
|
||||
|
||||
Enabling Raft
|
||||
---------------
|
||||
|
||||
Enabling Raft in ScyllaDB 5.0
|
||||
===============================
|
||||
Enabling Raft in ScyllaDB 5.0 and 5.1
|
||||
=====================================
|
||||
|
||||
.. note::
|
||||
In ScyllaDB 5.0:
|
||||
.. warning::
|
||||
In ScyllaDB 5.0 and 5.1, Raft is an experimental feature.
|
||||
|
||||
* Raft is an experimental feature.
|
||||
* Raft implementation only covers safe schema changes. See :ref:`Safe Schema Changes with Raft <raft-schema-changes>`.
|
||||
It is not possible to enable Raft in an existing cluster in ScyllaDB 5.0 and 5.1.
|
||||
In order to have a Raft-enabled cluster in these versions, you must create a new cluster with Raft enabled from the start.
|
||||
|
||||
If you are creating a new cluster, add ``raft`` to the list of experimental features in your ``scylla.yaml`` file:
|
||||
.. warning::
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
experimental_features:
|
||||
- raft
|
||||
**Do not** use Raft in production clusters in ScyllaDB 5.0 and 5.1. Such clusters won't be able to correctly upgrade to ScyllaDB 5.2.
|
||||
|
||||
If you upgrade to ScyllaDB 5.0 from an earlier version, perform a :doc:`rolling restart </operating-scylla/procedures/config-change/rolling-restart/>`
|
||||
updating the ``scylla.yaml`` file for **each node** in the cluster to enable the experimental Raft feature:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
experimental_features:
|
||||
- raft
|
||||
|
||||
|
||||
When all the nodes in the cluster and updated and restarted, the cluster will begin to use Raft for schema changes.
|
||||
Use Raft only for testing and experimentation in clusters which can be thrown away.
|
||||
|
||||
.. warning::
|
||||
Once enabled, Raft cannot be disabled on your cluster. The cluster nodes will fail to restart if you remove the Raft feature.
|
||||
|
||||
Verifying that Raft Is Enabled
|
||||
When creating a new cluster, add ``raft`` to the list of experimental features in your ``scylla.yaml`` file:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
experimental_features:
|
||||
- raft
|
||||
|
||||
Verifying that Raft is enabled
|
||||
===============================
|
||||
You can verify that Raft is enabled on your cluster in one of the following ways:
|
||||
|
||||
@@ -100,23 +95,23 @@ Safe Schema Changes with Raft
|
||||
-------------------------------
|
||||
In ScyllaDB, schema is based on :doc:`Data Definition Language (DDL) </cql/ddl>`. In earlier ScyllaDB versions, schema changes were tracked via the gossip protocol, which might lead to schema conflicts if the updates are happening concurrently.
|
||||
|
||||
Implementing Raft eliminates schema conflicts and allows full automation of DDL changes under any conditions, as long as a quorum
|
||||
Implementing Raft eliminates schema conflicts and allows full automation of DDL changes under any conditions, as long as a quorum
|
||||
of nodes in the cluster is available. The following examples illustrate how Raft provides the solution to problems with schema changes.
|
||||
|
||||
* A network partition may lead to a split-brain case, where each subset of nodes has a different version of the schema.
|
||||
|
||||
|
||||
With Raft, after a network split, the majority of the cluster can continue performing schema changes, while the minority needs to wait until it can rejoin the majority. Data manipulation statements on the minority can continue unaffected, provided the :ref:`quorum requirement <raft-quorum-requirement>` is satisfied.
|
||||
|
||||
* Two or more conflicting schema updates are happening at the same time. For example, two different columns with the same definition are simultaneously added to the cluster. There is no effective way to resolve the conflict - the cluster will employ the schema with the most recent timestamp, but changes related to the shadowed table will be lost.
|
||||
* Two or more conflicting schema updates are happening at the same time. For example, two different columns with the same definition are simultaneously added to the cluster. There is no effective way to resolve the conflict - the cluster will employ the schema with the most recent timestamp, but changes related to the shadowed table will be lost.
|
||||
|
||||
With Raft, concurrent schema changes are safe.
|
||||
With Raft, concurrent schema changes are safe.
|
||||
|
||||
|
||||
|
||||
In summary, Raft makes schema changes safe, but it requires that a quorum of nodes in the cluster is available.
|
||||
|
||||
|
||||
.. _raft-handliing-failures:
|
||||
.. _raft-handling-failures:
|
||||
|
||||
Handling Failures
|
||||
------------------
|
||||
@@ -175,7 +170,7 @@ Examples
|
||||
* - 1-4 nodes
|
||||
- Schema updates are possible and safe.
|
||||
- Try restarting the nodes. If the nodes are dead, :doc:`replace them with new nodes </operating-scylla/procedures/cluster-management/replace-dead-node-or-more/>`.
|
||||
* - 1 DC
|
||||
* - 1 DC
|
||||
- Schema updates are possible and safe.
|
||||
- When the DC comes back online, try restarting the nodes in the cluster. If the nodes are dead, :doc:`add 3 new nodes in a new region </operating-scylla/procedures/cluster-management/add-dc-to-existing-dc/>`.
|
||||
* - 2 DCs
|
||||
|
||||
@@ -1,20 +1,25 @@
|
||||
:term:`Sorted Strings Table (SSTable)<SSTable>` is the persistent file format used by Scylla and Apache Cassandra. SSTable is saved as a persistent, ordered, immutable set of files on disk.
|
||||
:term:`Sorted Strings Table (SSTable)<SSTable>` is the persistent file format used by ScyllaDB and Apache Cassandra. SSTable is saved as a persistent, ordered, immutable set of files on disk.
|
||||
Immutable means SSTables are never modified; they are created by a MemTable flush and are deleted by a compaction.
|
||||
The location of Scylla SSTables is specified in scylla.yaml ``data_file_directories`` parameter (default location: ``/var/lib/scylla/data``).
|
||||
The location of ScyllaDB SSTables is specified in scylla.yaml ``data_file_directories`` parameter (default location: ``/var/lib/scylla/data``).
|
||||
|
||||
SSTable 3.0 (mc format) is more efficient and requires less disk space than the SSTable 2.x. SSTable version support is as follows:
|
||||
SSTable 3.x is more efficient and requires less disk space than the SSTable 2.x.
|
||||
|
||||
SSTable Version Support
|
||||
------------------------
|
||||
|
||||
.. list-table::
|
||||
:widths: 33 33 33
|
||||
:header-rows: 1
|
||||
|
||||
* - SSTable Version
|
||||
- Scylla Enterprise Version
|
||||
- Scylla Open Source Version
|
||||
- ScyllaDB Enterprise Version
|
||||
- ScyllaDB Open Source Version
|
||||
* - 3.x ('me')
|
||||
- 2022.2
|
||||
- 5.1 and above
|
||||
* - 3.x ('md')
|
||||
- 2021.1
|
||||
- 4.3 and above
|
||||
- 4.3, 4.4, 4.5, 4.6, 5.0
|
||||
* - 3.0 ('mc')
|
||||
- 2019.1, 2020.1
|
||||
- 3.x, 4.1, 4.2
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
Scylla SSTable - 3.x
|
||||
====================
|
||||
ScyllaDB SSTable - 3.x
|
||||
=======================
|
||||
|
||||
.. toctree::
|
||||
:hidden:
|
||||
@@ -12,21 +12,24 @@ Scylla SSTable - 3.x
|
||||
|
||||
.. include:: ../_common/sstable_what_is.rst
|
||||
|
||||
* In Scylla 3.1 and above, mc format is enabled by default.
|
||||
* In ScyllaDB 5.1 and above, the ``me`` format is enabled by default.
|
||||
* In ScyllaDB 4.3 to 5.0, the ``md`` format is enabled by default.
|
||||
* In ScyllaDB 3.1 to 4.2, the ``mc`` format is enabled by default.
|
||||
* In ScyllaDB 3.0, the ``mc`` format is disabled by default. You can enable it by adding the ``enable_sstables_mc_format`` parameter set to ``true`` in the ``scylla.yaml`` file. For example:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
enable_sstables_mc_format: true
|
||||
|
||||
* In Scylla 3.0, mc format is disabled by default and can be enabled by adding the ``enable_sstables_mc_format`` parameter as 'true' in ``scylla.yaml`` file.
|
||||
.. REMOVE IN FUTURE VERSIONS - Remove the note above in version 5.2.
|
||||
|
||||
For example:
|
||||
Additional Information
|
||||
-------------------------
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
enable_sstables_mc_format: true
|
||||
|
||||
|
||||
For more information on Scylla 3.x SSTable formats, see below:
|
||||
For more information on ScyllaDB 3.x SSTable formats, see below:
|
||||
|
||||
* :doc:`SSTable 3.0 Data File Format <sstables-3-data-file-format>`
|
||||
* :doc:`SSTable 3.0 Statistics <sstables-3-statistics>`
|
||||
* :doc:`SSTable 3.0 Summary <sstables-3-summary>`
|
||||
* :doc:`SSTable 3.0 Index <sstables-3-index>`
|
||||
* :doc:`SSTable 3.0 Format in Scylla <sstable-format>`
|
||||
* :doc:`SSTable 3.0 Format in ScyllaDB <sstable-format>`
|
||||
|
||||
@@ -28,8 +28,13 @@ Table of contents mc-1-big-TOC.txt
|
||||
|
||||
This document focuses on the data file format but also refers to other components in parts where information stored in them affects the way we read/write the data file.
|
||||
|
||||
Note that the file on-disk format applies both to the "mc" and "md" SSTable format versions.
|
||||
The "md" format only fixed the semantics of the (min|max)_clustering_key fields in the SSTable Statistics file, which are now valid for describing the accurate range of clustering prefixes present in the SSTable.
|
||||
Note that the file on-disk format applies to all "m*" SSTable format versions ("mc", "md", and "me").
|
||||
|
||||
* The "md" format only fixed the semantics of the ``(min|max)_clustering_key`` fields in the SSTable Statistics file,
|
||||
which are now valid for describing the accurate range of clustering prefixes present in the SSTable.
|
||||
* The "me" format added the ``host_id`` of the host writing the SSTable to the SSTable Statistics file.
|
||||
It is used to qualify the commit log replay position that is also stored in the SSTable Statistics file.
|
||||
|
||||
See :doc:`SSTables 3.0 Statistics File Format </architecture/sstable/sstable3/sstables-3-statistics>` for more details.
|
||||
|
||||
Overview
|
||||
|
||||
@@ -175,6 +175,13 @@ Whole entry
|
||||
// It contains only one commit log position interval - [lower bound of commit log, upper bound of commit log].
|
||||
|
||||
array<be32<int32_t>, commit_log_interval> commit_log_intervals;
|
||||
|
||||
// Versions MC and MD of SSTable 3.x format end here.
|
||||
|
||||
// UUID of the host that wrote the SSTable.
|
||||
// Qualifies all commitlog positions in the SSTable Statistics file.
|
||||
|
||||
UUID host_id;
|
||||
}
|
||||
|
||||
using clustering_bound = array<be32<int32_t>, clustering_column>;
|
||||
|
||||
@@ -21,8 +21,6 @@
|
||||
Appendices
|
||||
----------
|
||||
|
||||
.. include:: /rst_include/cql-version-index.rst
|
||||
|
||||
.. _appendix-A:
|
||||
|
||||
Appendix A: CQL Keywords
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# ScyllaDB CQL Extensions
|
||||
|
||||
Scylla extends the CQL language to provide a few extra features. This document
|
||||
ScyllaDB extends the CQL language to provide a few extra features. This document
|
||||
lists those extensions.
|
||||
|
||||
## BYPASS CACHE clause
|
||||
@@ -109,7 +109,7 @@ Storage options can be inspected by checking the new system schema table: `syste
|
||||
A special statement is dedicated for pruning ghost rows from materialized views.
|
||||
Ghost row is an inconsistency issue which manifests itself by having rows
|
||||
in a materialized view which do not correspond to any base table rows.
|
||||
Such inconsistencies should be prevented altogether and Scylla is striving to avoid
|
||||
Such inconsistencies should be prevented altogether and ScyllaDB is striving to avoid
|
||||
them, but *if* they happen, this statement can be used to restore a materialized view
|
||||
to a fully consistent state without rebuilding it from scratch.
|
||||
|
||||
@@ -133,21 +133,35 @@ token ranges.
|
||||
|
||||
## Synchronous materialized views
|
||||
|
||||
Materialized view updates can be applied synchronously (with errors propagated
|
||||
back to the user) or asynchronously, in the background. Historically, in order
|
||||
to use synchronous updates, the materialized view had to be local,
|
||||
which could be achieved e.g. by using the same partition key definition
|
||||
as the one present in the base table.
|
||||
Scylla also allows explicitly marking the view as synchronous, which forces
|
||||
all its view updates to be updated synchronously. Such views tend to reduce
|
||||
observed availability of the base table, because a base table write would only
|
||||
succeed if all synchronous view updates also succeed. On the other hand,
|
||||
failed view updates would be detected immediately, and appropriate action
|
||||
can be taken (e.g. pruning the materialized view, as mentioned in the paragraph
|
||||
above).
|
||||
Usually, when a table with materialized views is updated, the update to the
|
||||
views happens _asynchronously_, i.e., in the background. This means that
|
||||
the user cannot know when the view updates have all finished - or even be
|
||||
sure that they succeeded.
|
||||
|
||||
In order to mark a materialized view as synchronous, one can use the following
|
||||
syntax:
|
||||
However, there are circumstances where ScyllaDB does view updates
|
||||
_synchronously_ - i.e., the user's write returns only after the views
|
||||
were updated. This happens when the materialized-view replica is on the
|
||||
same node as the base-table replica. For example, if the base table and
|
||||
the view have the same partition key. Note that only ScyllaDB guarantees
|
||||
synchronous view updates in this case - they are asynchronous in Cassandra.
|
||||
|
||||
ScyllaDB also allows explicitly marking a view as synchronous. When a view
|
||||
is marked synchronous, base-table updates will wait for that view to be
|
||||
updated before returning. A base table may have multiple views marked
|
||||
synchronous, and will wait for all of them. The consistency level of a
|
||||
write applies to synchronous views as well as to the base table: For
|
||||
example, writing with QUORUM consistency level returns only after a
|
||||
quorum of the base-table replicas were updated *and* also a quorum of
|
||||
each synchronous view table was also updated.
|
||||
|
||||
Synchronous views tend to reduce the observed availability of the base table,
|
||||
because a base-table write would only succeed if enough synchronous view
|
||||
updates also succeed. On the other hand, failed view updates would be
|
||||
detected immediately, and appropriate action can be taken, such as retrying
|
||||
the write or pruning the materialized view (as mentioned in the previous
|
||||
section). This can improve the consistency of the base table with its views.
|
||||
|
||||
To create a new materialized view with synchronous updates, use:
|
||||
|
||||
```cql
|
||||
CREATE MATERIALIZED VIEW main.mv
|
||||
@@ -157,12 +171,18 @@ CREATE MATERIALIZED VIEW main.mv
|
||||
WITH synchronous_updates = true;
|
||||
```
|
||||
|
||||
To make an existing materialized view synchronous, use:
|
||||
|
||||
```cql
|
||||
ALTER MATERIALIZED VIEW main.mv WITH synchronous_updates = true;
|
||||
```
|
||||
|
||||
Synchronous updates can also be dynamically turned off by setting
|
||||
the value of `synchronous_updates` to `false`.
|
||||
To return a materialized view to the default behavior (which, as explained
|
||||
above, _usually_ means asynchronous updates), use:
|
||||
|
||||
```cql
|
||||
ALTER MATERIALIZED VIEW main.mv WITH synchronous_updates = false;
|
||||
```
|
||||
|
||||
### Synchronous global secondary indexes
|
||||
|
||||
@@ -261,7 +281,7 @@ that the rate of requests exceeds configured limit, the cluster will start
|
||||
rejecting some of them in order to bring the throughput back to the configured
|
||||
limit. Rejected requests are less costly which can help reduce overload.
|
||||
|
||||
_NOTE_: Due to Scylla's distributed nature, tracking per-partition request rates
|
||||
_NOTE_: Due to ScyllaDB's distributed nature, tracking per-partition request rates
|
||||
is not perfect and the actual rate of accepted requests may be higher up to
|
||||
a factor of keyspace's `RF`. This feature should not be used to enforce precise
|
||||
limits but rather serve as an overload protection feature.
|
||||
|
||||
@@ -3,9 +3,6 @@
|
||||
Data Definition
|
||||
===============
|
||||
|
||||
|
||||
.. include:: /rst_include/cql-version-index.rst
|
||||
|
||||
CQL stores data in *tables*, whose schema defines the layout of said data in the table, and those tables are grouped in
|
||||
*keyspaces*. A keyspace defines a number of options that apply to all the tables it contains, most prominently of
|
||||
which is the replication strategy used by the keyspace. An application can have only one keyspace. However, it is also possible to
|
||||
@@ -634,7 +631,7 @@ A table supports the following options:
|
||||
- map
|
||||
- see below
|
||||
- :ref:`Compaction options <cql-compaction-options>`
|
||||
* - ``compaction``
|
||||
* - ``compression``
|
||||
- map
|
||||
- see below
|
||||
- :ref:`Compression options <cql-compression-options>`
|
||||
@@ -863,6 +860,18 @@ Other considerations:
|
||||
- Adding new columns (see ``ALTER TABLE`` below) is a constant time operation. There is thus no need to try to
|
||||
anticipate future usage when creating a table.
|
||||
|
||||
.. _ddl-per-parition-rate-limit:
|
||||
|
||||
Limiting the rate of requests per partition
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
You can limit the read rates and writes rates into a partition by applying
|
||||
a ScyllaDB CQL extension to the CREATE TABLE or ALTER TABLE statements.
|
||||
See `Per-partition rate limit <https://docs.scylladb.com/stable/cql/cql-extensions.html#per-partition-rate-limit>`_
|
||||
for details.
|
||||
|
||||
.. REMOVE IN FUTURE VERSIONS - Remove the URL above (temporary solution) and replace it with a relative link (once the solution is applied).
|
||||
|
||||
.. _alter-table-statement:
|
||||
|
||||
ALTER TABLE
|
||||
@@ -921,6 +930,7 @@ The ``ALTER TABLE`` statement can:
|
||||
The same note applies to the set of ``compression`` sub-options.
|
||||
- Change or add any of the ``Encryption options`` above.
|
||||
- Change or add any of the :ref:`CDC options <cdc-options>` above.
|
||||
- Change or add per-partition rate limits. See :ref:`Limiting the rate of requests per partition <ddl-per-parition-rate-limit>`.
|
||||
|
||||
.. warning:: Dropping a column assumes that the timestamps used for the value of this column are "real" timestamp in
|
||||
microseconds. Using "real" timestamps in microseconds is the default and is **strongly** recommended, but as
|
||||
@@ -930,7 +940,6 @@ The ``ALTER TABLE`` statement can:
|
||||
.. warning:: Once a column is dropped, it is allowed to re-add a column with the same name as the dropped one
|
||||
**unless** the type of the dropped column was a (non-frozen) collection (due to an internal technical limitation).
|
||||
|
||||
|
||||
.. _drop-table-statement:
|
||||
|
||||
DROP TABLE
|
||||
|
||||
@@ -22,8 +22,6 @@
|
||||
Definitions
|
||||
-----------
|
||||
|
||||
.. include:: /rst_include/cql-version-index.rst
|
||||
|
||||
.. _conventions:
|
||||
|
||||
Conventions
|
||||
|
||||
@@ -3,8 +3,6 @@
|
||||
Data Manipulation
|
||||
-----------------
|
||||
|
||||
.. include:: /rst_include/cql-version-index.rst
|
||||
|
||||
This section describes the statements supported by CQL to insert, update, delete, and query data.
|
||||
|
||||
:ref:`SELECT <select-statement>`
|
||||
@@ -99,11 +97,12 @@ alternatively, of the wildcard character (``*``) to select all the columns defin
|
||||
Selectors
|
||||
`````````
|
||||
|
||||
A :token:`selector` can be one of:
|
||||
A :token:`selector` can be one of the following:
|
||||
|
||||
- A column name of the table selected to retrieve the values for that column.
|
||||
- A casting, which allows you to convert a nested selector to a (compatible) type.
|
||||
- A function call, where the arguments are selector themselves.
|
||||
- A call to the :ref:`COUNT function <count-function>`, which counts all non-null results.
|
||||
|
||||
Aliases
|
||||
```````
|
||||
|
||||
@@ -21,7 +21,6 @@
|
||||
.. _cql-functions:
|
||||
|
||||
.. Need some intro for UDF and native functions in general and point those to it.
|
||||
.. _udfs:
|
||||
.. _native-functions:
|
||||
|
||||
Functions
|
||||
@@ -33,13 +32,15 @@ CQL supports two main categories of functions:
|
||||
- The :ref:`aggregate functions <aggregate-functions>`, which are used to aggregate multiple rows of results from a
|
||||
``SELECT`` statement.
|
||||
|
||||
.. In both cases, CQL provides a number of native "hard-coded" functions as well as the ability to create new user-defined
|
||||
.. functions.
|
||||
In both cases, CQL provides a number of native "hard-coded" functions as well as the ability to create new user-defined
|
||||
functions.
|
||||
|
||||
.. .. note:: By default, the use of user-defined functions is disabled by default for security concerns (even when
|
||||
.. enabled, the execution of user-defined functions is sandboxed and a "rogue" function should not be allowed to do
|
||||
.. evil, but no sandbox is perfect so using user-defined functions is opt-in). See the ``enable_user_defined_functions``
|
||||
.. in ``scylla.yaml`` to enable them.
|
||||
.. note:: Although user-defined functions are sandboxed, protecting the system from a "rogue" function, user-defined functions are disabled by default for extra security.
|
||||
See the ``enable_user_defined_functions`` in ``scylla.yaml`` to enable them.
|
||||
|
||||
Additionally, user-defined functions are still experimental and need to be explicitly enabled by adding ``udf`` to the list of
|
||||
``experimental_features`` configuration options in ``scylla.yaml``, or turning on the ``experimental`` flag.
|
||||
See :ref:`Enabling Experimental Features <yaml_enabling_experimental_features>` for details.
|
||||
|
||||
.. A function is identifier by its name:
|
||||
|
||||
@@ -60,11 +61,11 @@ Native functions
|
||||
Cast
|
||||
````
|
||||
|
||||
Supported starting from Scylla version 2.1
|
||||
Supported starting from ScyllaDB version 2.1
|
||||
|
||||
The ``cast`` function can be used to convert one native datatype to another.
|
||||
|
||||
The following table describes the conversions supported by the ``cast`` function. Scylla will silently ignore any cast converting a cast datatype into its own datatype.
|
||||
The following table describes the conversions supported by the ``cast`` function. ScyllaDB will silently ignore any cast converting a cast datatype into its own datatype.
|
||||
|
||||
=============== =======================================================================================================
|
||||
From To
|
||||
@@ -228,6 +229,65 @@ A number of functions are provided to “convert” the native types into binary
|
||||
takes a 64-bit ``blob`` argument and converts it to a ``bigint`` value. For example, ``bigintAsBlob(3)`` is
|
||||
``0x0000000000000003`` and ``blobAsBigint(0x0000000000000003)`` is ``3``.
|
||||
|
||||
.. _udfs:
|
||||
|
||||
User-defined functions :label-caution:`Experimental`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
User-defined functions (UDFs) execute user-provided code in ScyllaDB. Supported languages are currently Lua and WebAssembly.
|
||||
|
||||
UDFs are part of the ScyllaDB schema and are automatically propagated to all nodes in the cluster.
|
||||
UDFs can be overloaded, so that multiple UDFs with different argument types can have the same function name, for example::
|
||||
|
||||
CREATE FUNCTION sample ( arg int ) ...;
|
||||
CREATE FUNCTION sample ( arg text ) ...;
|
||||
|
||||
When calling a user-defined function, arguments can be literals or terms. Prepared statement placeholders can be used, too.
|
||||
|
||||
CREATE FUNCTION statement
|
||||
`````````````````````````
|
||||
|
||||
Creating a new user-defined function uses the ``CREATE FUNCTION`` statement. For example::
|
||||
|
||||
CREATE OR REPLACE FUNCTION div(dividend double, divisor double)
|
||||
RETURNS NULL ON NULL INPUT
|
||||
RETURNS double
|
||||
LANGUAGE LUA
|
||||
AS 'return dividend/divisor;';
|
||||
|
||||
``CREATE FUNCTION`` with the optional ``OR REPLACE`` keywords creates either a function
|
||||
or replaces an existing one with the same signature. A ``CREATE FUNCTION`` without ``OR REPLACE``
|
||||
fails if a function with the same signature already exists. If the optional ``IF NOT EXISTS``
|
||||
keywords are used, the function will only be created if another function with the same
|
||||
signature does not exist. ``OR REPLACE`` and ``IF NOT EXISTS`` cannot be used together.
|
||||
|
||||
Behavior for null input values must be defined for each function:
|
||||
|
||||
* ``RETURNS NULL ON NULL INPUT`` declares that the function will always return null (without being executed) if any of the input arguments is null.
|
||||
* ``CALLED ON NULL INPUT`` declares that the function will always be executed.
|
||||
|
||||
Function Signature
|
||||
``````````````````
|
||||
|
||||
Signatures are used to distinguish individual functions. The signature consists of a fully-qualified function name of the form ``<keyspace>.<function_name>`` and a concatenated list of all the argument types.
|
||||
|
||||
Note that keyspace names, function names and argument types are subject to the default naming conventions and case-sensitivity rules.
|
||||
|
||||
Functions belong to a keyspace; if no keyspace is specified, the current keyspace is used. User-defined functions are not allowed in the system keyspaces.
|
||||
|
||||
DROP FUNCTION statement
|
||||
```````````````````````
|
||||
|
||||
Dropping a function uses the ``DROP FUNCTION`` statement. For example::
|
||||
|
||||
DROP FUNCTION myfunction;
|
||||
DROP FUNCTION mykeyspace.afunction;
|
||||
DROP FUNCTION afunction ( int );
|
||||
DROP FUNCTION afunction ( text );
|
||||
|
||||
You must specify the argument types of the function, the arguments_signature, in the drop command if there are multiple overloaded functions with the same name but different signatures.
|
||||
``DROP FUNCTION`` with the optional ``IF EXISTS`` keywords drops a function if it exists, but does not throw an error if it doesn’t.
|
||||
|
||||
.. _aggregate-functions:
|
||||
|
||||
Aggregate functions
|
||||
@@ -261,6 +321,10 @@ It also can be used to count the non-null value of a given column::
|
||||
|
||||
SELECT COUNT (scores) FROM plays;
|
||||
|
||||
.. note::
|
||||
Counting all rows in a table may be time-consuming and exceed the default timeout. In such a case,
|
||||
see :doc:`Counting all rows in a table is slow </kb/count-all-rows>` for instructions.
|
||||
|
||||
Max and Min
|
||||
```````````
|
||||
|
||||
@@ -286,6 +350,59 @@ instance::
|
||||
|
||||
.. _user-defined-aggregates-functions:
|
||||
|
||||
User-defined aggregates (UDAs) :label-caution:`Experimental`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
User-defined aggregates allow the creation of custom aggregate functions. User-defined aggregates can be used in SELECT statement.
|
||||
|
||||
Each aggregate requires an initial state of type ``STYPE`` defined with the ``INITCOND`` value (default value: ``null``). The first argument of the state function must have type STYPE. The remaining arguments of the state function must match the types of the user-defined aggregate arguments. The state function is called once for each row, and the value returned by the state function becomes the new state. After all rows are processed, the optional FINALFUNC is executed with the last state value as its argument.
|
||||
|
||||
The ``STYPE`` value is mandatory in order to distinguish possibly overloaded versions of the state and/or final function, since the overload can appear after creation of the aggregate.
|
||||
|
||||
A complete working example for user-defined aggregates (assuming that a keyspace has been selected using the ``USE`` statement)::
|
||||
|
||||
CREATE FUNCTION accumulate_len(acc tuple<bigint,bigint>, a text)
|
||||
RETURNS NULL ON NULL INPUT
|
||||
RETURNS tuple<bigint,bigint>
|
||||
LANGUAGE lua as 'return {acc[1] + 1, acc[2] + #a}';
|
||||
|
||||
CREATE OR REPLACE FUNCTION present(res tuple<bigint,bigint>)
|
||||
RETURNS NULL ON NULL INPUT
|
||||
RETURNS text
|
||||
LANGUAGE lua as
|
||||
'return "The average string length is " .. res[2]/res[1] .. "!"';
|
||||
|
||||
CREATE OR REPLACE AGGREGATE avg_length(text)
|
||||
SFUNC accumulate_len
|
||||
STYPE tuple<bigint,bigint>
|
||||
FINALFUNC present
|
||||
INITCOND (0,0);
|
||||
|
||||
CREATE AGGREGATE statement
|
||||
``````````````````````````
|
||||
|
||||
The ``CREATE AGGREGATE`` command with the optional ``OR REPLACE`` keywords creates either an aggregate or replaces an existing one with the same signature. A ``CREATE AGGREGATE`` without ``OR REPLACE`` fails if an aggregate with the same signature already exists. The ``CREATE AGGREGATE`` command with the optional ``IF NOT EXISTS`` keywords creates an aggregate if it does not already exist. The ``OR REPLACE`` and ``IF NOT EXISTS`` phrases cannot be used together.
|
||||
|
||||
The ``STYPE`` value defines the type of the state value and must be specified. The optional ``INITCOND`` defines the initial state value for the aggregate; the default value is null. A non-null ``INITCOND`` must be specified for state functions that are declared with ``RETURNS NULL ON NULL INPUT``.
|
||||
|
||||
The ``SFUNC`` value references an existing function to use as the state-modifying function. The first argument of the state function must have type ``STYPE``. The remaining arguments of the state function must match the types of the user-defined aggregate arguments. The state function is called once for each row, and the value returned by the state function becomes the new state. State is not updated for state functions declared with ``RETURNS NULL ON NULL INPUT`` and called with null. After all rows are processed, the optional ``FINALFUNC`` is executed with last state value as its argument. It must take only one argument with type ``STYPE``, but the return type of the ``FINALFUNC`` may be a different type. A final function declared with ``RETURNS NULL ON NULL INPUT`` means that the aggregate’s return value will be null, if the last state is null.
|
||||
|
||||
If no ``FINALFUNC`` is defined, the overall return type of the aggregate function is ``STYPE``. If a ``FINALFUNC`` is defined, it is the return type of that function.
|
||||
|
||||
DROP AGGREGATE statement
|
||||
````````````````````````
|
||||
|
||||
Dropping a user-defined aggregate function uses the ``DROP AGGREGATE`` statement. For example::
|
||||
|
||||
DROP AGGREGATE myAggregate;
|
||||
DROP AGGREGATE myKeyspace.anAggregate;
|
||||
DROP AGGREGATE someAggregate ( int );
|
||||
DROP AGGREGATE someAggregate ( text );
|
||||
|
||||
The ``DROP AGGREGATE`` statement removes an aggregate created using ``CREATE AGGREGATE``. You must specify the argument types of the aggregate to drop if there are multiple overloaded aggregates with the same name but a different signature.
|
||||
|
||||
The ``DROP AGGREGATE`` command with the optional ``IF EXISTS`` keywords drops an aggregate if it exists, and does nothing if a function with the signature does not exist.
|
||||
|
||||
.. include:: /rst_include/apache-cql-return-index.rst
|
||||
|
||||
.. include:: /rst_include/apache-copyrights.rst
|
||||
.. include:: /rst_include/apache-copyrights.rst
|
||||
@@ -24,9 +24,6 @@ Materialized Views
|
||||
------------------
|
||||
Production ready in Scylla Open Source 3.0 and Scylla Enterprise 2019.1.x
|
||||
|
||||
.. include:: /rst_include/cql-version-index.rst
|
||||
|
||||
|
||||
Materialized views names are defined by:
|
||||
|
||||
.. code-block:: cql
|
||||
|
||||
@@ -8,12 +8,6 @@ Data Types
|
||||
.. _UUID: https://en.wikipedia.org/wiki/Universally_unique_identifier
|
||||
|
||||
|
||||
|
||||
|
||||
.. include:: /rst_include/cql-version-index.rst
|
||||
|
||||
|
||||
|
||||
CQL is a typed language and supports a rich set of data types, including :ref:`native types <native-types>` and
|
||||
:ref:`collection types <collections>`.
|
||||
|
||||
|
||||
@@ -8,47 +8,47 @@ Getting Started
|
||||
install-scylla/index
|
||||
configure
|
||||
requirements
|
||||
Migrate to Scylla </using-scylla/migrate-scylla>
|
||||
Migrate to ScyllaDB </using-scylla/migrate-scylla>
|
||||
Integration Solutions </using-scylla/integrations/index>
|
||||
tutorials
|
||||
|
||||
.. panel-box::
|
||||
:title: Scylla Requirements
|
||||
:title: ScyllaDB Requirements
|
||||
:id: "getting-started"
|
||||
:class: my-panel
|
||||
|
||||
* :doc:`Scylla System Requirements Guide</getting-started/system-requirements/>`
|
||||
* :doc:`ScyllaDB System Requirements Guide</getting-started/system-requirements/>`
|
||||
* :doc:`OS Support by Platform and Version</getting-started/os-support/>`
|
||||
|
||||
.. panel-box::
|
||||
:title: Install and Configure Scylla
|
||||
:title: Install and Configure ScyllaDB
|
||||
:id: "getting-started"
|
||||
:class: my-panel
|
||||
|
||||
* `Install Scylla (Binary Packages, Docker, or EC2) <https://www.scylladb.com/download/>`_ - Links to the ScyllaDB Download Center
|
||||
* `Install ScyllaDB (Binary Packages, Docker, or EC2) <https://www.scylladb.com/download/#core>`_ - Links to the ScyllaDB Download Center
|
||||
|
||||
* :doc:`Configure Scylla</getting-started/system-configuration/>`
|
||||
* :doc:`Run Scylla in a Shared Environment </getting-started/scylla-in-a-shared-environment>`
|
||||
* :doc:`Create a Scylla Cluster - Single Data Center (DC) </operating-scylla/procedures/cluster-management/create-cluster/>`
|
||||
* :doc:`Create a Scylla Cluster - Multi Data Center (DC) </operating-scylla/procedures/cluster-management/create-cluster-multidc/>`
|
||||
* :doc:`Configure ScyllaDB </getting-started/system-configuration/>`
|
||||
* :doc:`Run ScyllaDB in a Shared Environment </getting-started/scylla-in-a-shared-environment>`
|
||||
* :doc:`Create a ScyllaDB Cluster - Single Data Center (DC) </operating-scylla/procedures/cluster-management/create-cluster/>`
|
||||
* :doc:`Create a ScyllaDB Cluster - Multi Data Center (DC) </operating-scylla/procedures/cluster-management/create-cluster-multidc/>`
|
||||
|
||||
.. panel-box::
|
||||
:title: Develop Applications for Scylla
|
||||
:title: Develop Applications for ScyllaDB
|
||||
:id: "getting-started"
|
||||
:class: my-panel
|
||||
|
||||
* :doc:`Scylla Drivers</using-scylla/drivers/index>`
|
||||
* `Get Started Lesson on Scylla University <https://university.scylladb.com/courses/scylla-essentials-overview/lessons/quick-wins-install-and-run-scylla/>`_
|
||||
* :doc:`ScyllaDB Drivers</using-scylla/drivers/index>`
|
||||
* `Get Started Lesson on ScyllaDB University <https://university.scylladb.com/courses/scylla-essentials-overview/lessons/quick-wins-install-and-run-scylla/>`_
|
||||
* :doc:`CQL Reference </cql/index>`
|
||||
* :doc:`cqlsh - the CQL shell </cql/cqlsh/>`
|
||||
|
||||
.. panel-box::
|
||||
:title: Use Scylla with Third-party Solutions
|
||||
:title: Use ScyllaDB with Third-party Solutions
|
||||
:id: "getting-started"
|
||||
:class: my-panel
|
||||
|
||||
* :doc:`Migrate to Scylla </using-scylla/migrate-scylla>` - How to migrate your current database to Scylla
|
||||
* :doc:`Integrate with Scylla </using-scylla/integrations/index>` - Integration solutions with Scylla
|
||||
* :doc:`Migrate to ScyllaDB </using-scylla/migrate-scylla>` - How to migrate your current database to Scylla
|
||||
* :doc:`Integrate with ScyllaDB </using-scylla/integrations/index>` - Integration solutions with Scylla
|
||||
|
||||
|
||||
.. panel-box::
|
||||
|
||||
@@ -20,7 +20,7 @@ Install Scylla
|
||||
|
||||
Keep your versions up-to-date. The two latest versions are supported. Also always install the latest patches for your version.
|
||||
|
||||
* Download and install Scylla Server, Drivers and Tools in `Scylla Download Center <https://www.scylladb.com/download/#server/>`_
|
||||
* Download and install ScyllaDB Server, Drivers and Tools in `ScyllaDB Download Center <https://www.scylladb.com/download/#core>`_
|
||||
* :doc:`ScyllaDB Web Installer for Linux <scylla-web-installer>`
|
||||
* :doc:`Scylla Unified Installer (relocatable executable) <unified-installer>`
|
||||
* :doc:`Air-gapped Server Installation <air-gapped-install>`
|
||||
|
||||
@@ -4,7 +4,7 @@ ScyllaDB Web Installer for Linux
|
||||
|
||||
ScyllaDB Web Installer is a platform-agnostic installation script you can run with ``curl`` to install ScyllaDB on Linux.
|
||||
|
||||
See `ScyllaDB Download Center <https://www.scylladb.com/download/#server>`_ for information on manually installing ScyllaDB with platform-specific installation packages.
|
||||
See `ScyllaDB Download Center <https://www.scylladb.com/download/#core>`_ for information on manually installing ScyllaDB with platform-specific installation packages.
|
||||
|
||||
Prerequisites
|
||||
--------------
|
||||
|
||||
@@ -1,81 +1,93 @@
|
||||
OS Support by Platform and Version
|
||||
==================================
|
||||
|
||||
The following matrix shows which Operating Systems, Platforms, and Containers / Instance Engines are supported with which versions of Scylla.
|
||||
The following matrix shows which Operating Systems, Platforms, and Containers / Instance Engines are supported with which versions of ScyllaDB.
|
||||
|
||||
Scylla requires a fix to the XFS append introduced in kernel 3.15 (back-ported to 3.10 in RHEL/CentOS).
|
||||
Scylla will not run with earlier kernel versions. Details in `Scylla issue 885 <https://github.com/scylladb/scylla/issues/885>`_.
|
||||
ScyllaDB requires a fix to the XFS append introduced in kernel 3.15 (back-ported to 3.10 in RHEL/CentOS).
|
||||
ScyllaDB will not run with earlier kernel versions. Details in `ScyllaDB issue 885 <https://github.com/scylladb/scylla/issues/885>`_.
|
||||
|
||||
.. REMOVE IN FUTURE VERSIONS - Remove information about versions from the notes below in version 5.2.
|
||||
|
||||
.. note::
|
||||
|
||||
**Supported Architecture**
|
||||
|
||||
Scylla Open Source supports x86_64 for all versions and aarch64 starting from Scylla 4.6 and nightly build. In particular, aarch64 support includes AWS EC2 Graviton.
|
||||
|
||||
For Scylla Open Source **4.5** and later, the recommended OS and Scylla AMI/Image OS is Ubuntu 20.04.4 LTS.
|
||||
ScyllaDB Open Source supports x86_64 for all versions and AArch64 starting from ScyllaDB 4.6 and nightly build. In particular, aarch64 support includes AWS EC2 Graviton.
|
||||
|
||||
|
||||
Scylla Open Source
|
||||
-------------------
|
||||
ScyllaDB Open Source
|
||||
----------------------
|
||||
|
||||
.. note:: For Enterprise versions **prior to** 4.6, the recommended OS and Scylla AMI/Image OS is CentOS 7.
|
||||
.. note::
|
||||
|
||||
For Scylla Open Source versions **4.6 and later**, the recommended OS and Scylla AMI/Image OS is Ubuntu 20.04.
|
||||
Recommended OS and ScyllaDB AMI/Image OS for ScyllaDB Open Source:
|
||||
|
||||
- Ubuntu 20.04 for versions 4.6 and later.
|
||||
- CentOS 7 for versions earlier than 4.6.
|
||||
|
||||
|
||||
|
||||
+--------------------------+----------------------------------+-----------------------------+-------------+
|
||||
| Platform | Ubuntu | Debian | Centos/RHEL |
|
||||
+--------------------------+------+------+------+------+------+------+------+-------+-------+------+------+
|
||||
| Scylla Version / Version | 14.04| 16.04| 18.04|20.04 |22.04 | 8 | 9 | 10 | 11 | 7 | 8 |
|
||||
+==========================+======+======+======+======+======+======+======+=======+=======+======+======+
|
||||
| 5.0 | |x| | |x| | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |v| | |v| |
|
||||
+--------------------------+------+------+------+------+------+------+------+-------+-------+------+------+
|
||||
| 4.6 | |x| | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |x| | |v| | |v| |
|
||||
+--------------------------+------+------+------+------+------+------+------+-------+-------+------+------+
|
||||
| 4.5 | |x| | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |x| | |v| | |v| |
|
||||
+--------------------------+------+------+------+------+------+------+------+-------+-------+------+------+
|
||||
| 4.4 | |x| | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |x| | |v| | |v| |
|
||||
+--------------------------+------+------+------+------+------+------+------+-------+-------+------+------+
|
||||
| 4.3 | |x| | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |x| | |v| | |v| |
|
||||
+--------------------------+------+------+------+------+------+------+------+-------+-------+------+------+
|
||||
| 4.2 | |x| | |v| | |v| | |x| | |x| | |x| | |v| | |v| | |x| | |v| | |v| |
|
||||
+--------------------------+------+------+------+------+------+------+------+-------+-------+------+------+
|
||||
| 4.1 | |x| | |v| | |v| | |x| | |x| | |x| | |v| | |v| | |x| | |v| | |v| |
|
||||
+--------------------------+------+------+------+------+------+------+------+-------+-------+------+------+
|
||||
| 4.0 | |x| | |v| | |v| | |x| | |x| | |x| | |v| | |x| | |x| | |v| | |x| |
|
||||
+--------------------------+------+------+------+------+------+------+------+-------+-------+------+------+
|
||||
| 3.x | |x| | |v| | |v| | |x| | |x| | |x| | |v| | |x| | |x| | |v| | |x| |
|
||||
+--------------------------+------+------+------+------+------+------+------+-------+-------+------+------+
|
||||
| 2.3 | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |x| | |x| | |v| | |x| |
|
||||
+--------------------------+------+------+------+------+------+------+------+-------+-------+------+------+
|
||||
| 2.2 | |v| | |v| | |x| | |x| | |x| | |v| | |x| | |x| | |x| | |v| | |x| |
|
||||
+--------------------------+------+------+------+------+------+------+------+-------+-------+------+------+
|
||||
+----------------------------+----------------------------------+-----------------------------+---------+-------+
|
||||
| Platform | Ubuntu | Debian | CentOS /| Rocky/|
|
||||
| | | | RHEL | RHEL |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+
|
||||
| ScyllaDB Version / Version | 14.04| 16.04| 18.04|20.04 |22.04 | 8 | 9 | 10 | 11 | 7 | 8 |
|
||||
+============================+======+======+======+======+======+======+======+=======+=======+=========+=======+
|
||||
| 5.1 | |x| | |x| | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |v| | |v| |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+
|
||||
| 5.0 | |x| | |x| | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |v| | |v| |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+
|
||||
| 4.6 | |x| | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |x| | |v| | |v| |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+
|
||||
| 4.5 | |x| | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |x| | |v| | |v| |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+
|
||||
| 4.4 | |x| | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |x| | |v| | |v| |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+
|
||||
| 4.3 | |x| | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |x| | |v| | |v| |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+
|
||||
| 4.2 | |x| | |v| | |v| | |x| | |x| | |x| | |v| | |v| | |x| | |v| | |v| |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+
|
||||
| 4.1 | |x| | |v| | |v| | |x| | |x| | |x| | |v| | |v| | |x| | |v| | |v| |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+
|
||||
| 4.0 | |x| | |v| | |v| | |x| | |x| | |x| | |v| | |x| | |x| | |v| | |x| |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+
|
||||
| 3.x | |x| | |v| | |v| | |x| | |x| | |x| | |v| | |x| | |x| | |v| | |x| |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+
|
||||
| 2.3 | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |x| | |x| | |v| | |x| |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+
|
||||
| 2.2 | |v| | |v| | |x| | |x| | |x| | |v| | |x| | |x| | |x| | |v| | |x| |
|
||||
+----------------------------+------+------+------+------+------+------+------+-------+-------+---------+-------+
|
||||
|
||||
|
||||
All releases are available as a Docker container, EC2 AMI, and a GCP image (GCP image from version 4.3).
|
||||
|
||||
|
||||
Scylla Enterprise
|
||||
-----------------
|
||||
ScyllaDB Enterprise
|
||||
--------------------
|
||||
|
||||
.. note:: For Enterprise versions **prior to** 2021.1, the recommended OS and Scylla AMI/Image OS is CentOS 7.
|
||||
.. note::
|
||||
Recommended OS and ScyllaDB AMI/Image OS for ScyllaDB Enterprise:
|
||||
|
||||
For Enterprise versions **2021.1 and later**, the recommended OS and Scylla AMI/Image OS is Ubuntu 20.04.4 LTS.
|
||||
- Ubuntu 20.04 for versions 2021.1 and later.
|
||||
- CentOS 7 for versions earlier than 2021.1.
|
||||
|
||||
For Enterprise versions **2021.1 and later**, the recommended OS and Scylla AMI/Image OS is Ubuntu 20.04.
|
||||
|
||||
+--------------------------+---------------------------+--------------------+------------+
|
||||
| Platform | Ubuntu | Debian | Centos/RHEL|
|
||||
+--------------------------+------+------+------+------+------+------+------+------+-----+
|
||||
| Scylla Version / Version | 14 | 16 | 18 | 20 | 8 | 9 | 10 | 7 | 8 |
|
||||
+==========================+======+======+======+======+======+======+======+======+=====+
|
||||
| 2021.1 | |x| | |v| | |v| | |v| | |x| | |v| | |v| | |v| | |v| |
|
||||
+--------------------------+------+------+------+------+------+------+------+------+-----+
|
||||
| 2020.1 | |x| | |v| | |v| | |x| | |x| | |v| | |v| | |v| | |v| |
|
||||
+--------------------------+------+------+------+------+------+------+------+------+-----+
|
||||
| 2019.1 | |x| | |v| | |v| | |x| | |x| | |v| | |x| | |v| | |x| |
|
||||
+--------------------------+------+------+------+------+------+------+------+------+-----+
|
||||
| 2018.1 | |v| | |v| | |x| | |x| | |v| | |x| | |x| | |v| | |x| |
|
||||
+--------------------------+------+------+------+------+------+------+------+------+-----+
|
||||
+----------------------------+-----------------------------------+---------------------------+--------+-------+
|
||||
| Platform | Ubuntu | Debian | CentOS/| Rocky/|
|
||||
| | | | RHEL | RHEL |
|
||||
+----------------------------+------+------+------+------+-------+------+------+------+------+--------+-------+
|
||||
| ScyllaDB Version / Version | 14.04| 16.04| 18.04| 20.04| 22.04 | 8 | 9 | 10 | 11 | 7 | 8 |
|
||||
+============================+======+======+======+======+=======+======+======+======+======+========+=======+
|
||||
| 2022.2 | |x| | |x| | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |v| | |v| |
|
||||
+----------------------------+------+------+------+------+-------+------+------+------+------+--------+-------+
|
||||
| 2022.1 | |x| | |x| | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |v| | |v| |
|
||||
+----------------------------+------+------+------+------+-------+------+------+------+------+--------+-------+
|
||||
| 2021.1 | |x| | |v| | |v| | |v| | |x| | |x| | |v| | |v| | |x| | |v| | |v| |
|
||||
+----------------------------+------+------+------+------+-------+------+------+------+------+--------+-------+
|
||||
| 2020.1 | |x| | |v| | |v| | |x| | |x| | |x| | |v| | |v| | |x| | |v| | |v| |
|
||||
+----------------------------+------+------+------+------+-------+------+------+------+------+--------+-------+
|
||||
| 2019.1 | |x| | |v| | |v| | |x| | |x| | |x| | |v| | |x| | |x| | |v| | |x| |
|
||||
+----------------------------+------+------+------+------+-------+------+------+------+------+--------+-------+
|
||||
| 2018.1 | |v| | |v| | |x| | |x| | |v| | |x| | |x| | |x| | |x| | |v| | |x| |
|
||||
+----------------------------+------+------+------+------+-------+------+------+------+------+--------+-------+
|
||||
|
||||
|
||||
All releases are available as a Docker container, EC2 AMI, and a GCP image (GCP image from version 2021.1).
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
|
||||
===================
|
||||
Scylla Requirements
|
||||
===================
|
||||
=====================
|
||||
ScyllaDB Requirements
|
||||
=====================
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
@@ -22,9 +22,9 @@ Scylla Requirements
|
||||
</div>
|
||||
<div class="medium-9 columns">
|
||||
|
||||
* :doc:`Scylla System Requirements Guide</getting-started/system-requirements/>`
|
||||
* :doc:`ScyllaDB System Requirements Guide</getting-started/system-requirements/>`
|
||||
* :doc:`OS Support by Platform and Version</getting-started/os-support/>`
|
||||
* :doc:`Running Scylla in a shared environment </getting-started/scylla-in-a-shared-environment>`
|
||||
* :doc:`Running ScyllaDB in a Shared Environment </getting-started/scylla-in-a-shared-environment>`
|
||||
|
||||
.. raw:: html
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
:image: /_static/img/mascots/scylla-docs.svg
|
||||
:search_box:
|
||||
|
||||
The most up-to-date documents for the fastest, best performing, high availability NoSQL database.
|
||||
New to ScyllaDB? Start `here <https://cloud.docs.scylladb.com/stable/scylladb-basics/>`_!
|
||||
|
||||
.. raw:: html
|
||||
|
||||
@@ -26,28 +26,29 @@
|
||||
<div class="grid-x grid-margin-x hs">
|
||||
|
||||
.. topic-box::
|
||||
:title: Let us manage your DB
|
||||
:title: ScyllaDB Cloud
|
||||
:link: https://cloud.docs.scylladb.com
|
||||
:class: large-4
|
||||
:anchor: Get Started with Scylla Cloud
|
||||
:anchor: ScyllaDB Cloud Documentation
|
||||
|
||||
Take advantage of Scylla Cloud, a fully-managed database-as-a-service.
|
||||
Simplify application development with ScyllaDB Cloud - a fully managed database-as-a-service.
|
||||
|
||||
.. topic-box::
|
||||
:title: Manage your own DB
|
||||
:title: ScyllaDB Enterprise
|
||||
:link: https://enterprise.docs.scylladb.com
|
||||
:class: large-4
|
||||
:anchor: ScyllaDB Enterprise Documentation
|
||||
|
||||
Deploy and manage ScyllaDB's most stable enterprise-grade database with premium features and 24/7 support.
|
||||
|
||||
.. topic-box::
|
||||
:title: ScyllaDB Open Source
|
||||
:link: getting-started
|
||||
:class: large-4
|
||||
:anchor: Get Started with Scylla
|
||||
:anchor: ScyllaDB Open Source Documentation
|
||||
|
||||
Provision and manage a Scylla cluster in your environment.
|
||||
Deploy and manage your database in your environment.
|
||||
|
||||
.. topic-box::
|
||||
:title: Connect your application to Scylla
|
||||
:link: using-scylla/drivers
|
||||
:class: large-4
|
||||
:anchor: Choose a Driver
|
||||
|
||||
Use high performance Scylla drivers to connect your application to a Scylla cluster.
|
||||
|
||||
.. raw:: html
|
||||
|
||||
@@ -57,75 +58,50 @@
|
||||
|
||||
<div class="topics-grid topics-grid--products">
|
||||
|
||||
<h2 class="topics-grid__title">Our Product List</h2>
|
||||
<p class="topics-grid__text">To begin choose a product from the list below</p>
|
||||
<h2 class="topics-grid__title">Other Products</h2>
|
||||
|
||||
<div class="grid-container full">
|
||||
<div class="grid-x grid-margin-x">
|
||||
|
||||
.. topic-box::
|
||||
:title: Scylla Enterprise
|
||||
:link: getting-started
|
||||
:image: /_static/img/mascots/scylla-enterprise.svg
|
||||
:class: topic-box--product,large-3,small-6
|
||||
|
||||
ScyllaDB’s most stable high-performance enterprise-grade NoSQL database.
|
||||
|
||||
.. topic-box::
|
||||
:title: Scylla Open Source
|
||||
:link: getting-started
|
||||
:image: /_static/img/mascots/scylla-opensource.svg
|
||||
:class: topic-box--product,large-3,small-6
|
||||
|
||||
A high-performance NoSQL database with a close-to-the-hardware, shared-nothing approach.
|
||||
|
||||
.. topic-box::
|
||||
:title: Scylla Cloud
|
||||
:link: https://cloud.docs.scylladb.com
|
||||
:image: /_static/img/mascots/scylla-cloud.svg
|
||||
:class: topic-box--product,large-3,small-6
|
||||
|
||||
A fully managed NoSQL database as a service powered by Scylla Enterprise.
|
||||
|
||||
.. topic-box::
|
||||
:title: Scylla Alternator
|
||||
:title: ScyllaDB Alternator
|
||||
:link: https://docs.scylladb.com/stable/alternator/alternator.html
|
||||
:image: /_static/img/mascots/scylla-alternator.svg
|
||||
:class: topic-box--product,large-3,small-6
|
||||
:class: topic-box--product,large-4,small-6
|
||||
|
||||
Open source Amazon DynamoDB-compatible API.
|
||||
|
||||
.. topic-box::
|
||||
:title: Scylla Monitoring Stack
|
||||
:title: ScyllaDB Monitoring Stack
|
||||
:link: https://monitoring.docs.scylladb.com
|
||||
:image: /_static/img/mascots/scylla-monitor.svg
|
||||
:class: topic-box--product,large-3,small-6
|
||||
:class: topic-box--product,large-4,small-6
|
||||
|
||||
Complete open source monitoring solution for your Scylla clusters.
|
||||
Complete open source monitoring solution for your ScyllaDB clusters.
|
||||
|
||||
.. topic-box::
|
||||
:title: Scylla Manager
|
||||
:title: ScyllaDB Manager
|
||||
:link: https://manager.docs.scylladb.com
|
||||
:image: /_static/img/mascots/scylla-manager.svg
|
||||
:class: topic-box--product,large-3,small-6
|
||||
:class: topic-box--product,large-4,small-6
|
||||
|
||||
Hassle-free Scylla NoSQL database management for scale-out clusters.
|
||||
Hassle-free ScyllaDB NoSQL database management for scale-out clusters.
|
||||
|
||||
.. topic-box::
|
||||
:title: Scylla Drivers
|
||||
:title: ScyllaDB Drivers
|
||||
:link: https://docs.scylladb.com/stable/using-scylla/drivers/
|
||||
:image: /_static/img/mascots/scylla-drivers.svg
|
||||
:class: topic-box--product,large-3,small-6
|
||||
:class: topic-box--product,large-4,small-6
|
||||
|
||||
Shard-aware drivers for superior performance.
|
||||
|
||||
.. topic-box::
|
||||
:title: Scylla Operator
|
||||
:title: ScyllaDB Operator
|
||||
:link: https://operator.docs.scylladb.com
|
||||
:image: /_static/img/mascots/scylla-enterprise.svg
|
||||
:class: topic-box--product,large-3,small-6
|
||||
:class: topic-box--product,large-4,small-6
|
||||
|
||||
Easily run and manage your Scylla Cluster on Kubernetes.
|
||||
Easily run and manage your ScyllaDB cluster on Kubernetes.
|
||||
|
||||
.. raw:: html
|
||||
|
||||
@@ -135,19 +111,19 @@
|
||||
|
||||
<div class="topics-grid">
|
||||
|
||||
<h2 class="topics-grid__title">Learn More About Scylla</h2>
|
||||
<h2 class="topics-grid__title">Learn More About ScyllaDB</h2>
|
||||
<p class="topics-grid__text"></p>
|
||||
<div class="grid-container full">
|
||||
<div class="grid-x grid-margin-x">
|
||||
|
||||
.. topic-box::
|
||||
:title: Attend Scylla University
|
||||
:title: Attend ScyllaDB University
|
||||
:link: https://university.scylladb.com/
|
||||
:image: /_static/img/mascots/scylla-university.png
|
||||
:class: large-6,small-12
|
||||
:anchor: Find a Class
|
||||
|
||||
| Register to take a *free* class at Scylla University.
|
||||
| Register to take a *free* class at ScyllaDB University.
|
||||
| There are several learning paths to choose from.
|
||||
|
||||
.. topic-box::
|
||||
@@ -178,9 +154,9 @@
|
||||
architecture/index
|
||||
troubleshooting/index
|
||||
kb/index
|
||||
Scylla University <https://university.scylladb.com/>
|
||||
ScyllaDB University <https://university.scylladb.com/>
|
||||
faq
|
||||
Contribute to Scylla <contribute>
|
||||
Contribute to ScyllaDB <contribute>
|
||||
glossary
|
||||
alternator/alternator
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
Counting all rows in a table is slow
|
||||
====================================
|
||||
|
||||
**Audience: Scylla users**
|
||||
**Audience: ScyllaDB users**
|
||||
|
||||
Trying to count all rows in a table using
|
||||
|
||||
@@ -10,14 +10,21 @@ Trying to count all rows in a table using
|
||||
|
||||
SELECT COUNT(1) FROM ks.table;
|
||||
|
||||
often fails with **ReadTimeout** error.
|
||||
may fail with the **ReadTimeout** error.
|
||||
|
||||
COUNT() is running a full-scan query on all nodes, which might take a long time to finish. Often the time is greater than Scylla query timeout.
|
||||
One way to bypass this in Scylla 4.4 or later is increasing the timeout for this query using the :ref:`USING TIMEOUT <using-timeout>` directive, for example:
|
||||
COUNT() runs a full-scan query on all nodes, which might take a long time to finish. As a result, the count time may be greater than the ScyllaDB query timeout.
|
||||
One way to prevent that issue in Scylla 4.4 or later is to increase the timeout for the query using the :ref:`USING TIMEOUT <using-timeout>` directive, for example:
|
||||
|
||||
|
||||
.. code-block:: cql
|
||||
|
||||
SELECT COUNT(1) FROM ks.table USING TIMEOUT 120s;
|
||||
|
||||
You can also get an *estimation* of the number **of partitions** (not rows) with :doc:`nodetool tablestats </operating-scylla/nodetool-commands/tablestats>`
|
||||
You can also get an *estimation* of the number **of partitions** (not rows) with :doc:`nodetool tablestats </operating-scylla/nodetool-commands/tablestats>`.
|
||||
|
||||
.. note::
|
||||
ScyllaDB 5.1 includes improvements to speed up the execution of SELECT COUNT(*) queries.
|
||||
To increase the count speed, we recommend upgrading to ScyllaDB 5.1 or later.
|
||||
|
||||
|
||||
.. REMOVE IN FUTURE VERSIONS - Remove the note above in version 5.1.
|
||||
|
||||
@@ -55,6 +55,7 @@ Knowledge Base
|
||||
* :doc:`Map CPUs to Scylla Shards </kb/map-cpu>` - Mapping between CPUs and Scylla shards
|
||||
* :doc:`Recreate RAID devices </kb/raid-device>` - How to recreate your RAID devices without running scylla-setup
|
||||
* :doc:`Configure Scylla Networking with Multiple NIC/IP Combinations </kb/yaml-address>` - examples for setting the different IP addresses in scylla.yaml
|
||||
* :doc:`Updating the Mode in perftune.yaml After a ScyllaDB Upgrade </kb/perftune-modes-sync>`
|
||||
* :doc:`Kafka Sink Connector Quickstart </using-scylla/integrations/kafka-connector>`
|
||||
* :doc:`Kafka Sink Connector Configuration </using-scylla/integrations/sink-config>`
|
||||
|
||||
|
||||
48
docs/kb/perftune-modes-sync.rst
Normal file
48
docs/kb/perftune-modes-sync.rst
Normal file
@@ -0,0 +1,48 @@
|
||||
==============================================================
|
||||
Updating the Mode in perftune.yaml After a ScyllaDB Upgrade
|
||||
==============================================================
|
||||
|
||||
In versions 5.1 (ScyllaDB Open Source) and 2022.2 (ScyllaDB Enterprise), we improved ScyllaDB's performance by `removing the rx_queues_count from the mode
|
||||
condition <https://github.com/scylladb/seastar/pull/949>`_. As a result, ScyllaDB operates in
|
||||
the ``sq_split`` mode instead of the ``mq`` mode (see :doc:`Seastar Perftune </operating-scylla/admin-tools/perftune>` for information about the modes).
|
||||
If you upgrade from an earlier version of ScyllaDB, your cluster's existing nodes may use the ``mq`` mode,
|
||||
while new nodes will use the ``sq_split`` mode. As using different modes across one cluster is not recommended,
|
||||
you should change the configuration to ensure that the ``sq_split`` mode is used on all nodes.
|
||||
|
||||
This section describes how to update the `perftune.yaml` file to configure the ``sq_split`` mode on all nodes.
|
||||
|
||||
Procedure
|
||||
------------
|
||||
The examples below assume that you are using the default locations for storing data and the `scylla.yaml` file,
|
||||
and that your NIC is ``eth5``.
|
||||
|
||||
#. Backup your old configuration.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
sudo mv /etc/scylla.d/cpuset.conf /etc/scylla.d/cpuset.conf.old
|
||||
sudo mv /etc/scylla.d/perftune.yaml /etc/scylla.d/perftune.yaml.old
|
||||
|
||||
#. Create a new configuration.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
sudo scylla_sysconfig_setup --nic eth5 --homedir /var/lib/scylla --confdir /etc/scylla
|
||||
|
||||
A new ``/etc/scylla.d/cpuset.conf`` will be generated on the output.
|
||||
|
||||
#. Compare the contents of the newly generated ``/etc/scylla.d/cpuset.conf`` with ``/etc/scylla.d/cpuset.conf.old`` you created in step 1.
|
||||
|
||||
- If they are exactly the same, rename ``/etc/scylla.d/perftune.yaml.old`` you created in step 1 back to ``/etc/scylla.d/perftune.yaml`` and continue to the next node.
|
||||
- If they are different, move on to the next steps.
|
||||
|
||||
#. Restart the ``scylla-server`` service.
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
nodetool drain
|
||||
sudo systemctl restart scylla-server
|
||||
|
||||
#. Wait for the service to become up and running (similarly to how it is done during a :doc:`rolling restart </operating-scylla/procedures/config-change/rolling-restart>`). It may take a considerable amount of time before the node is in the UN state due to resharding.
|
||||
|
||||
#. Continue to the next node.
|
||||
@@ -42,7 +42,7 @@ Steps:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
nodetool compact <keyspace>.<mytable>;
|
||||
nodetool compact <keyspace> <mytable>;
|
||||
|
||||
5. Alter the table and change the grace period back to the original ``gc_grace_seconds`` value.
|
||||
|
||||
|
||||
@@ -3,8 +3,8 @@
|
||||
* :doc:`REST - Scylla REST/HTTP Admin API</operating-scylla/rest>`.
|
||||
* :doc:`Tracing </using-scylla/tracing>` - a ScyllaDB tool for debugging and analyzing internal flows in the server.
|
||||
* :doc:`SSTableloader </operating-scylla/admin-tools/sstableloader>` - Bulk load the sstables found in the directory to a Scylla cluster
|
||||
* :doc:`scylla-sstable </operating-scylla/admin-tools/scylla-sstable>` - Validates and dumps the content of SStables, generates a histogram, dumps the content of the SStable index.
|
||||
* :doc:`scylla-types </operating-scylla/admin-tools/scylla-types/>` - Examines raw values obtained from SStables, logs, coredumps, etc.
|
||||
* :doc:`Scylla SStable </operating-scylla/admin-tools/scylla-sstable>` - Validates and dumps the content of SStables, generates a histogram, dumps the content of the SStable index.
|
||||
* :doc:`Scylla Types </operating-scylla/admin-tools/scylla-types/>` - Examines raw values obtained from SStables, logs, coredumps, etc.
|
||||
* :doc:`cassandra-stress </operating-scylla/admin-tools/cassandra-stress/>` - A tool for benchmarking and load testing Scylla and Cassandra clusters.
|
||||
* :doc:`SSTabledump - Scylla 3.0, Scylla Enterprise 2019.1 and newer versions </operating-scylla/admin-tools/sstabledump>`
|
||||
* :doc:`SSTable2JSON - Scylla 2.3 and older </operating-scylla/admin-tools/sstable2json>`
|
||||
|
||||
@@ -9,8 +9,8 @@ Admin Tools
|
||||
CQLSh </cql/cqlsh>
|
||||
REST </operating-scylla/rest>
|
||||
Tracing </using-scylla/tracing>
|
||||
scylla-sstable
|
||||
scylla-types </operating-scylla/admin-tools/scylla-types/>
|
||||
Scylla SStable </operating-scylla/admin-tools/scylla-sstable/>
|
||||
Scylla Types </operating-scylla/admin-tools/scylla-types/>
|
||||
sstableloader
|
||||
cassandra-stress </operating-scylla/admin-tools/cassandra-stress/>
|
||||
sstabledump
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
scylla-sstable
|
||||
Scylla SStable
|
||||
==============
|
||||
|
||||
.. versionadded:: 5.0
|
||||
@@ -9,7 +9,17 @@ Introduction
|
||||
This tool allows you to examine the content of SStables by performing operations such as dumping the content of SStables,
|
||||
generating a histogram, validating the content of SStables, and more. See `Supported Operations`_ for the list of available operations.
|
||||
|
||||
Run ``scylla-sstable --help`` for additional information about the tool and the operations.
|
||||
Run ``scylla sstable --help`` for additional information about the tool and the operations.
|
||||
|
||||
This tool is similar to SStableDump_, with notable differences:
|
||||
|
||||
* Built on the ScyllaDB C++ codebase, it supports all SStable formats and components that ScyllaDB supports.
|
||||
* Expanded scope: this tool supports much more than dumping SStable data components (see `Supported Operations`_).
|
||||
* More flexible on how schema is obtained and where SStables are located: SStableDump_ only supports dumping SStables located in their native data directory. To dump an SStable, one has to clone the entire ScyllaDB data directory tree, including system table directories and even config files. ``scylla sstable`` can dump sstables from any path with multiple choices on how to obtain the schema, see Schema_.
|
||||
|
||||
Currently, SStableDump_ works better on production systems as it automatically loads the schema from the system tables, unlike ``scylla sstable``, which has to be provided with the schema explicitly. On the other hand ``scylla sstable`` works better for off-line investigations, as it can be used with as little as just a schema definition file and a single sstable. In the future we plan on closing this gap -- adding support for automatic schema-loading for ``scylla sstable`` too -- and completely supplant SStableDump_ with ``scylla sstable``.
|
||||
|
||||
.. _SStableDump: /operating-scylla/admin-tools/sstabledump
|
||||
|
||||
Usage
|
||||
------
|
||||
@@ -21,11 +31,82 @@ The command syntax is as follows:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
scylla-sstable <operation> <path to SStable>
|
||||
scylla sstable <operation> <path to SStable>
|
||||
|
||||
|
||||
You can specify more than one SStable.
|
||||
|
||||
Schema
|
||||
^^^^^^
|
||||
All operations need a schema to interpret the SStables with.
|
||||
Currently, there are two ways to obtain the schema:
|
||||
|
||||
* ``--schema-file FILENAME`` - Read the schema definition from a file.
|
||||
* ``--system-schema KEYSPACE.TABLE`` - Use the known definition of built-in tables (only works for system tables).
|
||||
|
||||
By default, the tool uses the first method: ``--schema-file schema.cql``; i.e. it assumes there is a schema file named ``schema.cql`` in the working directory.
|
||||
If this fails, it will exit with an error.
|
||||
|
||||
The schema file should contain all definitions needed to interpret data belonging to the table.
|
||||
|
||||
Example ``schema.cql``:
|
||||
|
||||
.. code-block:: cql
|
||||
|
||||
CREATE KEYSPACE ks WITH replication = {'class': 'NetworkTopologyStrategy', 'mydc1': 1, 'mydc2': 4};
|
||||
|
||||
CREATE TYPE ks.mytype (
|
||||
f1 int,
|
||||
f2 text
|
||||
);
|
||||
|
||||
CREATE TABLE ks.cf (
|
||||
pk int,
|
||||
ck text,
|
||||
v1 int,
|
||||
v2 mytype,
|
||||
PRIMARY KEY (pk, ck)
|
||||
);
|
||||
|
||||
Note:
|
||||
|
||||
* In addition to the table itself, the definition also has to include any user-defined types the table uses.
|
||||
* The keyspace definition is optional; if it is missing, one will be auto-generated.
|
||||
* The schema file doesn't have to be called ``schema.cql``, this is just the default name. Any file name is supported (with any extension).
|
||||
|
||||
Dropped columns
|
||||
***************
|
||||
|
||||
The examined SStable might have columns which were dropped from the schema definition. In this case, providing the up-to-date schema will not be enough; the tool will fail when attempting to process a cell for the dropped column.
|
||||
Dropped columns can be provided to the tool in the form of insert statements into the ``system_schema.dropped_columns`` system table, in the schema definition file. Example:
|
||||
|
||||
.. code-block:: cql
|
||||
|
||||
INSERT INTO system_schema.dropped_columns (
|
||||
keyspace_name,
|
||||
table_name,
|
||||
column_name,
|
||||
dropped_time,
|
||||
type
|
||||
) VALUES (
|
||||
'ks',
|
||||
'cf',
|
||||
'v1',
|
||||
1631011979170675,
|
||||
'int'
|
||||
);
|
||||
|
||||
CREATE TABLE ks.cf (pk int PRIMARY KEY, v2 int);
|
||||
|
||||
System tables
|
||||
*************
|
||||
|
||||
If the examined table is a system table -- it belongs to one of the system keyspaces (``system``, ``system_schema``, ``system_distributed`` or ``system_distributed_everywhere``) -- you can just tell the tool to use the known built-in definition of said table. This is possible with the ``--system-schema`` flag. Example:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
scylla sstable dump-data --system-schema system.local ./path/to/md-123456-big-Data.db
|
||||
|
||||
Supported Operations
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
The ``dump-*`` operations output JSON. For ``dump-data``, you can specify another output format.
|
||||
@@ -56,17 +137,17 @@ Dumping the content of the SStable:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
scylla-sstable dump-data /path/to/md-123456-big-Data.db
|
||||
scylla sstable dump-data /path/to/md-123456-big-Data.db
|
||||
|
||||
Dumping the content of two SStables as a unified stream:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
scylla-sstable dump-data --merge /path/to/md-123456-big-Data.db /path/to/md-123457-big-Data.db
|
||||
scylla sstable dump-data --merge /path/to/md-123456-big-Data.db /path/to/md-123457-big-Data.db
|
||||
|
||||
|
||||
Validating the specified SStables:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
scylla-sstable validate /path/to/md-123456-big-Data.db /path/to/md-123457-big-Data.db
|
||||
scylla sstable validate /path/to/md-123456-big-Data.db /path/to/md-123457-big-Data.db
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
scylla-types
|
||||
Scylla Types
|
||||
==============
|
||||
|
||||
.. versionadded:: 5.0
|
||||
@@ -26,7 +26,7 @@ The command syntax is as follows:
|
||||
* Provide the values in the hex form without a leading 0x prefix.
|
||||
* You must specify the type of the provided values. See :ref:`Specifying the Value Type <scylla-types-type>`.
|
||||
* The number of provided values depends on the operation. See :ref:`Supported Operations <scylla-types-operations>` for details.
|
||||
* The scylla-types operations come with additional options. See :ref:`Additional Options <scylla-types-options>` for the list of options.
|
||||
* The ``scylla types`` operations come with additional options. See :ref:`Additional Options <scylla-types-options>` for the list of options.
|
||||
|
||||
.. _scylla-types-type:
|
||||
|
||||
|
||||
@@ -4,8 +4,10 @@ SSTabledump
|
||||
This tool allows you to convert an SSTable into a JSON format file.
|
||||
SSTabledump is supported when using Scylla 3.0, Scylla Enterprise 2019.1, and newer versions.
|
||||
In older versions, the tool is named SSTable2json_.
|
||||
If you need more flexibility or want to dump more than just the data-component, see scylla-sstable_.
|
||||
|
||||
.. _SSTable2json: /operating-scylla/admin-tools/sstable2json
|
||||
.. _scylla-sstable: /operating-scylla/admin-tools/scylla-sstable
|
||||
|
||||
Use the full path to the data file when executing the command.
|
||||
|
||||
|
||||
@@ -9,12 +9,9 @@ Scylla for Administrators
|
||||
Procedures <procedures/index>
|
||||
security/index
|
||||
admin-tools/index
|
||||
manager/index
|
||||
ScyllaDB Monitoring Stack <https://monitoring.docs.scylladb.com/>
|
||||
ScyllaDB Operator <https://operator.docs.scylladb.com/>
|
||||
ScyllaDB Manager <https://manager.docs.scylladb.com/>
|
||||
Scylla Monitoring Stack <monitoring/index>
|
||||
Scylla Operator <scylla-operator/index>
|
||||
Upgrade Procedures </upgrade/index>
|
||||
System Configuration <system-configuration/index>
|
||||
benchmarking-scylla
|
||||
@@ -36,15 +33,9 @@ Scylla for Administrators
|
||||
:class: my-panel
|
||||
|
||||
* :doc:`Scylla Tools </operating-scylla/admin-tools/index>` - Tools for Administrating and integrating with Scylla
|
||||
* `ScyllaDB Manager <https://manager.docs.scylladb.com/>`_ - Tool for cluster administration and automation
|
||||
* `ScyllaDB Monitoring Stack <https://monitoring.docs.scylladb.com/stable/>`_ - Tool for cluster monitoring and alerting
|
||||
* `ScyllaDB Operator <https://operator.docs.scylladb.com>`_ - Tool to run Scylla on Kubernetes
|
||||
* :doc:`Scylla Logs </getting-started/logging/>`
|
||||
|
||||
.. panel-box::
|
||||
|
||||
@@ -102,4 +102,4 @@ Cluster Management Procedures
|
||||
|
||||
Procedures for handling failures and practical examples of different scenarios.
|
||||
|
||||
* :ref:`Handling Failures<raft-handling-failures>`
|
||||
* :ref:`Handling Failures<raft-handling-failures>`
|
||||
|
||||
@@ -1,2 +0,0 @@
|
||||
.. Note:: This reference covers CQL specification version |cql-version|
|
||||
|
||||
@@ -68,7 +68,7 @@ Gracefully stop the node
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-enterprise-server stop
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
@@ -92,13 +92,13 @@ Start the node
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-enterprise-server start
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check the ScyllaDB version.
|
||||
3. Check scylla-enterprise-server log (by ``journalctl _COMM=scylla``) and ``/var/log/syslog`` to validate there are no errors.
|
||||
3. Check scylla-server log (by ``journalctl _COMM=scylla``) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after 2 minutes to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
@@ -130,7 +130,7 @@ Gracefully shutdown ScyllaDB
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-enterprise-server stop
|
||||
sudo service scylla-server stop
|
||||
|
||||
Downgrade to the previous release
|
||||
----------------------------------
|
||||
@@ -164,7 +164,7 @@ Start the node
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-enterprise-server start
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
|
||||
@@ -7,9 +7,7 @@ This document is a step-by-step procedure for upgrading from ScyllaDB Open Sourc
|
||||
|
||||
Applicable Versions
|
||||
===================
|
||||
This guide covers upgrading ScyllaDB from version 5.0.x to ScyllaDB Enterprise version 2022.1.y on the following platform:
|
||||
|
||||
* |OS|
|
||||
This guide covers upgrading ScyllaDB from version 5.0.x to ScyllaDB Enterprise version 2022.1.y on |OS|. See :doc:`OS Support by Platform and Version </getting-started/os-support>` for information about supported |OS| versions.
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
@@ -30,7 +28,7 @@ Apply the following procedure **serially** on each node. Do not move to the next
|
||||
**During** the rolling upgrade, it is highly recommended:
|
||||
|
||||
* Not to use new 2022.1 features.
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes. See :doc:`here </operating-scylla/manager/2.1/sctool>` for suspending Scylla Manager's scheduled or running repairs.
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes. See `sctool <https://manager.docs.scylladb.com/stable/sctool/>`_ for suspending Scylla Manager's scheduled or running repairs.
|
||||
* Not to apply schema changes.
|
||||
|
||||
|
||||
@@ -116,7 +114,7 @@ New io.conf format was introduced in ScyllaDB 2.3 and 2019.1. If your io.conf do
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-enterprise-server start
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
@@ -156,7 +154,7 @@ Gracefully shutdown ScyllaDB
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-enterprise-server stop
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
|
||||
@@ -0,0 +1,73 @@
|
||||
======================================================================
|
||||
Upgrade Guide - |SCYLLA_NAME| |SRC_VERSION| to |NEW_VERSION| for |OS|
|
||||
======================================================================
|
||||
|
||||
This document is a step-by-step procedure for upgrading from |SCYLLA_NAME| |FROM| to |SCYLLA_NAME| |TO|, and rollback to 2021.1 if required.
|
||||
|
||||
Applicable versions
|
||||
===================
|
||||
This guide covers upgrading |SCYLLA_NAME| from version |FROM| to version |TO| on |OS|. See :doc:`OS Support by Platform and Version </getting-started/os-support>` for information about supported versions.
|
||||
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
.. include:: /upgrade/upgrade-enterprise/_common/enterprise_2022.1_warnings.rst
|
||||
|
||||
A ScyllaDB Enterprise upgrade is a rolling procedure which does **not** require a full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* Drain the node and backup the data
|
||||
* Check your current release
|
||||
* Backup the configuration file
|
||||
* Stop ScyllaDB
|
||||
* Download and install the new ScyllaDB packages
|
||||
* Start ScyllaDB
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating that the node that you upgraded is up and running the new version.
|
||||
|
||||
**During** the rolling upgrade, it is highly recommended:
|
||||
|
||||
* Not to use new 2022.x.z features.
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes.
|
||||
* Not to apply schema changes.
|
||||
|
||||
Upgrade Steps
|
||||
=============
|
||||
|
||||
Drain the node and backup the data
|
||||
------------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In ScyllaDB, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is completed on all nodes, the snapshot should be removed with the ``nodetool clearsnapshot -t <snapshot>`` command, or you risk running out of space.
|
||||
|
||||
Backup the configuration file
|
||||
-------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-2022.x.z
|
||||
|
||||
Backup more config files.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
for conf in $(cat /var/lib/dpkg/info/scylla-*server.conffiles /var/lib/dpkg/info/scylla-*conf.conffiles /var/lib/dpkg/info/scylla-*jmx.conffiles | grep -v init ); do sudo cp -v $conf $conf.backup-2.1; done
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-enterprise-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a 2022.x.y version, stop right here! This guide only covers 2022.x.y to 2022.x.z upgrades.
|
||||
@@ -0,0 +1,95 @@
|
||||
**To upgrade ScyllaDB:**
|
||||
|
||||
#. Update the |APT|_ to **2022.x**.
|
||||
#. Install:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get clean all
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade scylla-enterprise
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
#. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
#. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check the ScyllaDB version.
|
||||
#. Check scylla-server log (by ``journalctl _COMM=scylla``) and ``/var/log/syslog`` to validate there are no errors.
|
||||
#. Check again after 2 minutes to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from ScyllaDB Enterprise release 2022.x.z to 2022.x.y. Apply this procedure if an upgrade from 2022.x.y to 2022.x.z failed before completing on all nodes. Use this procedure only for nodes you upgraded to 2022.x.z.
|
||||
|
||||
ScyllaDB rollback is a rolling procedure which does **not** require a full cluster shutdown.
|
||||
For each of the nodes you roll back to 2022.x.y, you will:
|
||||
|
||||
* Gracefully shutdown ScyllaDB
|
||||
* Downgrade to the previous release
|
||||
* Restore the configuration file
|
||||
* Restart ScyllaDB
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating that the node is up and running the new version.
|
||||
|
||||
Rollback Steps
|
||||
==============
|
||||
|
||||
Gracefully shutdown ScyllaDB
|
||||
-----------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Downgrade to the previous release
|
||||
----------------------------------
|
||||
|
||||
Install:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get install scylla-enterprise=2022.x.y\* scylla-enterprise-server=2022.x.y\* scylla-enterprise-jmx=2022.x.y\* scylla-enterprise-tools=2022.x.y\* scylla-enterprise-tools-core=2022.x.y\* scylla-enterprise-kernel-conf=2022.x.y\* scylla-enterprise-conf=2022.x.y\* scylla-enterprise-python3=2022.x.y\*
|
||||
sudo apt-get install scylla-enterprise-machine-image=2022.x.y\* # only execute on AMI instance
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-2022.x.z /etc/scylla/scylla.yaml
|
||||
|
||||
Restore more config files.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
for conf in $(cat /var/lib/dpkg/info/scylla-*server.conffiles /var/lib/dpkg/info/scylla-*conf.conffiles /var/lib/dpkg/info/scylla-*jmx.conffiles | grep -v init ); do sudo cp -v $conf.backup-2.1 $conf; done
|
||||
sudo systemctl daemon-reload
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check the upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -0,0 +1,2 @@
|
||||
.. include:: /upgrade/_common/upgrade-guide-v2022-patch-ubuntu-and-debian-p1.rst
|
||||
.. include:: /upgrade/_common/upgrade-guide-v2022-patch-ubuntu-and-debian-p2.rst
|
||||
@@ -0,0 +1,79 @@
|
||||
=============================================================================
|
||||
Upgrade Guide - |SCYLLA_NAME| |SRC_VERSION| to |NEW_VERSION| for |OS|
|
||||
=============================================================================
|
||||
|
||||
This document is a step-by-step procedure for upgrading from ScyllaDB Enterprise 2021.1 to ScyllaDB Enterprise 2022.1, and rollback to 2021.1 if required.
|
||||
|
||||
|
||||
Applicable Versions
|
||||
===================
|
||||
This guide covers upgrading ScyllaDB Enterprise from version **2021.1.8** or later to ScyllaDB Enterprise version 2022.1.y on |OS|. See :doc:`OS Support by Platform and Version </getting-started/os-support>` for information about supported versions.
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
.. include:: /upgrade/upgrade-enterprise/_common/enterprise_2022.1_warnings.rst
|
||||
|
||||
A ScyllaDB upgrade is a rolling procedure that does **not** require a full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* Check the cluster schema
|
||||
* Drain the node and backup the data
|
||||
* Backup the configuration file
|
||||
* Stop ScyllaDB
|
||||
* Download and install the new ScyllaDB packages
|
||||
* Start ScyllaDB
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node that you upgraded is up and running the new version.
|
||||
|
||||
**During** the rolling upgrade, it is highly recommended:
|
||||
|
||||
* Not to use new 2022.1 features.
|
||||
* Not to run administration functions, like repairs, refresh, rebuild, or add or remove nodes. See `sctool <https://manager.docs.scylladb.com/stable/sctool/index.html>`_ for suspending ScyllaDB Manager's scheduled or running repairs.
|
||||
* Not to apply schema changes.
|
||||
|
||||
.. include:: /upgrade/_common/upgrade_to_2022_warning.rst
|
||||
|
||||
Upgrade Steps
|
||||
=============
|
||||
Check the cluster schema
|
||||
-------------------------
|
||||
Make sure that all nodes have the schema synched before the upgrade. The upgrade will fail if there is a schema disagreement between nodes.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain the nodes and backup the data
|
||||
-------------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In ScyllaDB, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is completed on all nodes, the snapshot should be removed with the ``nodetool clearsnapshot -t <snapshot>`` command to prevent running out of space.
|
||||
|
||||
Backup the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-2021.1
|
||||
|
||||
Gracefully stop the node
|
||||
------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -l scylla\*server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a 2021.1.x version, stop right here! This guide only covers 2021.1.x to 2022.1.y upgrades.
|
||||
|
||||
|
||||
|
||||
@@ -0,0 +1,127 @@
|
||||
**To upgrade ScyllaDB:**
|
||||
|
||||
#. Update the |APT|_ to **2022.1** and enable scylla/ppa repo.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
Ubuntu 16:
|
||||
sudo add-apt-repository -y ppa:scylladb/ppa
|
||||
|
||||
#. Configure Java 1.8, which is requested by ScyllaDB Enterprise 2022.1.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y openjdk-8-jre-headless
|
||||
sudo update-java-alternatives -s java-1.8.0-openjdk-amd64
|
||||
|
||||
#. Install:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get clean all
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade scylla-enterprise
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
A new io.conf format was introduced in Scylla 2.3 and 2019.1. If your io.conf doesn't contain the `--io-properties-file` option, then it's still the old format. You need to re-run the io setup to generate a new io.conf.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo scylla_io_setup
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
#. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
#. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check the ScyllaDB version.
|
||||
#. Check scylla-server log (by ``journalctl _COMM=scylla``) and ``/var/log/syslog`` to validate there are no errors.
|
||||
#. Check again after two minutes to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
See :doc:`Scylla Metrics Update - Scylla Enterprise 2021.1 to 2022.1<metric-update-2021.1-to-2022.1>` for more information.
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from ScyllaDB Enterprise release 2022.1.x to 2021.1.y. Apply this procedure if an upgrade from 2021.1 to 2022.1 failed before completing on all nodes. Use this procedure only for nodes you upgraded to 2022.1.
|
||||
|
||||
ScyllaDB rollback is a rolling procedure that does **not** require a full cluster shutdown.
|
||||
For each of the nodes you roll back to 2021.1, you will:
|
||||
|
||||
* Drain the node and stop ScyllaDB
|
||||
* Retrieve the old Scylla packages
|
||||
* Restore the configuration file
|
||||
* Restart ScyllaDB
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
|
||||
Rollback Steps
|
||||
==============
|
||||
Gracefully shutdown ScyllaDB
|
||||
----------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
#. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
#. Update the |APT|_ to **2021.1**.
|
||||
#. Install:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get clean all
|
||||
sudo apt-get update
|
||||
sudo apt-get remove scylla\* -y
|
||||
sudo apt-get install scylla-enterprise
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-2021.1 /etc/scylla/scylla.yaml
|
||||
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from the previous snapshot - 2022.1 uses a different set of system tables. Refer to :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cd /var/lib/scylla/data/keyspace_name/table_name-UUID/snapshots/<snapshot_name>/
|
||||
sudo cp -r * /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
sudo chown -R scylla:scylla /var/lib/scylla/data/keyspace_name/table_name-UUID/
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check the upgrade instructions above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -0,0 +1,2 @@
|
||||
.. include:: /upgrade/_common/upgrade-guide-v2022-ubuntu-and-debian-p1.rst
|
||||
.. include:: /upgrade/_common/upgrade-guide-v2022-ubuntu-and-debian-p2.rst
|
||||
@@ -2,89 +2,84 @@
|
||||
Upgrade Guide - |SCYLLA_NAME| |SRC_VERSION| to |NEW_VERSION| for |OS|
|
||||
=============================================================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from |SCYLLA_NAME| |SRC_VERSION| to |SCYLLA_NAME| |NEW_VERSION|, and rollback to |SRC_VERSION| if required.
|
||||
This document is a step by step procedure for upgrading from |SCYLLA_NAME| |SRC_VERSION| to |SCYLLA_NAME| |NEW_VERSION|, and rollback to version |SRC_VERSION| if required.
|
||||
|
||||
|
||||
Applicable versions
|
||||
Applicable Versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from the following versions: |SRC_VERSION|.x or later to |SCYLLA_NAME| version |NEW_VERSION|.y, on the following platforms:
|
||||
|
||||
* |OS|
|
||||
This guide covers upgrading |SCYLLA_NAME| |SRC_VERSION| to |SCYLLA_NAME| |NEW_VERSION| on |OS|.
|
||||
See :doc:`OS Support by Platform and Version </getting-started/os-support>` for information about supported versions.
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
Upgrading your Scylla version is a rolling procedure that does not require a full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
Upgrading your ScyllaDB version is a rolling procedure that does not require a full cluster shutdown. For each of the nodes in the cluster, serially (i.e. one at a time), you will:
|
||||
|
||||
* Check the cluster's schema
|
||||
* Drain the node and backup the data
|
||||
* Backup the configuration file
|
||||
* Stop the Scylla service
|
||||
* Download and install new Scylla packages
|
||||
* Start the Scylla service
|
||||
* Stop ScyllaDB
|
||||
* Download and install new ScyllaDB packages
|
||||
* Start ScyllaDB
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running the new version.
|
||||
|
||||
**During** the rolling upgrade, it is highly recommended:
|
||||
|
||||
* Not to use new |NEW_VERSION| features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes. See `sctool <https://manager.docs.scylladb.com/stable/sctool/index.html>`_ for suspending Scylla Manager (only available Scylla Enterprise) scheduled or running repairs.
|
||||
* Not to use the new |NEW_VERSION| features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes. See `sctool <https://manager.docs.scylladb.com/stable/sctool/index.html>`_ for suspending ScyllaDB Manager (only available for ScyllaDB Enterprise) scheduled or running repairs.
|
||||
* Not to apply schema changes
|
||||
|
||||
.. note:: Before upgrading, make sure to use the latest `Scylla Monitoring <https://monitoring.docs.scylladb.com/>`_ stack.
|
||||
.. note:: Before upgrading, make sure to use the latest `ScyllaDB Monitoring <https://monitoring.docs.scylladb.com/>`_ stack.
|
||||
|
||||
Upgrade steps
|
||||
Upgrade Steps
|
||||
=============
|
||||
Check the cluster schema
|
||||
------------------------
|
||||
Make sure that all nodes have the schema synched prior to upgrade as any schema disagreement between the nodes causes the upgrade to fail.
|
||||
Make sure that all nodes have the schema synced before the upgrade. The upgrade will fail if there is a schema disagreement between nodes.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool describecluster
|
||||
|
||||
Drain node and backup the data
|
||||
------------------------------
|
||||
Before any major procedure, like an upgrade, it is **highly recommended** to backup all the data to an external device. In Scylla, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
Drain the nodes and backup the data
|
||||
--------------------------------------
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In ScyllaDB, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to an external backup device.
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having that name under ``/var/lib/scylla`` to an external backup device.
|
||||
|
||||
When the upgrade is complete (for all nodes), remove the snapshot by running ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of disk space.
|
||||
When the upgrade is completed on all nodes, remove the snapshot with the ``nodetool clearsnapshot -t <snapshot>`` command to prevent running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
Backup the configuration file
|
||||
------------------------------
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-src
|
||||
|
||||
Stop Scylla
|
||||
-----------
|
||||
Stop ScyllaDB
|
||||
---------------
|
||||
.. include:: /rst_include/scylla-commands-stop-index.rst
|
||||
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what Scylla version you are currently running with ``rpm -qa | grep scylla-server``. You should use the same version as this version in case you want to :ref:`rollback <rollback-procedure>` the upgrade. If you are not running a |SRC_VERSION|.x version, stop right here! This guide only covers |SRC_VERSION|.x to |NEW_VERSION|.y upgrades.
|
||||
Before upgrading, check what version you are running now using ``rpm -qa | grep scylla-server``. You should use the same version as this version in case you want to :ref:`rollback <rollback-procedure>` the upgrade. If you are not running a |SRC_VERSION|.x version, stop right here! This guide only covers |SRC_VERSION|.x to |NEW_VERSION|.y upgrades.
|
||||
|
||||
To upgrade:
|
||||
|
||||
1. Update the |SCYLLA_REPO|_ to |NEW_VERSION|
|
||||
2. Install the new Scylla version
|
||||
#. Update the |SCYLLA_REPO|_ to |NEW_VERSION|.
|
||||
#. Install the new ScyllaDB version:
|
||||
|
||||
.. code:: sh
|
||||
.. code:: sh
|
||||
|
||||
sudo yum clean all
|
||||
sudo yum update scylla\* -y
|
||||
|
||||
.. note::
|
||||
|
||||
Alternator users upgrading from Scylla 4.0 to 4.1, need to set :doc:`default isolation level </upgrade/upgrade-opensource/upgrade-guide-from-4.0-to-4.1/alternator>`
|
||||
sudo yum clean all
|
||||
sudo yum update scylla\* -y
|
||||
|
||||
|
||||
Start the node
|
||||
@@ -94,62 +89,63 @@ Start the node
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check the Scylla version. Validate that the version matches the one you upgraded to.
|
||||
3. Use ``journalctl _COMM=scylla`` to check there are no new errors in the log.
|
||||
4. Check again after two minutes, to validate no new issues are introduced.
|
||||
#. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
#. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check the ScyllaDB version. Validate that the version matches the one you upgraded to.
|
||||
#. Use ``journalctl _COMM=scylla`` to check there are no new errors in the log.
|
||||
#. Check again after two minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade was successful, move to the next node in the cluster.
|
||||
|
||||
* More on |Scylla_METRICS|_
|
||||
See |Scylla_METRICS|_ for more information.
|
||||
|
||||
Rollback Procedure
|
||||
==================
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from |SCYLLA_NAME| release |NEW_VERSION|.x to |SRC_VERSION|.y. Apply this procedure if an upgrade from |SRC_VERSION| to |NEW_VERSION| failed before completing on all nodes. Use this procedure only for the nodes that you upgraded to |NEW_VERSION|
|
||||
The following procedure describes a rollback from |SCYLLA_NAME| release |NEW_VERSION|.x to |SRC_VERSION|.y. Apply this procedure if an upgrade from |SRC_VERSION| to |NEW_VERSION| failed before completing on all nodes. Use this procedure only for the nodes that you upgraded to |NEW_VERSION|.
|
||||
|
||||
|
||||
Scylla rollback is a rolling procedure that does **not** require a full cluster shutdown.
|
||||
For each of the nodes rollback to |SRC_VERSION|, you will:
|
||||
ScyllaDB rollback is a rolling procedure that does **not** require a full cluster shutdown.
|
||||
For each of the nodes you rollback to |SRC_VERSION|, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old Scylla packages
|
||||
* Drain the node and stop ScyllaDB
|
||||
* Retrieve the old ScyllaDB packages
|
||||
* Restore the configuration file
|
||||
* Reload the systemd configuration
|
||||
* Restart the Scylla service
|
||||
* Restart ScyllaDB
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the rollback was successful and that the node is up and running with the old version.
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the rollback was successful and that the node is up and running the old version.
|
||||
|
||||
Rollback steps
|
||||
Rollback Steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
Gracefully shutdown ScyllaDB
|
||||
-----------------------------
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
.. include:: /rst_include/scylla-commands-stop-index.rst
|
||||
|
||||
Download and install the new release
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
#. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/yum.repos.d/scylla.repo
|
||||
sudo rm -rf /etc/yum.repos.d/scylla.repo
|
||||
|
||||
2. Update the |SCYLLA_REPO|_ to |SRC_VERSION|
|
||||
3. Install
|
||||
#. Update the |SCYLLA_REPO|_ to |SRC_VERSION|.
|
||||
#. Install:
|
||||
|
||||
.. code:: console
|
||||
|
||||
sudo yum clean all
|
||||
sudo rm -rf /var/cache/yum
|
||||
sudo yum remove scylla\\*tools-core
|
||||
sudo yum downgrade scylla\\* -y
|
||||
sudo yum install scylla
|
||||
|
||||
.. parsed-literal::
|
||||
\ sudo yum clean all
|
||||
\ sudo rm -rf /var/cache/yum
|
||||
\ sudo yum remove scylla\\*tools-core
|
||||
\ sudo yum downgrade scylla\\* -y
|
||||
\ sudo yum install |PKG_NAME|
|
||||
\
|
||||
|
||||
Restore the configuration file
|
||||
------------------------------
|
||||
@@ -162,7 +158,7 @@ Restore the configuration file
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from previous snapshot, |NEW_VERSION| uses a different set of system tables. Reference doc: :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>`
|
||||
Restore all tables of **system** and **system_schema** from previous snapshot because |NEW_VERSION| uses a different set of system tables. See :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>` for details.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
@@ -173,7 +169,7 @@ Restore all tables of **system** and **system_schema** from previous snapshot, |
|
||||
Reload systemd configuration
|
||||
---------------------------------
|
||||
|
||||
Require to reload the unit file if the systemd unit file is changed.
|
||||
You must reload the unit file if the systemd unit file is changed.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
@@ -182,9 +178,8 @@ Require to reload the unit file if the systemd unit file is changed.
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
|
||||
.. include:: /rst_include/scylla-commands-start-index.rst
|
||||
|
||||
Validate
|
||||
--------
|
||||
Check the upgrade instruction above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster. Keep in mind that the version you want to see on your node is the old version, which you noted at the beginning of the procedure.
|
||||
Check the upgrade instructions above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
|
||||
@@ -96,30 +96,6 @@ Answer ‘y’ to the first two questions.
|
||||
|
||||
Alternator users upgrading from Scylla 4.0 to 4.1, need to set :doc:`default isolation level </upgrade/upgrade-opensource/upgrade-guide-from-4.0-to-4.1/alternator>`
|
||||
|
||||
Update 3rd party and OS packages
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. versionadded:: Scylla 5.0
|
||||
.. versionadded:: Scylla Enterprise 2021.1.10
|
||||
|
||||
This step is optional. It is recommended if you run a Scylla official image (EC2 AMI, GCP, and Azure images) based on Ubuntu 20.04.
|
||||
|
||||
Run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cat scylla-packages-xxx-x86_64.txt | sudo xargs -n1 apt-get -y
|
||||
|
||||
|
||||
Where xxx is the relevant Scylla version ( |NEW_VERSION| ). The file is included in the Scylla packages downloaded in the previous step.
|
||||
|
||||
For example
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cat scylla-packages-5.1.2-x86_64.txt | sudo xargs -n1 apt-get -y
|
||||
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
|
||||
@@ -0,0 +1,68 @@
|
||||
Upgrade Guide - |SCYLLA_NAME| |SRC_VERSION| to |NEW_VERSION| for |OS|
|
||||
======================================================================
|
||||
|
||||
This document is a step-by-step procedure for upgrading from |SCYLLA_NAME| |FROM| to |SCYLLA_NAME| |TO|, and rollback to |FROM| if required.
|
||||
|
||||
|
||||
Applicable Versions
|
||||
------------------------
|
||||
This guide covers upgrading |SCYLLA_NAME| from version |FROM| to version |TO| on |OS|. See :doc:`OS Support by Platform and Version </getting-started/os-support>` for information about supported versions.
|
||||
|
||||
|
||||
Upgrade Procedure
|
||||
----------------------------
|
||||
|
||||
.. note::
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running the new version.
|
||||
|
||||
A ScyllaDB upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* Drain node and backup the data.
|
||||
* Check your current release.
|
||||
* Backup configuration file.
|
||||
* Stop ScyllaDB.
|
||||
* Download and install new ScyllaDB packages.
|
||||
* Start ScyllaDB.
|
||||
* Validate that the upgrade was successful.
|
||||
|
||||
|
||||
**During** the rolling upgrade it is highly recommended:
|
||||
|
||||
* Not to use new |TO| features.
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes.
|
||||
* Not to apply schema changes.
|
||||
|
||||
Upgrade steps
|
||||
-------------------------------
|
||||
Drain node and backup the data
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Before any major procedure, like an upgrade, it is recommended to backup all the data to an external device. In ScyllaDB, backup is done using the ``nodetool snapshot`` command. For **each** node in the cluster, run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete (all nodes), the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>``, or you risk running out of space.
|
||||
|
||||
|
||||
Backup configuration file
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo cp -a /etc/scylla/scylla.yaml /etc/scylla/scylla.yaml.backup-5.x.z
|
||||
|
||||
Gracefully stop the node
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server stop
|
||||
|
||||
Download and install the new release
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a |FROM| version, stop right here! This guide only covers |FROM| to |TO| upgrades.
|
||||
@@ -0,0 +1,84 @@
|
||||
**To upgrade ScyllaDB:**
|
||||
|
||||
#. Update the |APT|_ to |NEW_VERSION|.
|
||||
#. Install:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
^^^^^^^^^^^^^^^^
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
^^^^^^^^^^^^^^^^
|
||||
#. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
#. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check the ScyllaDB version.
|
||||
#. Check the scylla-server log (execute ``journalctl _COMM=scylla``) and ``/var/log/syslog`` to validate there are no errors.
|
||||
#. Check again after 2 minutes, to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
Rollback Procedure
|
||||
-----------------------
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from ScyllaDB release |TO| to |FROM|. Apply this procedure if an upgrade from |FROM| to |TO| failed before completing on all nodes. Use this procedure only for nodes you upgraded to |TO|.
|
||||
|
||||
ScyllaDB rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes you rollback to |FROM|, you will:
|
||||
|
||||
* Drain the node and stop ScyllaDB.
|
||||
* Downgrade to previous release.
|
||||
* Restore the configuration file.
|
||||
* Restart ScyllaDB.
|
||||
* Validate the rollback success.
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running the old version.
|
||||
|
||||
Rollback steps
|
||||
------------------------
|
||||
Gracefully shutdown ScyllaDB
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo service scylla-server stop
|
||||
|
||||
Downgrade to previous release
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
Install:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get install scylla=5.x.y\* scylla-server=5.x.y\* scylla-jmx=5.x.y\* scylla-tools=5.x.y\* scylla-tools-core=5.x.y\* scylla-kernel-conf=5.x.y\* scylla-conf=5.x.y\*
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Restore the configuration file
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/scylla/scylla.yaml
|
||||
sudo cp -a /etc/scylla/scylla.yaml.backup-5.x.z /etc/scylla/scylla.yaml
|
||||
|
||||
Start the node
|
||||
^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo service scylla-server start
|
||||
|
||||
Validate
|
||||
^^^^^^^^^^^^^^^^^^
|
||||
Check the upgrade instructions above for validation. Once you are sure the node rollback is successful, move to the next node in the cluster.
|
||||
@@ -0,0 +1,2 @@
|
||||
.. include:: /upgrade/_common/upgrade-guide-v5-patch-ubuntu-and-debian-p1.rst
|
||||
.. include:: /upgrade/_common/upgrade-guide-v5-patch-ubuntu-and-debian-p2.rst
|
||||
@@ -2,49 +2,43 @@
|
||||
Upgrade Guide - |SCYLLA_NAME| |SRC_VERSION| to |NEW_VERSION| for |OS|
|
||||
=============================================================================
|
||||
|
||||
This document is a step by step procedure for upgrading from |SCYLLA_NAME| |SRC_VERSION| to |SCYLLA_NAME| |NEW_VERSION|, and rollback to |SRC_VERSION| if required.
|
||||
This document is a step by step procedure for upgrading from |SCYLLA_NAME| |SRC_VERSION| to |SCYLLA_NAME| |NEW_VERSION|, and rollback to version |SRC_VERSION| if required.
|
||||
|
||||
..
|
||||
Relevant and tested for Ubuntu 20.04. Remove from other OSes and versions.
|
||||
|
||||
There are two upgrade alternatives: you can upgrade ScyllaDB simultaneously updating 3rd party and OS packages (recommended for Ubuntu 20.04), or upgrade ScyllaDB without updating any external packages.
|
||||
|
||||
Applicable versions
|
||||
Applicable Versions
|
||||
===================
|
||||
This guide covers upgrading Scylla from version |SRC_VERSION|.x or later to |SCYLLA_NAME| version |NEW_VERSION|.y on the following platform:
|
||||
|
||||
* |OS|
|
||||
This guide covers upgrading |SCYLLA_NAME| |SRC_VERSION| to |SCYLLA_NAME| |NEW_VERSION| on |OS|.
|
||||
See :doc:`OS Support by Platform and Version </getting-started/os-support>` for information about supported versions.
|
||||
|
||||
Upgrade Procedure
|
||||
=================
|
||||
|
||||
A Scylla upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
A ScyllaDB upgrade is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes in the cluster, you will:
|
||||
|
||||
* Check cluster schema
|
||||
* Drain node and backup the data
|
||||
* Backup configuration file
|
||||
* Stop Scylla
|
||||
* Download and install new Scylla packages
|
||||
* Start Scylla
|
||||
* Check the cluster's schema
|
||||
* Drain the node and backup the data
|
||||
* Backup the configuration file
|
||||
* Stop ScyllaDB
|
||||
* Download and install new ScyllaDB packages
|
||||
* Start ScyllaDB
|
||||
* Validate that the upgrade was successful
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating that the node you upgraded is up and running with the new version.
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating that the node you upgraded is up and running the new version.
|
||||
|
||||
|
||||
**During** the rolling upgrade, it is highly recommended:
|
||||
|
||||
* Not to use new |NEW_VERSION| features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes. See `here <https://manager.docs.scylladb.com/stable/sctool/>`_ for suspending Scylla Manager (only available Scylla Enterprise) scheduled or running repairs.
|
||||
* Not to use the new |NEW_VERSION| features
|
||||
* Not to run administration functions, like repairs, refresh, rebuild or add or remove nodes. See `sctool <https://manager.docs.scylladb.com/stable/sctool/>`_ for suspending ScyllaDB Manager (only available for ScyllaDB Enterprise) scheduled or running repairs.
|
||||
* Not to apply schema changes
|
||||
|
||||
.. note:: Before upgrading, make sure to use the latest `Scylla Monitoring <https://monitoring.docs.scylladb.com/>`_ stack.
|
||||
.. note:: Before upgrading, make sure to use the latest `ScyllaDB Monitoring <https://monitoring.docs.scylladb.com/>`_ stack.
|
||||
|
||||
Upgrade steps
|
||||
Upgrade Steps
|
||||
=============
|
||||
Check the cluster schema
|
||||
-------------------------
|
||||
Make sure that all nodes have the schema synched before the upgrade. The upgrade will fail if there is any disagreement between the nodes.
|
||||
Make sure that all nodes have the schema synced before the upgrade. The upgrade will fail if there is a schema disagreement between nodes.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
@@ -59,12 +53,12 @@ Before any major procedure, like an upgrade, it is recommended to backup all the
|
||||
nodetool drain
|
||||
nodetool snapshot
|
||||
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having this name under ``/var/lib/scylla`` to a backup device.
|
||||
Take note of the directory name that nodetool gives you, and copy all the directories having that name under ``/var/lib/scylla`` to a backup device.
|
||||
|
||||
When the upgrade is complete on all nodes, the snapshot should be removed by ``nodetool clearsnapshot -t <snapshot>`` to prevent running out of space.
|
||||
When the upgrade is completed on all nodes, remove the snapshot with the ``nodetool clearsnapshot -t <snapshot>`` command to prevent running out of space.
|
||||
|
||||
Backup configuration file
|
||||
-------------------------
|
||||
Backup the configuration file
|
||||
------------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
@@ -81,27 +75,4 @@ Download and install the new release
|
||||
------------------------------------
|
||||
Before upgrading, check what version you are running now using ``dpkg -s scylla-server``. You should use the same version in case you want to |ROLLBACK|_ the upgrade. If you are not running a |SRC_VERSION|.x version, stop right here! This guide only covers |SRC_VERSION|.x to |NEW_VERSION|.y upgrades.
|
||||
|
||||
**To upgrade ScyllaDB:**
|
||||
|
||||
1. Update the |SCYLLA_REPO|_ to |NEW_VERSION|
|
||||
|
||||
2. Install
|
||||
|
||||
.. code-block::
|
||||
|
||||
sudo apt-get clean all
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade |PKG_NAME|
|
||||
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
**To upgrade ScyllaDB and update 3rd party and OS packages:**
|
||||
|
||||
.. include:: /upgrade/_common/upgrade-image.rst
|
||||
|
||||
|
||||
.. note::
|
||||
|
||||
Alternator users upgrading from Scylla 4.0 to 4.1 need to set :doc:`default isolation level </upgrade/upgrade-opensource/upgrade-guide-from-4.0-to-4.1/alternator>`.
|
||||
|
||||
|
||||
@@ -1,3 +1,18 @@
|
||||
**To upgrade ScyllaDB:**
|
||||
|
||||
#. Update the |SCYLLA_REPO|_ to |NEW_VERSION|.
|
||||
|
||||
#. Install:
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
sudo apt-get clean all
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade scylla
|
||||
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
Start the node
|
||||
--------------
|
||||
|
||||
@@ -7,10 +22,10 @@ Start the node
|
||||
|
||||
Validate
|
||||
--------
|
||||
1. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
2. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check the Scylla version.
|
||||
3. Check scylla-server log (by ``journalctl _COMM=scylla``) and ``/var/log/syslog`` to validate there are no errors.
|
||||
4. Check again after two minutes to validate no new issues are introduced.
|
||||
#. Check cluster status with ``nodetool status`` and make sure **all** nodes, including the one you just upgraded, are in UN status.
|
||||
#. Use ``curl -X GET "http://localhost:10000/storage_service/scylla_release_version"`` to check the Scylla version.
|
||||
#. Check scylla-server log (by ``journalctl _COMM=scylla``) and ``/var/log/syslog`` to validate there are no errors.
|
||||
#. Check again after two minutes to validate no new issues are introduced.
|
||||
|
||||
Once you are sure the node upgrade is successful, move to the next node in the cluster.
|
||||
|
||||
@@ -21,25 +36,25 @@ Rollback Procedure
|
||||
|
||||
.. include:: /upgrade/_common/warning_rollback.rst
|
||||
|
||||
The following procedure describes a rollback from |SCYLLA_NAME| release |NEW_VERSION|.x to |SRC_VERSION|.y. Apply this procedure if an upgrade from |SRC_VERSION| to |NEW_VERSION| failed before completing on all nodes. Use this procedure only for nodes you upgraded to |NEW_VERSION|.
|
||||
The following procedure describes a rollback from |SCYLLA_NAME| |NEW_VERSION|.x to |SRC_VERSION|.y. Apply this procedure if an upgrade from |SRC_VERSION| to |NEW_VERSION| failed before completing on all nodes. Use this procedure only for nodes you upgraded to |NEW_VERSION|.
|
||||
|
||||
Scylla rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes rollback to |SRC_VERSION|, you will:
|
||||
ScyllaDB rollback is a rolling procedure which does **not** require full cluster shutdown.
|
||||
For each of the nodes you rollback to |SRC_VERSION|, you will:
|
||||
|
||||
* Drain the node and stop Scylla
|
||||
* Retrieve the old Scylla packages
|
||||
* Retrieve the old ScyllaDB packages
|
||||
* Restore the configuration file
|
||||
* Restore system tables
|
||||
* Reload systemd configuration
|
||||
* Restart Scylla
|
||||
* Restart ScyllaDB
|
||||
* Validate the rollback success
|
||||
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running with the new version.
|
||||
Apply the following procedure **serially** on each node. Do not move to the next node before validating the node is up and running the old version.
|
||||
|
||||
Rollback steps
|
||||
Rollback Steps
|
||||
==============
|
||||
Gracefully shutdown Scylla
|
||||
--------------------------
|
||||
Gracefully shutdown ScyllaDB
|
||||
----------------------------
|
||||
|
||||
.. code:: sh
|
||||
|
||||
@@ -48,20 +63,20 @@ Gracefully shutdown Scylla
|
||||
|
||||
Download and install the old release
|
||||
------------------------------------
|
||||
1. Remove the old repo file.
|
||||
#. Remove the old repo file.
|
||||
|
||||
.. code:: sh
|
||||
.. code:: sh
|
||||
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
sudo rm -rf /etc/apt/sources.list.d/scylla.list
|
||||
|
||||
2. Update the |SCYLLA_REPO|_ to |SRC_VERSION|
|
||||
3. Install
|
||||
#. Update the |SCYLLA_REPO|_ to |SRC_VERSION|.
|
||||
#. Install:
|
||||
|
||||
.. code-block::
|
||||
.. code-block::
|
||||
|
||||
sudo apt-get update
|
||||
sudo apt-get remove scylla\* -y
|
||||
sudo apt-get install |PKG_NAME|
|
||||
sudo apt-get update
|
||||
sudo apt-get remove scylla\* -y
|
||||
sudo apt-get install scylla
|
||||
|
||||
Answer ‘y’ to the first two questions.
|
||||
|
||||
@@ -75,7 +90,7 @@ Restore the configuration file
|
||||
Restore system tables
|
||||
---------------------
|
||||
|
||||
Restore all tables of **system** and **system_schema** from the previous snapshot - |NEW_VERSION| uses a different set of system tables. See :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>` for reference.
|
||||
Restore all tables of **system** and **system_schema** from the previous snapshot because |NEW_VERSION| uses a different set of system tables. See :doc:`Restore from a Backup and Incremental Backup </operating-scylla/procedures/backup-restore/restore/>` for reference.
|
||||
|
||||
.. code:: sh
|
||||
|
||||
|
||||
52
docs/upgrade/_common/upgrade-image-enterprise.rst
Normal file
52
docs/upgrade/_common/upgrade-image-enterprise.rst
Normal file
@@ -0,0 +1,52 @@
|
||||
There are two alternative upgrade procedures:
|
||||
|
||||
* :ref:`Upgrading ScyllaDB and simultaneously updating 3rd party and OS packages <upgrade-image-recommended-procedure>`. It is recommended if you are running a ScyllaDB official image (EC2 AMI, GCP, and Azure images), which is based on Ubuntu 20.04.
|
||||
|
||||
* :ref:`Upgrading ScyllaDB without updating any external packages <upgrade-image-enterprise-upgrade-guide-regular-procedure>`.
|
||||
|
||||
.. _upgrade-image-recommended-procedure:
|
||||
|
||||
**To upgrade ScyllaDB and update 3rd party and OS packages (RECOMMENDED):**
|
||||
|
||||
.. versionadded:: 2021.1.10
|
||||
|
||||
Choosing this upgrade procedure allows you to upgrade your ScyllaDB version and update the 3rd party and OS packages using one command.
|
||||
|
||||
#. Update the |SCYLLA_REPO|_ to |NEW_VERSION|.
|
||||
|
||||
#. Load the new repo:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
|
||||
#. Run the following command to install the packages listed in the manifest file:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cat scylla-enterprise-packages-<version>-<arch>.txt | sudo xargs -n1 apt-get install -y
|
||||
|
||||
Where:
|
||||
|
||||
* ``<version>`` - The ScyllaDB version to which you are upgrading ( |NEW_VERSION| ).
|
||||
* ``<arch>`` - Architecture type: ``x86_64`` or ``aarch64``.
|
||||
|
||||
The file is included in the ScyllaDB packages downloaded in the previous step. The file location is ``http://downloads.scylladb.com/downloads/scylla-enterprise/aws/manifest/scylla-enterprise-packages-<version>-<arch>.txt``.
|
||||
|
||||
Example:
|
||||
|
||||
.. code:: console
|
||||
|
||||
cat scylla-enterprise-packages-2022.1.10-x86_64.txt | sudo xargs -n1 apt-get install -y
|
||||
|
||||
|
||||
.. note::
|
||||
|
||||
Alternatively, you can update the manifest file with the following command:
|
||||
|
||||
      ``sudo apt-get install $(awk '{print $1}' scylla-enterprise-packages-<version>-<arch>.txt) -y``
|
||||
|
||||
|
||||
|
||||
|
||||
.. _upgrade-image-enterprise-upgrade-guide-regular-procedure:
|
||||
49
docs/upgrade/_common/upgrade-image-opensource.rst
Normal file
49
docs/upgrade/_common/upgrade-image-opensource.rst
Normal file
@@ -0,0 +1,49 @@
|
||||
There are two alternative upgrade procedures:
|
||||
|
||||
* :ref:`Upgrading ScyllaDB and simultaneously updating 3rd party and OS packages <upgrade-image-recommended-procedure>`. It is recommended if you are running a ScyllaDB official image (EC2 AMI, GCP, and Azure images), which is based on Ubuntu 20.04.
|
||||
|
||||
* :ref:`Upgrading ScyllaDB without updating any external packages <upgrade-image-upgrade-guide-regular-procedure>`.
|
||||
|
||||
.. _upgrade-image-recommended-procedure:
|
||||
|
||||
**To upgrade ScyllaDB and update 3rd party and OS packages (RECOMMENDED):**
|
||||
|
||||
.. versionadded:: 5.0
|
||||
|
||||
Choosing this upgrade procedure allows you to upgrade your ScyllaDB version and update the 3rd party and OS packages using one command.
|
||||
|
||||
#. Update the |SCYLLA_REPO|_ to |NEW_VERSION|.
|
||||
|
||||
#. Load the new repo:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo apt-get update
|
||||
|
||||
|
||||
#. Run the following command to install the packages listed in the manifest file:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cat scylla-packages-<version>-<arch>.txt | sudo xargs -n1 apt-get install -y
|
||||
|
||||
Where:
|
||||
|
||||
* ``<version>`` - The ScyllaDB version to which you are upgrading ( |NEW_VERSION| ).
|
||||
* ``<arch>`` - Architecture type: ``x86_64`` or ``aarch64``.
|
||||
|
||||
The file is included in the ScyllaDB packages downloaded in the previous step. The file location is ``http://downloads.scylladb.com/downloads/scylla/aws/manifest/scylla-packages-<version>-<arch>.txt``
|
||||
|
||||
Example:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cat scylla-packages-5.1.2-x86_64.txt | sudo xargs -n1 apt-get install -y
|
||||
|
||||
.. note::
|
||||
|
||||
Alternatively, you can update the manifest file with the following command:
|
||||
|
||||
      ``sudo apt-get install $(awk '{print $1}' scylla-packages-<version>-<arch>.txt) -y``
|
||||
|
||||
.. _upgrade-image-upgrade-guide-regular-procedure:
|
||||
@@ -1,21 +0,0 @@
|
||||
.. versionadded:: Scylla 5.0
|
||||
.. versionadded:: Scylla Enterprise 2021.1.10
|
||||
|
||||
This alternative installation upgrade method allows you to upgrade your ScyllaDB version and update the 3rd party and OS packages using one command. This method is recommended if you run a ScyllaDB official image (EC2 AMI, GCP, and Azure images) based on Ubuntu 20.04.
|
||||
|
||||
#. Update the |SCYLLA_REPO|_ to |NEW_VERSION|.
|
||||
|
||||
#. Run the following command:
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cat scylla-packages-xxx-x86_64.txt | sudo xargs -n1 apt-get -y
|
||||
|
||||
|
||||
Where xxx is the relevant Scylla version ( |NEW_VERSION| ). The file is included in the Scylla packages downloaded in the previous step.
|
||||
|
||||
For example
|
||||
|
||||
.. code:: sh
|
||||
|
||||
cat scylla-packages-5.1.2-x86_64.txt | sudo xargs -n1 apt-get -y
|
||||
@@ -7,12 +7,12 @@ Upgrade from ScyllaDB Enterprise 2021.1 to 2022.1
|
||||
:titlesonly:
|
||||
|
||||
Red Hat Enterprise Linux and CentOS <upgrade-guide-from-2021.1-to-2022.1-rpm>
|
||||
Ubuntu 18.04 <upgrade-guide-from-2021.1-to-2022.1-ubuntu-18-04>
|
||||
Ubuntu 20.04 <upgrade-guide-from-2021.1-to-2022.1-ubuntu-20-04>
|
||||
Ubuntu <upgrade-guide-from-2021.1-to-2022.1-ubuntu>
|
||||
Debian <upgrade-guide-from-2021.1-to-2022.1-debian>
|
||||
ScyllaDB Image <upgrade-guide-from-2021.1-to-2022.1-image>
|
||||
Metrics <metric-update-2021.1-to-2022.1>
|
||||
|
||||
.. raw:: html
|
||||
.. raw:: html
|
||||
|
||||
|
||||
<div class="panel callout radius animated">
|
||||
@@ -25,9 +25,9 @@ Upgrade from ScyllaDB Enterprise 2021.1 to 2022.1
|
||||
Upgrade guides are available for:
|
||||
|
||||
* :doc:`Upgrade ScyllaDB Enterprise from 2021.1.x to 2022.1.y on Red Hat Enterprise Linux and CentOS <upgrade-guide-from-2021.1-to-2022.1-rpm>`
|
||||
* :doc:`Upgrade ScyllaDB Enterprise from 2021.1.x to 2022.1.y on Ubuntu 18.04 <upgrade-guide-from-2021.1-to-2022.1-ubuntu-18-04>`
|
||||
* :doc:`Upgrade ScyllaDB Enterprise from 2021.1.x to 2022.1.y on Ubuntu 20.04 <upgrade-guide-from-2021.1-to-2022.1-ubuntu-20-04>`
|
||||
* :doc:`Upgrade ScyllaDB Enterprise from 2021.1.x to 2022.1.y on Ubuntu <upgrade-guide-from-2021.1-to-2022.1-ubuntu>`
|
||||
* :doc:`Upgrade ScyllaDB Enterprise from 2021.1.x to 2022.1.y on Debian <upgrade-guide-from-2021.1-to-2022.1-debian>`
|
||||
* :doc:`Upgrade ScyllaDB Enterprise Image (EC2, GCP, and Azure) from 2021.1.x to 2022.1.y <upgrade-guide-from-2021.1-to-2022.1-image>`
|
||||
* :doc:`ScyllaDB Enterprise Metrics Update - Scylla 2021.1 to 2022.1 <metric-update-2021.1-to-2022.1>`
|
||||
|
||||
|
||||
|
||||
@@ -1,7 +1,13 @@
|
||||
.. |OS| replace:: Debian 9
|
||||
.. |OS| replace:: Debian
|
||||
.. |ROLLBACK| replace:: rollback
|
||||
.. _ROLLBACK: /upgrade/upgrade-enterprise/upgrade-guide-from-2021.1-to-2022.1/upgrade-guide-from-2021.1-to-2022.1-debian/#rollback-procedure
|
||||
.. _ROLLBACK: /upgrade/upgrade-enterprise/upgrade-guide-from-2021.1-to-2022.1/upgrade-guide-from-2021.1-to-2022.1-debian/#rollback-procedure
|
||||
.. |SRC_VERSION| replace:: 2021.1
|
||||
.. |NEW_VERSION| replace:: 2022.1
|
||||
.. |SCYLLA_NAME| replace:: ScyllaDB Enterprise
|
||||
.. |PKG_NAME| replace:: scylla
|
||||
.. |APT| replace:: ScyllaDB Enterprise Deb repo
|
||||
.. _APT: https://www.scylladb.com/customer-portal/?product=ent&platform=debian-9&version=stable-release-2022.1
|
||||
.. |SCYLLA_REPO| replace:: ScyllaDB Enterprise Deb repo
|
||||
.. _SCYLLA_REPO: https://www.scylladb.com/customer-portal/?product=ent&platform=debian-9&version=stable-release-2022.1
|
||||
.. |OPENJDK| replace:: openjdk-8-jre-headless
|
||||
.. include:: /upgrade/_common/upgrade-guide-from-2021.1-to-2022.1-ubuntu-and-debian.rst
|
||||
.. include:: /upgrade/_common/upgrade-guide-v2022-ubuntu-and-debian.rst
|
||||
|
||||
@@ -0,0 +1,18 @@
|
||||
.. |OS| replace:: EC2, GCP, and Azure
|
||||
.. |ROLLBACK| replace:: rollback
|
||||
.. _ROLLBACK: /upgrade/upgrade-enterprise/upgrade-guide-from-2021.1-to-2022.1/upgrade-guide-from-2021.1-to-2022.1-image/#rollback-procedure
|
||||
.. |SRC_VERSION| replace:: 2021.1
|
||||
.. |NEW_VERSION| replace:: 2022.1
|
||||
.. |SCYLLA_NAME| replace:: ScyllaDB Image
|
||||
.. |PKG_NAME| replace:: scylla
|
||||
.. |APT| replace:: ScyllaDB Enterprise Deb repo
|
||||
.. _APT: https://www.scylladb.com/customer-portal/?product=ent&platform=ubuntu-20.04&version=stable-release-2022.1
|
||||
.. |SCYLLA_REPO| replace:: ScyllaDB Enterprise Deb repo
|
||||
.. _SCYLLA_REPO: https://www.scylladb.com/customer-portal/?product=ent&platform=ubuntu-20.04&version=stable-release-2022.1
|
||||
.. |SCYLLA_METRICS| replace:: Scylla Metrics Update - Scylla 2021.1 to 2022.1
|
||||
.. _SCYLLA_METRICS: ../metric-update-2021.1-to-2022.1
|
||||
.. |OPENJDK| replace:: openjdk-8-jre-headless
|
||||
.. include:: /upgrade/_common/upgrade-guide-v2022-ubuntu-and-debian-p1.rst
|
||||
.. include:: /upgrade/_common/upgrade-image-enterprise.rst
|
||||
.. include:: /upgrade/_common/upgrade-guide-v2022-ubuntu-and-debian-p2.rst
|
||||
|
||||
@@ -7,7 +7,7 @@ This document is a step-by-step procedure for upgrading from ScyllaDB Enterprise
|
||||
|
||||
Applicable Versions
|
||||
===================
|
||||
This guide covers upgrading ScyllaDB from version **2021.1.8** or later to ScyllaDB Enterprise version 2021.1.y, on the following platforms:
|
||||
This guide covers upgrading ScyllaDB from version **2021.1.8** or later to ScyllaDB Enterprise version 2022.1.y, on the following platforms:
|
||||
|
||||
* Red Hat Enterprise Linux, version 7 and later
|
||||
* CentOS, version 7 and later
|
||||
|
||||
@@ -1,7 +0,0 @@
|
||||
.. |OS| replace:: 18.04
|
||||
.. |ROLLBACK| replace:: rollback
|
||||
.. _ROLLBACK: /upgrade/upgrade-enterprise/upgrade-guide-from-2021.1-to-2022.1/upgrade-guide-from-2021.1-to-2022.1-ubuntu-18-04/#rollback-procedure
|
||||
.. |APT| replace:: ScyllaDB Enterprise Deb repo
|
||||
.. _APT: https://www.scylladb.com/customer-portal/?product=ent&platform=ubuntu-18.04&version=stable-release-2022.1
|
||||
.. |OPENJDK| replace:: openjdk-8-jre-headless
|
||||
.. include:: /upgrade/_common/upgrade-guide-from-2021.1-to-2022.1-ubuntu-and-debian.rst
|
||||
@@ -1,7 +0,0 @@
|
||||
.. |OS| replace:: 20.04
|
||||
.. |ROLLBACK| replace:: rollback
|
||||
.. _ROLLBACK: /upgrade/upgrade-enterprise/upgrade-guide-from-2021.1-to-2022.1/upgrade-guide-from-2021.1-to-2022.1-ubuntu-18-04/#rollback-procedure
|
||||
.. |APT| replace:: ScyllaDB Enterprise Deb repo
|
||||
.. _APT: https://www.scylladb.com/customer-portal/?product=ent&platform=ubuntu-20.04&version=stable-release-2022.1
|
||||
.. |OPENJDK| replace:: openjdk-8-jre-headless
|
||||
.. include:: /upgrade/_common/upgrade-guide-from-2021.1-to-2022.1-ubuntu-and-debian.rst
|
||||
@@ -0,0 +1,13 @@
|
||||
.. |OS| replace:: Ubuntu
|
||||
.. |ROLLBACK| replace:: rollback
|
||||
.. _ROLLBACK: /upgrade/upgrade-enterprise/upgrade-guide-from-2021.1-to-2022.1/upgrade-guide-from-2021.1-to-2022.1-ubuntu/#rollback-procedure
|
||||
.. |SRC_VERSION| replace:: 2021.1
|
||||
.. |NEW_VERSION| replace:: 2022.1
|
||||
.. |SCYLLA_NAME| replace:: ScyllaDB Enterprise
|
||||
.. |PKG_NAME| replace:: scylla
|
||||
.. |APT| replace:: ScyllaDB Enterprise Deb repo
|
||||
.. _APT: https://www.scylladb.com/customer-portal/?product=ent&platform=ubuntu-20.04&version=stable-release-2022.1
|
||||
.. |SCYLLA_REPO| replace:: ScyllaDB Enterprise Deb repo
|
||||
.. _SCYLLA_REPO: https://www.scylladb.com/customer-portal/?product=ent&platform=ubuntu-20.04&version=stable-release-2022.1
|
||||
.. |OPENJDK| replace:: openjdk-8-jre-headless
|
||||
.. include:: /upgrade/_common/upgrade-guide-v2022-ubuntu-and-debian.rst
|
||||
@@ -6,6 +6,7 @@ Upgrade ScyllaDB Enterprise 2022
|
||||
:titlesonly:
|
||||
:hidden:
|
||||
|
||||
ScyllaDB Enterprise Image <upgrade-guide-from-2022.x.y-to-2022.x.z-image>
|
||||
Red Hat Enterprise Linux and CentOS <upgrade-guide-from-2022.x.y-to-2022.x.z-rpm>
|
||||
Ubuntu 18.04 <upgrade-guide-from-2022.x.y-to-2022.x.z-ubuntu-18-04>
|
||||
Ubuntu 20.04 <upgrade-guide-from-2022.x.y-to-2022.x.z-ubuntu-20-04>
|
||||
@@ -23,6 +24,7 @@ Upgrade ScyllaDB Enterprise 2022
|
||||
|
||||
Upgrade guides are available for:
|
||||
|
||||
* :doc:`Upgrade ScyllaDB Image from 2022.x.y to 2022.x.z <upgrade-guide-from-2022.x.y-to-2022.x.z-image>`
|
||||
* :doc:`Upgrade ScyllaDB Enterprise from 2022.x.y to 2022.x.z on Red Hat Enterprise Linux and CentOS <upgrade-guide-from-2022.x.y-to-2022.x.z-rpm>`
|
||||
* :doc:`Upgrade ScyllaDB Enterprise from 2022.x.y to 2022.x.z on Ubuntu 18.04 <upgrade-guide-from-2022.x.y-to-2022.x.z-ubuntu-18-04>`
|
||||
* :doc:`Upgrade ScyllaDB Enterprise from 2022.x.y to 2022.x.z on Ubuntu 20.04 <upgrade-guide-from-2022.x.y-to-2022.x.z-ubuntu-20-04>`
|
||||
|
||||
@@ -0,0 +1,16 @@
|
||||
.. |OS| replace:: EC2, GCP, and Azure
|
||||
.. |ROLLBACK| replace:: rollback
|
||||
.. _ROLLBACK: ./#rollback-procedure
|
||||
.. |SRC_VERSION| replace:: 2022.x.y
|
||||
.. |NEW_VERSION| replace:: 2022.x.z
|
||||
.. |FROM| replace:: 2022.x.y
|
||||
.. |TO| replace:: 2022.x.z
|
||||
.. |SCYLLA_NAME| replace:: ScyllaDB Enterprise Image
|
||||
.. |PKG_NAME| replace:: scylla
|
||||
.. |SCYLLA_REPO| replace:: ScyllaDB Enterprise deb repo
|
||||
.. _SCYLLA_REPO: https://www.scylladb.com/customer-portal/?product=ent&platform=ubuntu-20.04&version=stable-release-2022.1
|
||||
.. |APT| replace:: ScyllaDB deb repo
|
||||
.. _APT: https://www.scylladb.com/customer-portal/?product=ent&platform=ubuntu-20.04&version=stable-release-2022.1
|
||||
.. include:: /upgrade/_common/upgrade-guide-v2022-patch-ubuntu-and-debian-p1.rst
|
||||
.. include:: /upgrade/_common/upgrade-image-enterprise.rst
|
||||
.. include:: /upgrade/_common/upgrade-guide-v2022-patch-ubuntu-and-debian-p2.rst
|
||||
@@ -63,7 +63,7 @@ Stop ScyllaDB
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl stop scylla-enterprise-server
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Download and install the new release
|
||||
------------------------------------
|
||||
@@ -84,7 +84,7 @@ Start the node
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-enterprise-server
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
@@ -125,7 +125,7 @@ Gracefully shutdown ScyllaDB
|
||||
.. code:: sh
|
||||
|
||||
nodetool drain
|
||||
sudo systemctl stop scylla-enterprise-server
|
||||
sudo systemctl stop scylla-server
|
||||
|
||||
Downgrade to the previous release
|
||||
-----------------------------------
|
||||
@@ -149,7 +149,7 @@ Start the node
|
||||
|
||||
.. code:: sh
|
||||
|
||||
sudo systemctl start scylla-enterprise-server
|
||||
sudo systemctl start scylla-server
|
||||
|
||||
Validate
|
||||
--------
|
||||
|
||||
@@ -5,6 +5,7 @@ Upgrade ScyllaDB Open Source
|
||||
.. toctree::
|
||||
:hidden:
|
||||
|
||||
ScyllaDB 5.0 to 5.1 <upgrade-guide-from-5.0-to-5.1/index>
|
||||
ScyllaDB 5.x maintenance release <upgrade-guide-from-5.x.y-to-5.x.z/index>
|
||||
ScyllaDB 4.6 to 5.0 <upgrade-guide-from-4.6-to-5.0/index>
|
||||
ScyllaDB 4.5 to 4.6 <upgrade-guide-from-4.5-to-4.6/index>
|
||||
@@ -35,6 +36,7 @@ Upgrade ScyllaDB Open Source
|
||||
|
||||
Procedures for upgrading to a newer version of ScyllaDB Open Source.
|
||||
|
||||
* :doc:`Upgrade Guide - ScyllaDB 5.0 to 5.1 <upgrade-guide-from-5.0-to-5.1/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 5.x maintenance releases <upgrade-guide-from-5.x.y-to-5.x.z/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 4.6 to 5.0 <upgrade-guide-from-4.6-to-5.0/index>`
|
||||
* :doc:`Upgrade Guide - ScyllaDB 4.5 to 4.6 <upgrade-guide-from-4.5-to-4.6/index>`
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user