Merge 'treewide: drop thrift support' from Kefu Chai

Thrift support has been deprecated since ScyllaDB 5.2:

> Thrift API - legacy ScyllaDB (and Apache Cassandra) API is
> deprecated and will be removed in followup release. Thrift has
> been disabled by default.

So let's drop it. In this change:

* Thrift protocol support is dropped
* all references to Thrift support in the documentation are dropped
* the "thrift_version" column in the system.local table is preserved for backward compatibility: we could load an existing system.local table which still contains this column, so we need to be able to write this column as well
* the "/storage_service/rpc_server" API endpoint is preserved only for backward compatibility with the Java-based nodetool

Fixes #3811
Fixes #18416
Signed-off-by: Kefu Chai <kefu.chai@scylladb.com>

- [x] not a fix, no need to backport

Closes scylladb/scylladb#18453

* github.com:scylladb/scylladb:
  config: expand on rpc_keepalive's description
  api: s/rpc/thrift/
  db/system_keyspace: drop thrift_version from system.local table
  transport: do not return client_type from cql_server::connection::make_client_key()
  treewide: drop thrift support
Committed by Nadav Har'El on 2024-06-17 22:36:49 +03:00
86 changed files with 146 additions and 5131 deletions


@@ -14,4 +14,4 @@ jobs:
with:
only_warn: 1
ignore_words_list: "ans,datas,fo,ser,ue,crate,nd,reenable,strat,stap,te,raison"
skip: "./.git,./build,./tools,*.js,*.thrift,*.lock,./test,./licenses,./redis/lolwut.cc,*.svg"
skip: "./.git,./build,./tools,*.js,*.lock,./test,./licenses,./redis/lolwut.cc,*.svg"


@@ -100,7 +100,6 @@ find_package(libdeflate REQUIRED)
find_package(libxcrypt REQUIRED)
find_package(Snappy REQUIRED)
find_package(RapidJSON REQUIRED)
find_package(Thrift REQUIRED)
find_package(xxHash REQUIRED)
set(scylla_gen_build_dir "${CMAKE_BINARY_DIR}/gen")
@@ -198,7 +197,6 @@ add_subdirectory(dht)
add_subdirectory(gms)
add_subdirectory(idl)
add_subdirectory(index)
add_subdirectory(interface)
add_subdirectory(lang)
add_subdirectory(locator)
add_subdirectory(message)
@@ -216,7 +214,6 @@ add_subdirectory(service)
add_subdirectory(sstables)
add_subdirectory(streaming)
add_subdirectory(test)
add_subdirectory(thrift)
add_subdirectory(tools)
add_subdirectory(tracing)
add_subdirectory(transport)
@@ -257,7 +254,6 @@ target_link_libraries(scylla PRIVATE
sstables
streaming
test-perf
thrift
tools
tracing
transport


@@ -68,8 +68,8 @@ $ ./tools/toolchain/dbuild ./build/release/scylla --help
See [test.py manual](docs/dev/testing.md).
## Scylla APIs and compatibility
By default, Scylla is compatible with Apache Cassandra and its APIs - CQL and
Thrift. There is also support for the API of Amazon DynamoDB™,
By default, Scylla is compatible with Apache Cassandra and its API - CQL.
There is also support for the API of Amazon DynamoDB™,
which needs to be enabled and configured in order to be used. For more
information on how to enable the DynamoDB™ API in Scylla,
and the current compatibility of this feature as well as Scylla-specific extensions, see


@@ -1251,7 +1251,7 @@ future<executor::request_return_type> executor::update_table(client_state& clien
auto schema = builder.build();
auto m = co_await service::prepare_column_family_update_announcement(p.local(), schema, false, std::vector<view_ptr>(), group0_guard.write_timestamp());
auto m = co_await service::prepare_column_family_update_announcement(p.local(), schema, std::vector<view_ptr>(), group0_guard.write_timestamp());
co_await mm.announce(std::move(m), std::move(group0_guard), format("alternator-executor: update {} table", tab->cf_name()));


@@ -67,7 +67,7 @@
"parameters":[
{
"name":"pluginid",
"description":"The plugin ID, describe the component the metric belongs to. Examples are cache, thrift, etc'. Regex are supported.The plugin ID, describe the component the metric belong to. Examples are: cache, thrift etc'. regex are supported",
"description":"The plugin ID, describe the component the metric belongs to. Examples are cache and alternator, etc'. Regex are supported.",
"required":true,
"allowMultiple":false,
"type":"string",
@@ -199,4 +199,4 @@
}
}
}
}
}


@@ -1689,33 +1689,11 @@
{
"path":"/storage_service/rpc_server",
"operations":[
{
"method":"DELETE",
"summary":"Allows a user to disable thrift",
"type":"void",
"nickname":"stop_rpc_server",
"produces":[
"application/json"
],
"parameters":[
]
},
{
"method":"POST",
"summary":"allows a user to re-enable thrift",
"type":"void",
"nickname":"start_rpc_server",
"produces":[
"application/json"
],
"parameters":[
]
},
{
"method":"GET",
"summary":"Determine if thrift is running",
"type":"boolean",
"nickname":"is_rpc_server_running",
"nickname":"is_thrift_server_running",
"produces":[
"application/json"
],
@@ -2070,7 +2048,7 @@
"operations":[
{
"method":"POST",
"summary":"Enables/Disables tracing for the whole system. Only thrift requests can start tracing currently",
"summary":"Enables/Disables tracing for the whole system.",
"type":"void",
"nickname":"set_trace_probability",
"produces":[


@@ -100,12 +100,12 @@ future<> unset_transport_controller(http_context& ctx) {
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_transport_controller(ctx, r); });
}
future<> set_rpc_controller(http_context& ctx, thrift_controller& ctl) {
return ctx.http_server.set_routes([&ctx, &ctl] (routes& r) { set_rpc_controller(ctx, r, ctl); });
future<> set_thrift_controller(http_context& ctx) {
return ctx.http_server.set_routes([&ctx] (routes& r) { set_thrift_controller(ctx, r); });
}
future<> unset_rpc_controller(http_context& ctx) {
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_rpc_controller(ctx, r); });
future<> unset_thrift_controller(http_context& ctx) {
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_thrift_controller(ctx, r); });
}
future<> set_server_storage_service(http_context& ctx, sharded<service::storage_service>& ss, service::raft_group0_client& group0_client) {


@@ -46,7 +46,6 @@ class snitch_ptr;
} // namespace locator
namespace cql_transport { class controller; }
class thrift_controller;
namespace db {
class snapshot_ctl;
class config;
@@ -100,8 +99,8 @@ future<> set_server_repair(http_context& ctx, sharded<repair_service>& repair);
future<> unset_server_repair(http_context& ctx);
future<> set_transport_controller(http_context& ctx, cql_transport::controller& ctl);
future<> unset_transport_controller(http_context& ctx);
future<> set_rpc_controller(http_context& ctx, thrift_controller& ctl);
future<> unset_rpc_controller(http_context& ctx);
future<> set_thrift_controller(http_context& ctx);
future<> unset_thrift_controller(http_context& ctx);
future<> set_server_authorization_cache(http_context& ctx, sharded<auth::service> &auth_service);
future<> unset_server_authorization_cache(http_context& ctx);
future<> set_server_snapshot(http_context& ctx, sharded<db::snapshot_ctl>& snap_ctl);


@@ -48,7 +48,6 @@
#include "db/extensions.hh"
#include "db/snapshot-ctl.hh"
#include "transport/controller.hh"
#include "thrift/controller.hh"
#include "locator/token_metadata.hh"
#include "cdc/generation_service.hh"
#include "locator/abstract_replication_strategy.hh"
@@ -337,36 +336,17 @@ void unset_transport_controller(http_context& ctx, routes& r) {
ss::is_native_transport_running.unset(r);
}
void set_rpc_controller(http_context& ctx, routes& r, thrift_controller& ctl) {
ss::stop_rpc_server.set(r, [&ctl](std::unique_ptr<http::request> req) {
return smp::submit_to(0, [&] {
return ctl.request_stop_server();
}).then([] {
return make_ready_future<json::json_return_type>(json_void());
});
});
ss::start_rpc_server.set(r, [&ctl](std::unique_ptr<http::request> req) {
return smp::submit_to(0, [&] {
return ctl.start_server();
}).then([] {
return make_ready_future<json::json_return_type>(json_void());
});
});
ss::is_rpc_server_running.set(r, [&ctl] (std::unique_ptr<http::request> req) {
return smp::submit_to(0, [&] {
return !ctl.listen_addresses().empty();
}).then([] (bool running) {
return make_ready_future<json::json_return_type>(running);
// NOTE: preserved only for backward compatibility
void set_thrift_controller(http_context& ctx, routes& r) {
ss::is_thrift_server_running.set(r, [] (std::unique_ptr<http::request> req) {
return smp::submit_to(0, [] {
return make_ready_future<json::json_return_type>(false);
});
});
}
void unset_rpc_controller(http_context& ctx, routes& r) {
ss::stop_rpc_server.unset(r);
ss::start_rpc_server.unset(r);
ss::is_rpc_server_running.unset(r);
void unset_thrift_controller(http_context& ctx, routes& r) {
ss::is_thrift_server_running.unset(r);
}
void set_repair(http_context& ctx, routes& r, sharded<repair_service>& repair) {
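
The surviving endpoint above is a pure stub: there is no thrift_controller behind it anymore, it simply reports that Thrift is not running, so that the Java-based nodetool (e.g. `nodetool statusthrift`) keeps working. A minimal standalone sketch of the same shape, with a hypothetical route table standing in for Seastar's httpd routes:

#include <functional>
#include <iostream>
#include <map>
#include <string>

// Hypothetical stand-in for the Seastar route table used above.
using handler = std::function<std::string()>;

int main() {
    std::map<std::string, handler> routes;

    // The stubbed endpoint: no controller behind it, it unconditionally
    // answers "false" (a JSON boolean) so legacy nodetool calls still work.
    routes["/storage_service/rpc_server"] = [] { return "false"; };

    std::cout << routes["/storage_service/rpc_server"]() << '\n'; // prints: false
}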


@@ -14,7 +14,6 @@
#include "db/data_listeners.hh"
namespace cql_transport { class controller; }
class thrift_controller;
namespace db {
class snapshot_ctl;
namespace view {
@@ -80,8 +79,8 @@ void set_repair(http_context& ctx, httpd::routes& r, sharded<repair_service>& re
void unset_repair(http_context& ctx, httpd::routes& r);
void set_transport_controller(http_context& ctx, httpd::routes& r, cql_transport::controller& ctl);
void unset_transport_controller(http_context& ctx, httpd::routes& r);
void set_rpc_controller(http_context& ctx, httpd::routes& r, thrift_controller& ctl);
void unset_rpc_controller(http_context& ctx, httpd::routes& r);
void set_thrift_controller(http_context& ctx, httpd::routes& r);
void unset_thrift_controller(http_context& ctx, httpd::routes& r);
void set_snapshot(http_context& ctx, httpd::routes& r, sharded<db::snapshot_ctl>& snap_ctl);
void unset_snapshot(http_context& ctx, httpd::routes& r);
seastar::future<json::json_return_type> run_toppartitions_query(db::toppartitions_query& q, http_context &ctx, bool legacy_request = false);


@@ -207,7 +207,7 @@ public:
auto new_log_schema = create_log_schema(new_schema, log_schema ? std::make_optional(log_schema->id()) : std::nullopt, log_schema);
auto log_mut = log_schema
? db::schema_tables::make_update_table_mutations(db, keyspace.metadata(), log_schema, new_log_schema, timestamp, false)
? db::schema_tables::make_update_table_mutations(db, keyspace.metadata(), log_schema, new_log_schema, timestamp)
: db::schema_tables::make_create_table_mutations(new_log_schema, timestamp)
;


@@ -33,7 +33,7 @@ sstring to_string(client_connection_stage ct);
struct client_data {
net::inet_address ip;
int32_t port;
client_type ct;
client_type ct = client_type::cql;
client_connection_stage connection_stage = client_connection_stage::established;
int32_t shard_id; /// ID of server-side shard which is processing the connection.
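
Since the wire protocol no longer carries a Thrift client type, `ct` now gets a default member initializer: a value-initialized `client_data` deterministically reports a CQL client instead of leaving the field indeterminate. A reduced sketch of the idiom (enum and struct simplified, other fields omitted):

#include <cstdint>
#include <iostream>

enum class client_type { cql, alternator }; // reduced: thrift is gone

struct client_data {
    int32_t port = 0;
    client_type ct = client_type::cql; // default instead of an uninitialized field
};

int main() {
    client_data cd{};
    std::cout << (cd.ct == client_type::cql ? "cql" : "other") << '\n'; // prints: cql
}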


@@ -1,47 +0,0 @@
#
# Copyright 2023-present ScyllaDB
#
#
# SPDX-License-Identifier: AGPL-3.0-or-later
#
find_package(PkgConfig REQUIRED)
pkg_check_modules(PC_thrift QUIET thrift)
find_library(thrift_LIBRARY
NAMES thrift
HINTS
${PC_thrift_LIBDIR}
${PC_thrift_LIBRARY_DIRS})
find_path(thrift_INCLUDE_DIR
NAMES thrift/Thrift.h
HINTS
${PC_thrift_INCLUDEDIR}
${PC_thrift_INCLUDE_DIRS})
mark_as_advanced(
thrift_LIBRARY
thrift_INCLUDE_DIR)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Thrift
REQUIRED_VARS
thrift_LIBRARY
thrift_INCLUDE_DIR
VERSION_VAR PC_thrift_VERSION)
if(Thrift_FOUND)
set(thrift_LIBRARIES ${thrift_LIBRARY})
set(thrift_INCLUDE_DIRS ${thrift_INCLUDE_DIR})
if(NOT(TARGET Thrift::thrift))
add_library(Thrift::thrift UNKNOWN IMPORTED)
set_target_properties(Thrift::thrift
PROPERTIES
IMPORTED_LOCATION ${thrift_LIBRARY}
INTERFACE_INCLUDE_DIRECTORIES ${thrift_INCLUDE_DIRS})
endif()
endif()


@@ -199,8 +199,7 @@ cas_contention_timeout_in_ms: 1000
# of the snitch, which will be assumed to be on your classpath.
endpoint_snitch: SimpleSnitch
# The address or interface to bind the Thrift RPC service and native transport
# server to.
# The address or interface to bind the native transport server to.
#
# Set rpc_address OR rpc_interface, not both. Interfaces must correspond
# to a single address, IP aliasing is not supported.
@@ -221,9 +220,6 @@ rpc_address: localhost
# rpc_interface: eth1
# rpc_interface_prefer_ipv6: false
# port for Thrift to listen for clients on
rpc_port: 9160
# port for REST API server
api_port: 10000
@@ -356,9 +352,6 @@ commitlog_total_space_in_mb: -1
# be rejected as invalid. The default is 256MB.
# native_transport_max_frame_size_in_mb: 256
# Whether to start the thrift rpc server.
# start_rpc: true
# enable or disable keepalive on rpc/native connections
# rpc_keepalive: true


@@ -203,18 +203,6 @@ class Source(object):
def endswith(self, end):
return self.source.endswith(end)
class Thrift(Source):
def __init__(self, source, service):
Source.__init__(self, source, '.h', '.cpp')
self.service = service
def generated(self, gen_dir):
basename = os.path.splitext(os.path.basename(self.source))[0]
files = [basename + '_' + ext
for ext in ['types.cpp', 'types.h', 'constants.cpp', 'constants.h']]
files += [self.service + ext
for ext in ['.cpp', '.h']]
return [os.path.join(gen_dir, file) for file in files]
def default_target_arch():
if platform.machine() in ['i386', 'i686', 'x86_64']:
@@ -370,18 +358,6 @@ def check_for_lz4(cxx, cflags):
sys.exit(1)
def thrift_uses_boost_share_ptr():
# thrift version detection, see #4538
proc_res = subprocess.run(["thrift", "-version"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
proc_res_output = proc_res.stdout.decode("utf-8")
if proc_res.returncode != 0 and not re.search(r'^Thrift version', proc_res_output):
raise Exception("Thrift compiler must be missing: {}".format(proc_res_output))
thrift_version = proc_res_output.split(" ")[-1]
thrift_boost_versions = ["0.{}.".format(n) for n in range(1, 11)]
return any(filter(thrift_version.startswith, thrift_boost_versions))
def find_ninja():
ninja = which('ninja') or which('ninja-build')
if ninja:
@@ -742,8 +718,6 @@ arg_parser.add_argument('--optimization-level', action='append', dest='mode_o_le
help=f'Override default compiler optimization level for mode (defaults: {" ".join([x+"="+modes[x]["optimization-level"] for x in modes])})')
arg_parser.add_argument('--static-stdc++', dest='staticcxx', action='store_true',
help='Link libgcc and libstdc++ statically')
arg_parser.add_argument('--static-thrift', dest='staticthrift', action='store_true',
help='Link libthrift statically')
arg_parser.add_argument('--static-boost', dest='staticboost', action='store_true',
help='Link boost statically')
arg_parser.add_argument('--static-yaml-cpp', dest='staticyamlcpp', action='store_true',
@@ -982,10 +956,6 @@ scylla_core = (['message/messaging_service.cc',
'cql3/ut_name.cc',
'cql3/role_name.cc',
'data_dictionary/data_dictionary.cc',
'thrift/handler.cc',
'thrift/server.cc',
'thrift/controller.cc',
'thrift/thrift_validation.cc',
'utils/runtime.cc',
'utils/murmur_hash.cc',
'utils/uuid.cc',
@@ -1204,7 +1174,7 @@ scylla_core = (['message/messaging_service.cc',
'service/topology_mutation.cc',
'service/topology_coordinator.cc',
'node_ops/node_ops_ctl.cc'
] + [Antlr3Grammar('cql3/Cql.g')] + [Thrift('interface/cassandra.thrift', 'Cassandra')] \
] + [Antlr3Grammar('cql3/Cql.g')] \
+ scylla_raft_core
)
@@ -1849,9 +1819,6 @@ libs = ' '.join([maybe_static(args.staticyamlcpp, '-lyaml-cpp'), '-latomic', '-l
if not args.staticboost:
user_cflags += ' -DBOOST_ALL_DYN_LINK'
if thrift_uses_boost_share_ptr():
user_cflags += ' -DTHRIFT_USES_BOOST'
for pkg in pkgs:
user_cflags += ' ' + pkg_config(pkg, '--cflags')
libs += ' ' + pkg_config(pkg, '--libs')
@@ -2035,10 +2002,6 @@ def write_build_file(f,
rule ar.{mode}
command = rm -f $out; ar cr $out $in; ranlib $out
description = AR $out
rule thrift.{mode}
command = thrift -gen cpp:cob_style -out $builddir/{mode}/gen $in
description = THRIFT $in
restat = 1
rule antlr3.{mode}
# We replace many local `ExceptionBaseType* ex` variables with a single function-scope one.
# Because we add such a variable to every function, and because `ExceptionBaseType` is not a global
@@ -2082,7 +2045,6 @@ def write_build_file(f,
compiles = {}
swaggers = set()
serializers = {}
thrifts = set()
ragels = {}
antlr3_grammars = set()
rust_headers = {}
@@ -2098,12 +2060,8 @@ def write_build_file(f,
for src in srcs
if src.endswith('.cc')]
objs.append('$builddir/../utils/arch/powerpc/crc32-vpmsum/crc32.S')
has_thrift = False
has_rust = False
for dep in deps[binary]:
if isinstance(dep, Thrift):
has_thrift = True
objs += dep.objects('$builddir/' + mode + '/gen')
if isinstance(dep, Antlr3Grammar):
objs += dep.objects(f'$builddir/{mode}/gen')
if isinstance(dep, Json2Code):
@@ -2116,9 +2074,6 @@ def write_build_file(f,
if has_rust:
objs.append(f'$builddir/{mode}/rust-{mode}/librust_combined.a')
local_libs = f'$seastar_libs_{mode} $libs'
if has_thrift:
local_libs += ' ' + maybe_static(args.staticthrift, '-lthrift')
local_libs += ' ' + maybe_static(args.staticboost, '-lboost_system')
objs.extend([f'$builddir/{mode}/abseil/{lib}' for lib in abseil_libs])
if binary in tests:
if binary in pure_boost_tests:
@@ -2156,8 +2111,6 @@ def write_build_file(f,
elif src.endswith('.rl'):
hh = '$builddir/' + mode + '/gen/' + src.replace('.rl', '.hh')
ragels[hh] = src
elif src.endswith('.thrift'):
thrifts.add(src)
elif src.endswith('.g'):
antlr3_grammars.add(src)
elif src.endswith('.rs'):
@@ -2207,8 +2160,6 @@ def write_build_file(f,
gen_dir = '$builddir/{}/gen'.format(mode)
gen_headers = []
for th in thrifts:
gen_headers += th.headers('$builddir/{}/gen'.format(mode))
for g in antlr3_grammars:
gen_headers += g.headers('$builddir/{}/gen'.format(mode))
for g in swaggers:
@@ -2247,12 +2198,6 @@ def write_build_file(f,
f.write('build {}: cxxbridge_header\n'.format('$builddir/{}/gen/rust/cxx.h'.format(mode)))
librust = '$builddir/{}/rust-{}/librust_combined'.format(mode, mode)
f.write('build {}.a: rust_lib.{} rust/Cargo.lock\n depfile={}.d\n'.format(librust, mode, librust))
for thrift in thrifts:
outs = ' '.join(thrift.generated('$builddir/{}/gen'.format(mode)))
f.write('build {}: thrift.{} {}\n'.format(outs, mode, thrift.source))
for cc in thrift.sources('$builddir/{}/gen'.format(mode)):
obj = cc.replace('.cpp', '.o')
f.write('build {}: cxx.{} {}\n'.format(obj, mode, cc))
for grammar in antlr3_grammars:
outs = ' '.join(grammar.generated('$builddir/{}/gen'.format(mode)))
f.write('build {}: antlr3.{} {}\n stem = {}\n'.format(outs, mode, grammar.source,


@@ -59,7 +59,8 @@ public:
bool operator==(const authorized_prepared_statements_cache_key&) const = default;
static size_t hash(const auth::authenticated_user& user, const cql3::prepared_cache_key_type::cache_key_type& prep_cache_key) {
return utils::hash_combine(std::hash<auth::authenticated_user>()(user), utils::tuple_hash()(prep_cache_key));
return utils::hash_combine(std::hash<auth::authenticated_user>()(user),
std::hash<cql3::prepared_cache_key_type::cache_key_type>()(prep_cache_key));
}
};
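
The replacement composes the two component hashes explicitly. A self-contained sketch of the pattern, using a boost-style combine as a stand-in for `utils::hash_combine` (whose exact mixing function may differ):

#include <cstddef>
#include <functional>
#include <iostream>
#include <string>

// Boost-style combiner; a stand-in for utils::hash_combine.
inline std::size_t hash_combine(std::size_t a, std::size_t b) {
    return a ^ (b + 0x9e3779b9 + (a << 6) + (a >> 2));
}

int main() {
    std::size_t user_hash = std::hash<std::string>()("cassandra");       // authenticated user
    std::size_t key_hash  = std::hash<std::string>()("SELECT * FROM t"); // prepared-statement cache key
    std::cout << hash_combine(user_hash, key_hash) << '\n';
}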


@@ -27,35 +27,30 @@ struct prepared_cache_entry_size {
};
typedef bytes cql_prepared_id_type;
typedef int32_t thrift_prepared_id_type;
/// \brief The key of the prepared statements cache
///
/// We are going to store the CQL and Thrift prepared statements in the same cache therefore we need generate the key
/// that is going to be unique in both cases. Thrift use int32_t as a prepared statement ID, CQL - MD5 digest.
///
/// We are going to use an std::pair<CQL_PREP_ID_TYPE, int64_t> as a key. For CQL statements we will use {CQL_PREP_ID, std::numeric_limits<int64_t>::max()} as a key
/// and for Thrift - {CQL_PREP_ID_TYPE(0), THRIFT_PREP_ID}. This way CQL and Thrift keys' values will never collide.
/// TODO: consolidate prepared_cache_key_type and the nested cache_key_type
/// the latter was introduced for unifying the CQL and Thrift prepared
/// statements so that they can be stored in the same cache.
class prepared_cache_key_type {
public:
using cache_key_type = std::pair<cql_prepared_id_type, int64_t>;
// derive from cql_prepared_id_type so we can customize the formatter of
// cache_key_type
struct cache_key_type : public cql_prepared_id_type {};
private:
cache_key_type _key;
public:
prepared_cache_key_type() = default;
explicit prepared_cache_key_type(cql_prepared_id_type cql_id) : _key(std::move(cql_id), std::numeric_limits<int64_t>::max()) {}
explicit prepared_cache_key_type(thrift_prepared_id_type thrift_id) : _key(cql_prepared_id_type(), thrift_id) {}
explicit prepared_cache_key_type(cql_prepared_id_type cql_id) : _key(std::move(cql_id)) {}
cache_key_type& key() { return _key; }
const cache_key_type& key() const { return _key; }
static const cql_prepared_id_type& cql_id(const prepared_cache_key_type& key) {
return key.key().first;
}
static thrift_prepared_id_type thrift_id(const prepared_cache_key_type& key) {
return key.key().second;
return key.key();
}
bool operator==(const prepared_cache_key_type& other) const = default;
@@ -98,7 +93,7 @@ private:
//
// Therefore a typical "pollution" (when a cache entry is used only once) would involve
// 2 cache hits.
using cache_type = utils::loading_cache<cache_key_type, prepared_cache_entry, 2, utils::loading_cache_reload_enabled::no, prepared_cache_entry_size, utils::tuple_hash, std::equal_to<cache_key_type>, prepared_cache_stats_updater, prepared_cache_stats_updater>;
using cache_type = utils::loading_cache<cache_key_type, prepared_cache_entry, 2, utils::loading_cache_reload_enabled::no, prepared_cache_entry_size, std::hash<cache_key_type>, std::equal_to<cache_key_type>, prepared_cache_stats_updater, prepared_cache_stats_updater>;
using cache_value_ptr = typename cache_type::value_ptr;
using checked_weak_ptr = typename statements::prepared_statement::checked_weak_ptr;
@@ -161,10 +156,18 @@ public:
}
namespace std {
template<>
struct hash<cql3::prepared_cache_key_type::cache_key_type> final {
size_t operator()(const cql3::prepared_cache_key_type::cache_key_type& k) const {
return std::hash<cql3::cql_prepared_id_type>()(k);
}
};
template<>
struct hash<cql3::prepared_cache_key_type> final {
size_t operator()(const cql3::prepared_cache_key_type& k) const {
return utils::tuple_hash()(k.key());
return std::hash<cql3::cql_prepared_id_type>()(k.key());
}
};
}
@@ -173,7 +176,7 @@ struct hash<cql3::prepared_cache_key_type> final {
template <> struct fmt::formatter<cql3::prepared_cache_key_type::cache_key_type> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
auto format(const cql3::prepared_cache_key_type::cache_key_type& p, fmt::format_context& ctx) const {
return fmt::format_to(ctx.out(), "{{cql_id: {}, thrift_id: {}}}", p.first, p.second);
return fmt::format_to(ctx.out(), "{{cql_id: {}}}", static_cast<const cql3::cql_prepared_id_type&>(p));
}
};
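
A note on the derivation trick above: making `cache_key_type` a struct that inherits from the id type creates a distinct type name, so `std::hash` and `fmt::formatter` can be specialized for cache keys without clobbering the base type's own specializations. A minimal sketch of the idiom, with `std::string` standing in for the real id type:

#include <functional>
#include <iostream>
#include <string>

// Distinct type: derives from the id type purely so that traits can be
// specialized for it independently of std::string itself.
struct cache_key : std::string {};

// Specialize std::hash for the derived type by delegating to the base.
template <>
struct std::hash<cache_key> {
    std::size_t operator()(const cache_key& k) const noexcept {
        return std::hash<std::string>()(k);
    }
};

int main() {
    cache_key k;
    k.assign("SELECT * FROM t");
    std::cout << std::hash<cache_key>()(k) << '\n';
}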


@@ -655,24 +655,17 @@ query_processor::process_authorized_statement(const ::shared_ptr<cql_statement>
future<::shared_ptr<cql_transport::messages::result_message::prepared>>
query_processor::prepare(sstring query_string, service::query_state& query_state) {
auto& client_state = query_state.get_client_state();
return prepare(std::move(query_string), client_state, client_state.is_thrift());
return prepare(std::move(query_string), client_state);
}
future<::shared_ptr<cql_transport::messages::result_message::prepared>>
query_processor::prepare(sstring query_string, const service::client_state& client_state, bool for_thrift) {
query_processor::prepare(sstring query_string, const service::client_state& client_state) {
using namespace cql_transport::messages;
if (for_thrift) {
return prepare_one<result_message::prepared::thrift>(
std::move(query_string),
client_state,
compute_thrift_id, prepared_cache_key_type::thrift_id);
} else {
return prepare_one<result_message::prepared::cql>(
std::move(query_string),
client_state,
compute_id,
prepared_cache_key_type::cql_id);
}
return prepare_one<result_message::prepared::cql>(
std::move(query_string),
client_state,
compute_id,
prepared_cache_key_type::cql_id);
}
static std::string hash_target(std::string_view query_string, std::string_view keyspace) {
@@ -687,16 +680,6 @@ prepared_cache_key_type query_processor::compute_id(
return prepared_cache_key_type(md5_hasher::calculate(hash_target(query_string, keyspace)));
}
prepared_cache_key_type query_processor::compute_thrift_id(
const std::string_view& query_string,
const sstring& keyspace) {
uint32_t h = 0;
for (auto&& c : hash_target(query_string, keyspace)) {
h = 31*h + c;
}
return prepared_cache_key_type(static_cast<int32_t>(h));
}
std::unique_ptr<prepared_statement>
query_processor::get_statement(const sstring_view& query, const service::client_state& client_state) {
std::unique_ptr<raw::parsed_statement> statement = parse_statement(query);
@@ -1069,22 +1052,6 @@ future<> query_processor::announce_schema_statement(const statements::schema_alt
co_await remote_.get().mm.announce(std::move(m), std::move(guard), description);
}
future<std::string>
query_processor::execute_thrift_schema_command(
std::function<future<std::vector<mutation>>(data_dictionary::database, api::timestamp_type)> prepare_schema_mutations,
std::string_view description) {
assert(this_shard_id() == 0);
auto [remote_, holder] = remote();
auto& mm = remote_.get().mm;
auto group0_guard = co_await mm.start_group0_operation();
auto ts = group0_guard.write_timestamp();
co_await mm.announce(co_await prepare_schema_mutations(db(), ts), std::move(group0_guard), description);
co_return std::string(db().get_version().to_sstring());
}
query_processor::migration_subscriber::migration_subscriber(query_processor* qp) : _qp{qp} {
}
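
For the record, the removed `compute_thrift_id` was the classic Java `String.hashCode` recurrence over the keyspace-qualified hash target, truncated to `int32_t`. A standalone sketch of that legacy algorithm (function name hypothetical, input format illustrative only):

#include <cstdint>
#include <iostream>
#include <string_view>

// Legacy Thrift prepared-statement id: h = 31*h + c over every byte,
// truncated to a signed 32-bit value, mirroring Java's String.hashCode.
int32_t legacy_thrift_prepared_id(std::string_view hash_target) {
    uint32_t h = 0;
    for (char c : hash_target) {
        h = 31 * h + static_cast<uint32_t>(c); // chars convert modulo 2^32, as in the removed code
    }
    return static_cast<int32_t>(h);
}

int main() {
    std::cout << legacy_thrift_prepared_id("ks1 SELECT * FROM t") << '\n';
}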


@@ -139,10 +139,6 @@ public:
std::string_view query_string,
std::string_view keyspace);
static prepared_cache_key_type compute_thrift_id(
const std::string_view& query_string,
const sstring& keyspace);
static std::unique_ptr<statements::raw::parsed_statement> parse_statement(const std::string_view& query);
static std::vector<std::unique_ptr<statements::raw::parsed_statement>> parse_statements(std::string_view queries);
@@ -404,7 +400,7 @@ public:
prepare(sstring query_string, service::query_state& query_state);
future<::shared_ptr<cql_transport::messages::result_message::prepared>>
prepare(sstring query_string, const service::client_state& client_state, bool for_thrift);
prepare(sstring query_string, const service::client_state& client_state);
future<> stop();
@@ -445,11 +441,6 @@ public:
execute_schema_statement(const statements::schema_altering_statement&, service::query_state& state, const query_options& options, service::group0_batch& mc);
future<> announce_schema_statement(const statements::schema_altering_statement&, service::group0_batch& mc);
future<std::string>
execute_thrift_schema_command(
std::function<future<std::vector<mutation>>(data_dictionary::database, api::timestamp_type)> prepare_schema_mutations,
std::string_view description);
std::unique_ptr<statements::prepared_statement> get_statement(
const std::string_view& query,
const service::client_state& client_state);
@@ -520,10 +511,10 @@ private:
::shared_ptr<cql_statement> statement, service::query_state& query_state, const query_options& options);
///
/// \tparam ResultMsgType type of the returned result message (CQL or Thrift)
/// \tparam ResultMsgType type of the returned result message (CQL)
/// \tparam PreparedKeyGenerator a function that generates the prepared statement cache key for given query and
/// keyspace
/// \tparam IdGetter a function that returns the corresponding prepared statement ID (CQL or Thrift) for a given
/// \tparam IdGetter a function that returns the corresponding prepared statement ID (CQL) for a given
//// prepared statement cache key
/// \param query_string
/// \param client_state


@@ -402,7 +402,7 @@ future<std::tuple<::shared_ptr<cql_transport::event::schema_change>, std::vector
alter_table_statement::prepare_schema_mutations(query_processor& qp, const query_options& options, api::timestamp_type ts) const {
data_dictionary::database db = qp.db();
auto [cfm, view_updates] = prepare_schema_update(db, options);
auto m = co_await service::prepare_column_family_update_announcement(qp.proxy(), cfm.build(), false, std::move(view_updates), ts);
auto m = co_await service::prepare_column_family_update_announcement(qp.proxy(), cfm.build(), std::move(view_updates), ts);
using namespace cql_transport;
auto ret = ::make_shared<event::schema_change>(


@@ -88,7 +88,7 @@ future<std::vector<mutation>> alter_type_statement::prepare_announcement_mutatio
auto res = co_await service::prepare_view_update_announcement(sp, view_ptr(cfm.build()), ts);
std::move(res.begin(), res.end(), std::back_inserter(m));
} else {
auto res = co_await service::prepare_column_family_update_announcement(sp, cfm.build(), false, {}, ts);
auto res = co_await service::prepare_column_family_update_announcement(sp, cfm.build(), {}, ts);
std::move(res.begin(), res.end(), std::back_inserter(m));
}
}


@@ -42,12 +42,6 @@ public:
protected:
virtual user_type make_updated_type(data_dictionary::database db, user_type to_update) const = 0;
private:
struct base_visitor {
virtual future<> operator()(view_ptr view) = 0;
virtual future<> operator()(user_type type) = 0;
virtual future<> operator()(schema_ptr cfm, bool from_thrift, std::vector<view_ptr>&& view_updates, std::optional<api::timestamp_type> ts_opt) = 0;
};
future<std::vector<mutation>> prepare_announcement_mutations(service::storage_proxy& sp, api::timestamp_type) const;
};


@@ -70,8 +70,7 @@ private:
cql_stats& _stats;
public:
/**
* Creates a new BatchStatement from a list of statements and a
* Thrift consistency level.
* Creates a new BatchStatement from a list of statements
*
* @param type type of the batch
* @param statements a list of UpdateStatements


@@ -131,7 +131,7 @@ std::vector<::shared_ptr<index_target>> create_index_statement::validate_while_e
}
// Origin TODO: we could lift that limitation
if ((schema->is_dense() || !schema->thrift().has_compound_comparator()) && cd->is_primary_key()) {
if ((schema->is_dense() || !schema->is_compound()) && cd->is_primary_key()) {
throw exceptions::invalid_request_exception(
"Secondary indexes are not supported on PRIMARY KEY columns in COMPACT STORAGE tables");
}
@@ -382,7 +382,7 @@ create_index_statement::prepare_schema_mutations(query_processor& qp, const quer
std::vector<mutation> m;
if (res) {
m = co_await service::prepare_column_family_update_announcement(qp.proxy(), std::move(res->schema), false, {}, ts);
m = co_await service::prepare_column_family_update_announcement(qp.proxy(), std::move(res->schema), {}, ts);
ret = ::make_shared<event::schema_change>(
event::schema_change::change_type::UPDATED,


@@ -78,7 +78,7 @@ drop_index_statement::prepare_schema_mutations(query_processor& qp, const query_
auto cfm = make_drop_idex_schema(qp);
if (cfm) {
m = co_await service::prepare_column_family_update_announcement(qp.proxy(), cfm, false, {}, ts);
m = co_await service::prepare_column_family_update_announcement(qp.proxy(), cfm, {}, ts);
using namespace cql_transport;
ret = ::make_shared<event::schema_change>(event::schema_change::change_type::UPDATED,


@@ -38,9 +38,6 @@ future<> drop_keyspace_statement::check_access(query_processor& qp, const servic
void drop_keyspace_statement::validate(query_processor&, const service::client_state& state) const
{
warn(unimplemented::cause::VALIDATION);
#if 0
ThriftValidation.validateKeyspaceNotSystem(keyspace);
#endif
}
const sstring& drop_keyspace_statement::keyspace() const


@@ -84,9 +84,6 @@ future<> truncate_statement::check_access(query_processor& qp, const service::cl
void truncate_statement::validate(query_processor&, const service::client_state& state) const
{
warn(unimplemented::cause::VALIDATION);
#if 0
ThriftValidation.validateColumnFamily(keyspace(), columnFamily());
#endif
}
future<::shared_ptr<cql_transport::messages::result_message>>


@@ -416,16 +416,16 @@ db::config::config(std::shared_ptr<db::extensions> exts)
*/
, commit_failure_policy(this, "commit_failure_policy", value_status::Unused, "stop",
"Policy for commit disk failures:\n"
"* die Shut down gossip and Thrift and kill the JVM, so the node can be replaced.\n"
"* stop Shut down gossip and Thrift, leaving the node effectively dead, but can be inspected using JMX.\n"
"* die Shut down gossip, so the node can be replaced.\n"
"* stop Shut down gossip, leaving the node effectively dead, but can be inspected using the RESTful APIs.\n"
"* stop_commit Shut down the commit log, letting writes collect but continuing to service reads (as in pre-2.0.5 Cassandra).\n"
"* ignore Ignore fatal errors and let the batches fail."
, {"die", "stop", "stop_commit", "ignore"})
, disk_failure_policy(this, "disk_failure_policy", value_status::Unused, "stop",
"Sets how Scylla responds to disk failure. Recommend settings are stop or best_effort.\n"
"* die Shut down gossip and Thrift and kill the JVM for any file system errors or single SSTable errors, so the node can be replaced.\n"
"* stop_paranoid Shut down gossip and Thrift even for single SSTable errors.\n"
"* stop Shut down gossip and Thrift, leaving the node effectively dead, but available for inspection using JMX.\n"
"* die Shut down gossip for any file system errors or single SSTable errors, so the node can be replaced.\n"
"* stop_paranoid Shut down gossip even for single SSTable errors.\n"
"* stop Shut down gossip, leaving the node effectively dead, but available for inspection using the RESTful APIs.\n"
"* best_effort Stop using the failed disk and respond to requests based on the remaining available SSTables. This means you will see obsolete data at consistency level of ONE.\n"
"* ignore Ignores fatal errors and lets the requests fail; all file system errors are logged but otherwise ignored. Scylla acts as in versions prior to Cassandra 1.2.\n"
"Related information: Handling Disk Failures In Cassandra 1.2 blog and Recovering from a single disk failure using JBOD.\n"
@@ -442,7 +442,7 @@ db::config::config(std::shared_ptr<db::extensions> exts)
"\n"
"Related information: Snitches\n")
, rpc_address(this, "rpc_address", value_status::Used, "localhost",
"The listen address for client connections (Thrift RPC service and native transport).Valid values are:\n"
"The listen address for client connections (native transport).Valid values are:\n"
"* unset: Resolves the address using the hostname configuration of the node. If left unset, the hostname must resolve to the IP address of this node using /etc/hostname, /etc/hosts, or DNS.\n"
"* 0.0.0.0: Listens on all configured interfaces, but you must set the broadcast_rpc_address to a value other than 0.0.0.0.\n"
"* IP address\n"
@@ -799,26 +799,12 @@ db::config::config(std::shared_ptr<db::extensions> exts)
*/
, broadcast_rpc_address(this, "broadcast_rpc_address", value_status::Used, {/* unset */},
"RPC address to broadcast to drivers and other Scylla nodes. This cannot be set to 0.0.0.0. If blank, it is set to the value of the rpc_address or rpc_interface. If rpc_address or rpc_interface is set to 0.0.0.0, this property must be set.\n")
, rpc_port(this, "rpc_port", "thrift_port", value_status::Used, 9160,
, rpc_port(this, "rpc_port", "thrift_port", value_status::Unused, 0,
"Thrift port for client connections.")
, start_rpc(this, "start_rpc", value_status::Used, false,
, start_rpc(this, "start_rpc", value_status::Unused, false,
"Starts the Thrift RPC server")
, rpc_keepalive(this, "rpc_keepalive", value_status::Used, true,
"Enable or disable keepalive on client connections (RPC or native).")
, rpc_max_threads(this, "rpc_max_threads", value_status::Invalid, 0,
"Regardless of your choice of RPC server (rpc_server_type), the number of maximum requests in the RPC thread pool dictates how many concurrent requests are possible. However, if you are using the parameter sync in the rpc_server_type, it also dictates the number of clients that can be connected. For a large number of client connections, this could cause excessive memory usage for the thread stack. Connection pooling on the client side is highly recommended. Setting a maximum thread pool size acts as a safeguard against misbehaved clients. If the maximum is reached, Cassandra blocks additional connections until a client disconnects.")
, rpc_min_threads(this, "rpc_min_threads", value_status::Invalid, 16,
"Sets the minimum thread pool size for remote procedure calls.")
, rpc_recv_buff_size_in_bytes(this, "rpc_recv_buff_size_in_bytes", value_status::Unused, 0,
"Sets the receiving socket buffer size for remote procedure calls.")
, rpc_send_buff_size_in_bytes(this, "rpc_send_buff_size_in_bytes", value_status::Unused, 0,
"Sets the sending socket buffer size in bytes for remote procedure calls.")
, rpc_server_type(this, "rpc_server_type", value_status::Unused, "sync",
"Cassandra provides three options for the RPC server. On Windows, sync is about 30% slower than hsha. On Linux, sync and hsha performance is about the same, but hsha uses less memory.\n"
"* sync (Default One thread per Thrift connection.) For a very large number of clients, memory is the limiting factor. On a 64-bit JVM, 180KB is the minimum stack size per thread and corresponds to your use of virtual memory. Physical memory may be limited depending on use of stack space.\n"
"* hsh Half synchronous, half asynchronous. All Thrift clients are handled asynchronously using a small number of threads that does not vary with the number of clients and thus scales well to many clients. The RPC requests are synchronous (one thread per active request).\n"
"* Note: When selecting this option, you must change the default value (unlimited) of rpc_max_threads.\n"
"* Your own RPC server: You must provide a fully-qualified class name of an o.a.c.t.TServerFactory that can create a server instance.")
"Enable or disable keepalive on client connections (CQL native, Redis and the maintenance socket).")
, cache_hit_rate_read_balancing(this, "cache_hit_rate_read_balancing", value_status::Used, true,
"This boolean controls whether the replicas for read query will be chosen based on cache hit ratio.")
/**
@@ -865,14 +851,6 @@ db::config::config(std::shared_ptr<db::extensions> exts)
"* default_weight: (Default: 1 **) How many requests are handled during each turn of the RoundRobin.\n"
"* weights: (Default: Keyspace: 1) Takes a list of keyspaces. It sets how many requests are handled during each turn of the RoundRobin, based on the request_scheduler_id.")
/**
* @Group Thrift interface properties
* @GroupDescription Legacy API for older clients. CQL is a simpler and better API for Scylla.
*/
, thrift_framed_transport_size_in_mb(this, "thrift_framed_transport_size_in_mb", value_status::Unused, 15,
"Frame size (maximum field length) for Thrift. The frame is the row or part of the row the application is inserting.")
, thrift_max_message_length_in_mb(this, "thrift_max_message_length_in_mb", value_status::Used, 16,
"The maximum length of a Thrift message in megabytes, including all fields and internal Thrift overhead (1 byte of overhead for each frame). Message length is usually used in conjunction with batches. A frame length greater than or equal to 24 accommodates a batch with four inserts, each of which is 24 bytes. The required message length is greater than or equal to 24+24+24+24+4 (number of frames).")
/**
* @Group Security properties
* @GroupDescription Server and client security settings.
*/
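
The `value_status::Unused` marking applied to `rpc_port` and `start_rpc` above is what lets old configuration files keep working: the option stays registered and is accepted by the parser, but nothing consults it anymore. A reduced sketch of that idea (types hypothetical, not Scylla's actual `named_value`):

#include <iostream>
#include <map>
#include <string>

enum class value_status { Used, Unused, Invalid };

// Reduced stand-in for a registered configuration option.
struct option {
    value_status status;
    std::string value;
};

int main() {
    std::map<std::string, option> config;
    // Still registered, so an old scylla.yaml that sets it parses fine...
    config["start_rpc"] = {value_status::Unused, "true"};
    config["rpc_keepalive"] = {value_status::Used, "true"};
    // ...but only Used options are ever honored by the server.
    for (const auto& [name, opt] : config) {
        std::cout << name << ": "
                  << (opt.status == value_status::Used ? "honored" : "accepted and ignored")
                  << '\n';
    }
}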


@@ -293,11 +293,6 @@ public:
named_value<uint16_t> rpc_port;
named_value<bool> start_rpc;
named_value<bool> rpc_keepalive;
named_value<uint32_t> rpc_max_threads;
named_value<uint32_t> rpc_min_threads;
named_value<uint32_t> rpc_recv_buff_size_in_bytes;
named_value<uint32_t> rpc_send_buff_size_in_bytes;
named_value<sstring> rpc_server_type;
named_value<bool> cache_hit_rate_read_balancing;
named_value<double> dynamic_snitch_badness_threshold;
named_value<uint32_t> dynamic_snitch_reset_interval_in_ms;
@@ -311,8 +306,6 @@ public:
named_value<sstring> request_scheduler;
named_value<sstring> request_scheduler_id;
named_value<string_map> request_scheduler_options;
named_value<uint32_t> thrift_framed_transport_size_in_mb;
named_value<uint32_t> thrift_max_message_length_in_mb;
named_value<sstring> authenticator;
named_value<sstring> internode_authenticator;
named_value<sstring> authorizer;


@@ -2802,7 +2802,6 @@ static void add_drop_column_to_mutations(schema_ptr table, const sstring& name,
static void make_update_columns_mutations(schema_ptr old_table,
schema_ptr new_table,
api::timestamp_type timestamp,
bool from_thrift,
std::vector<mutation>& mutations) {
mutation columns_mutation(columns(), partition_key::from_singular(*columns(), old_table->ks_name()));
mutation view_virtual_columns_mutation(view_virtual_columns(), partition_key::from_singular(*columns(), old_table->ks_name()));
@@ -2815,9 +2814,6 @@ static void make_update_columns_mutations(schema_ptr old_table,
// Thrift only knows about the REGULAR ColumnDefinition type, so don't consider other types
// as deleted just because they are not here.
const column_definition& column = *old_table->v3().columns_by_name().at(name);
if (from_thrift && !column.is_regular()) {
continue;
}
if (column.is_view_virtual()) {
drop_column_from_schema_mutation(view_virtual_columns(), old_table, column.name_as_text(), timestamp, mutations);
} else {
@@ -2859,13 +2855,12 @@ std::vector<mutation> make_update_table_mutations(replica::database& db,
lw_shared_ptr<keyspace_metadata> keyspace,
schema_ptr old_table,
schema_ptr new_table,
api::timestamp_type timestamp,
bool from_thrift)
api::timestamp_type timestamp)
{
std::vector<mutation> mutations;
add_table_or_view_to_schema_mutation(new_table, timestamp, false, mutations);
make_update_indices_mutations(db, old_table, new_table, timestamp, mutations);
make_update_columns_mutations(std::move(old_table), std::move(new_table), timestamp, from_thrift, mutations);
make_update_columns_mutations(std::move(old_table), std::move(new_table), timestamp, mutations);
warn(unimplemented::cause::TRIGGERS);
#if 0
@@ -3550,7 +3545,7 @@ std::vector<mutation> make_update_view_mutations(lw_shared_ptr<keyspace_metadata
add_table_or_view_to_schema_mutation(base, timestamp - 1, true, mutations);
}
add_table_or_view_to_schema_mutation(new_view, timestamp, false, mutations);
make_update_columns_mutations(old_view, new_view, timestamp, false, mutations);
make_update_columns_mutations(old_view, new_view, timestamp, mutations);
return mutations;
}


@@ -238,8 +238,7 @@ std::vector<mutation> make_update_table_mutations(
lw_shared_ptr<keyspace_metadata> keyspace,
schema_ptr old_table,
schema_ptr new_table,
api::timestamp_type timestamp,
bool from_thrift);
api::timestamp_type timestamp);
future<std::map<sstring, schema_ptr>> create_tables_from_tables_partition(distributed<service::storage_proxy>& proxy, const schema_result::mapped_type& result);


@@ -301,7 +301,7 @@ future<> system_distributed_keyspace::start() {
// The service_levels table exists. Update it if it lacks new columns.
if (table->cf_name() == SERVICE_LEVELS && !get_current_service_levels(db)->equal_columns(*table)) {
auto update_mutations = co_await service::prepare_column_family_update_announcement(_sp, table, false, std::vector<view_ptr>(), ts);
auto update_mutations = co_await service::prepare_column_family_update_announcement(_sp, table, std::vector<view_ptr>(), ts);
std::move(update_mutations.begin(), update_mutations.end(), std::back_inserter(mutations));
}
});


@@ -18,7 +18,6 @@
#include <seastar/core/on_internal_error.hh>
#include "system_keyspace.hh"
#include "cql3/untyped_result_set.hh"
#include "thrift/server.hh"
#include "cql3/query_processor.hh"
#include "partition_slice_builder.hh"
#include "db/config.hh"
@@ -461,6 +460,7 @@ schema_ptr system_keyspace::built_indexes() {
builder.remove_column("scylla_cpu_sharding_algorithm");
builder.remove_column("scylla_nr_shards");
builder.remove_column("scylla_msb_ignore");
builder.remove_column("thrift_version");
return builder.build(schema_builder::compact_storage::no);
}();
return local;
@@ -1544,7 +1544,7 @@ future<system_keyspace::local_info> system_keyspace::load_local_info() {
future<> system_keyspace::save_local_info(local_info sysinfo, locator::endpoint_dc_rack location, gms::inet_address broadcast_address, gms::inet_address broadcast_rpc_address) {
auto& cfg = _db.get_config();
sstring req = fmt::format("INSERT INTO system.{} (key, host_id, cluster_name, release_version, cql_version, thrift_version, native_protocol_version, data_center, rack, partitioner, rpc_address, broadcast_address, listen_address) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
sstring req = fmt::format("INSERT INTO system.{} (key, host_id, cluster_name, release_version, cql_version, native_protocol_version, data_center, rack, partitioner, rpc_address, broadcast_address, listen_address) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
, db::system_keyspace::LOCAL);
return execute_cql(req, sstring(db::system_keyspace::LOCAL),
@@ -1552,7 +1552,6 @@ future<> system_keyspace::save_local_info(local_info sysinfo, locator::endpoint_
sysinfo.cluster_name,
version::release(),
cql3::query_processor::CQL_VERSION,
::cassandra::thrift_version,
to_sstring(unsigned(cql_serialization_format::latest().protocol_version())),
location.dc,
location.rack,


@@ -69,7 +69,7 @@ future<> modify_tags(service::migration_manager& mm, sstring ks, sstring cf,
builder.add_extension(tags_extension::NAME, ::make_shared<tags_extension>(tags));
auto m = co_await service::prepare_column_family_update_announcement(mm.get_storage_proxy(),
builder.build(), false, std::vector<view_ptr>(), group0_guard.write_timestamp());
builder.build(), std::vector<view_ptr>(), group0_guard.write_timestamp());
co_await mm.announce(std::move(m), std::move(group0_guard), format("Modify tags for {} table", cf));
});


@@ -150,7 +150,6 @@ in derive ({
rapidjson
snappy
systemd
thrift
valgrind
xorg.libpciaccess
xxHash


@@ -118,7 +118,6 @@ INFO 2016-08-04 06:57:40,836 [shard 7] database - Setting compaction strategy o
INFO 2016-08-04 06:57:40,837 [shard 6] database - Setting compaction strategy of system_traces.events to SizeTieredCompactionStrategy
INFO 2016-08-04 06:57:40,839 [shard 0] database - Schema version changed to fea14d93-9c5a-34f5-9d0e-2e49dcfa747e
INFO 2016-08-04 06:57:40,839 [shard 0] storage_service - Starting listening for CQL clients on 172.17.0.2:9042...
INFO 2016-08-04 06:57:40,840 [shard 0] storage_service - Thrift server listening on 172.17.0.2:9160 ...
```
### Configuring data volume for storage


@@ -149,31 +149,25 @@ The CQL protocol support can be disabled altogether by setting the
These option names were chosen for backward-compatibility with Cassandra
configuration files: they refer to CQL as the "native transport", to
contrast with the older Thrift protocol (described below) which wasn't
native to Cassandra.
contrast with the older Thrift protocol, which wasn't
native to Cassandra. The Thrift protocol was once supported by Scylla,
but that support was later deprecated and removed.
There is also a `rpc_address` configuration option to set the IP address
(and therefore network interface) on which Scylla should listen for the
CQL protocol. This address defaults to `localhost`, but in any setup except
a one-node test, should be overridden. Note that the same option `rpc_address`
applies to both CQL and Thrift protocols.
TODO: there is also `rpc_interface` option... Which wins? What's the default?
a one-node test, should be overridden.
## Thrift client protocol
The Apache Thrift protocol was early Cassandra's client protocol, until
it was superseded in Cassandra 1.2 with the binary CQL protocol. Thrift
was still nominally supported by both Cassandra and Scylla for many years,
but was recently dropped in Cassandra (version 4.0) and is likely to be
dropped by Scylla in the future as well, so it is not recommended for new
applications.
but was dropped in Cassandra (version 4.0) and was likewise
dropped by Scylla in version 6.0.
By default, Scylla does not enable the Thrift server. In order to use it,
it must be explicitly enabled by setting the `start_rpc` configuration option
to true.
When Thrift is enabled, by default scylla listens to the Thrift protocol on port 9160,
When Thrift was enabled in Scylla versions earlier than 6.0, by default Scylla
listened for the Thrift protocol on port 9160,
which can be configured via the `rpc_port` configuration option. Again, this confusing
name was used for backward-compatibility with Cassandra's configuration files.
Cassandra used the term "rpc" because Apache Thrift is a remote procedure
@@ -181,14 +175,11 @@ call (RPC) framework. In Scylla, this name is especially confusing, because
as mentioned above, Scylla's internal communication protocol is based on
Seastar's RPC, which has nothing to do with the "`rpc_port`" described here.
There is also a `rpc_address` configuration option to set the IP address
(and therefore network interface) on which Scylla should listen for the
Thrift protocol. This address defaults to `localhost`, but in any
setup except a one-node test, should be overridden. Note that the same
option `rpc_address` applies to both CQL and Thrift protocols.
This option is now marked `Unused`, and stays with us for one more
release, because Scylla needs to be able to consume existing configurations
and to work with tooling that might still be setting this option.
TODO: there is also `rpc_interface` option... Which wins? What's the default?
TODO: is there an SSL version of Thrift?
## DynamoDB client protocol
@@ -224,8 +215,8 @@ and/or `redis_ssl_port` configuration option.
The traditional port used for Redis is 6379. Regular Redis does not
support SSL, so there is no traditional choice of port for it.
The same `rpc_address` configuration option used by the CQL and Thrift
protocols to set the IP address (and therefore network interface) on which
The same `rpc_address` configuration option used by the CQL
protocol to set the IP address (and therefore network interface) on which
Scylla should listen also applies to the Redis protocol.
See [redis.md](redis.md) for more information about Scylla's


@@ -276,7 +276,7 @@ Implemented by `cluster_status_table` in `db/system_keyspace.cc`.
## system.protocol_servers
The list of all the client-facing data-plane protocol servers and listen addresses (if running).
Equivalent of the `nodetool statusbinary` plus the `Thrift active` and `Native Transport active` fields from `nodetool info`.
Equivalent of the `nodetool statusbinary` plus the `Native Transport active` field from `nodetool info`.
TODO: include control-plane diagnostics-plane protocols here too.


@@ -164,7 +164,7 @@ Configure and Run ScyllaDB
* ``seeds`` - The IP address of the first node. Other nodes will use it as the first contact
point to discover the cluster topology when joining the cluster.
* ``listen_address`` - The IP address that ScyllaDB uses to connect to other nodes in the cluster.
* ``rpc_address`` - The IP address of the interface for client connections (Thrift, CQL).
* ``rpc_address`` - The IP address of the interface for CQL client connections.
#. Run the ``scylla_setup`` script to tune the system settings and determine the optimal configuration.


@@ -22,8 +22,6 @@ Port Description Protocol
------ -------------------------------------------- --------
9100 node_exporter (Optionally) TCP
------ -------------------------------------------- --------
9160 Scylla client port (Thrift) TCP
------ -------------------------------------------- --------
19042 Native shard-aware transport port TCP
------ -------------------------------------------- --------
19142 Native shard-aware transport port (ssl) TCP


@@ -45,7 +45,7 @@ Primary Options:
-rate: Thread count, rate limit or automatic mode (default is auto).
-mode: Thrift or CQL with options.
-mode: CQL with options.
-errors: How to handle errors when encountered during stress.


@@ -67,7 +67,7 @@ The following addresses can be configured in scylla.yaml:
* - listen_address
- Address Scylla listens for connections from other nodes. See storage_port and ssl_storage_ports.
* - rpc_address
- Address on which Scylla is going to expect Thrift and CQL client connections. See rpc_port, native_transport_port and native_transport_port_ssl in the :ref:`Networking <cqlsh-networking>` parameters.
- Address on which Scylla is going to expect CQL client connections. See rpc_port, native_transport_port and native_transport_port_ssl in the :ref:`Networking <cqlsh-networking>` parameters.
* - broadcast_address
- Address that is broadcasted to tell other Scylla nodes to connect to. Related to listen_address above.
* - broadcast_rpc_address
@@ -167,7 +167,7 @@ Do not set any IP address to :code:`0.0.0.0`.
- Address Scylla listens for connections from other nodes. See storage_port and ssl_storage_ports.
- No default
* - rpc_address (required)
- Address on which Scylla is going to expect Thrift and CQL clients connections. See rpc_port, native_transport_port and native_transport_port_ssl in the :ref:`Networking <cqlsh-networking>` parameters.
- Address on which Scylla is going to expect CQL client connections. See rpc_port, native_transport_port and native_transport_port_ssl in the :ref:`Networking <cqlsh-networking>` parameters.
- No default
* - broadcast_address
- Address that is broadcasted to tell other Scylla nodes to connect to. Related to listen_address above.

View File

@@ -17,7 +17,7 @@ Example output:
ID : 2110829b-47f2-4a6b-b87e-a81bc3b5cb31
Gossip active : true
Thrift active : true
Thrift active : false
Native Transport active: true
Load : 294.44 MB
Generation No : 1474434958


@@ -188,8 +188,6 @@ To display the log classes (output changes with each version so your display may
tags
task_manager
testlog
thrift
thrift_controller
token_group_based_splitting_mutation_writer
token_metadata
topology


@@ -121,7 +121,7 @@ Add New DC
* **seeds** - IP address of an existing node (or nodes).
* **listen_address** - IP address that Scylla used to connect to the other Scylla nodes in the cluster.
* **endpoint_snitch** - Set the selected snitch.
* **rpc_address** - Address for client connections (Thrift, CQL).
* **rpc_address** - Address for CQL client connections.
The parameters ``seeds``, ``cluster_name`` and ``endpoint_snitch`` need to match the existing cluster.
@@ -237,4 +237,4 @@ Additional Resources for Java Clients
.. _add-dc-upgrade-info:
.. scylladb_include_flag:: upgrade-warning-add-new-node-or-dc.rst
.. scylladb_include_flag:: upgrade-warning-add-new-node-or-dc.rst


@@ -47,7 +47,7 @@ Procedure
* **endpoint_snitch** - Specifies the selected snitch.
* **rpc_address** - Specifies the address for client connections (Thrift, CQL).
* **rpc_address** - Specifies the address for CQL client connections.
* **seeds** - Specifies the IP address of an existing node in the cluster. The new node will use this IP to connect to the cluster and learn the cluster topology and state.
@@ -103,4 +103,4 @@ Procedure
.. _add-new-node-upgrade-info:
.. scylladb_include_flag:: upgrade-warning-add-new-node-or-dc.rst
.. scylladb_include_flag:: upgrade-warning-add-new-node-or-dc.rst


@@ -67,7 +67,7 @@ The file can be found under ``/etc/scylla/``.
- **seeds** - Specify the IP of the node you chose to be a seed node. New nodes will use the IP of this seed node to connect to the cluster and learn the cluster topology and state.
- **listen_address** - IP address that the Scylla use to connect to other Scylla nodes in the cluster
- **endpoint_snitch** - Set the selected snitch
- **rpc_address** - Address for client connection (Thrift, CQLSH)
- **rpc_address** - Address for CQL client connection
3. In the ``cassandra-rackdc.properties`` file, edit the rack and data center information.
The file can be found under ``/etc/scylla/``.


@@ -23,7 +23,7 @@ The file can be found under ``/etc/scylla/``.
- **seeds** - Specify the IP of the node you chose to be a seed node. New nodes will use the IP of this seed node to connect to the cluster and learn the cluster topology and state.
- **listen_address** - IP address that ScyllaDB used to connect to other ScyllaDB nodes in the cluster
- **endpoint_snitch** - Set the selected snitch
- **rpc_address** - Address for client connection (Thrift, CQL)
- **rpc_address** - Address for CQL client connection
3. This step needs to be done **only** if you are using the **GossipingPropertyFileSnitch**. If not, skip this step.
In the ``cassandra-rackdc.properties`` file, edit the parameters listed below.


@@ -60,7 +60,7 @@ Procedure
* **seeds** - Specify the IP of the node you chose to be a seed node. See :doc:`Scylla Seed Nodes </kb/seed-nodes/>` for details.
* **listen_address** - IP address that Scylla used to connect to other Scylla nodes in the cluster.
* **endpoint_snitch** - Set the selected snitch.
* **rpc_address** - Address for client connection (Thrift, CQL).
* **rpc_address** - Address for CQL client connection.
* **broadcast_address** - The IP address a node tells other nodes in the cluster to contact it by.
* **broadcast_rpc_address** - Default: unset. The RPC address to broadcast to drivers and other Scylla nodes. It cannot be set to 0.0.0.0. If left blank, it will be set to the value of ``rpc_address``. If ``rpc_address`` is set to 0.0.0.0, ``broadcast_rpc_address`` must be explicitly configured.

View File

@@ -68,7 +68,7 @@ Procedure
- **endpoint_snitch** - Set the selected snitch
- **rpc_address** - Address for client connection (Thrift, CQL)
- **rpc_address** - Address for CQL client connection
#. Add the ``replace_node_first_boot`` parameter to the ``scylla.yaml`` config file on the new node. This line can be added to any place in the config file. After a successful node replacement, there is no need to remove it from the ``scylla.yaml`` file. (Note: The obsolete parameters "replace_address" and "replace_address_first_boot" are not supported and should not be used). The value of the ``replace_node_first_boot`` parameter should be the Host ID of the node to be replaced.

View File

@@ -31,7 +31,7 @@ The ``docker run`` command starts a new Docker instance in the background named
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
616ee646cb9d scylladb/scylla "/docker-entrypoint.p" 4 seconds ago Up 4 seconds 7000-7001/tcp, 9042/tcp, 9160/tcp, 10000/tcp some-scylla
As seen from the ``docker ps`` output, the image exposes ports **7000-7001** (Inter-node RPC), **9042** (CQL), **9160** (Thrift), and **10000** (REST API).
As seen from the ``docker ps`` output, the image exposes ports **7000-7001** (Inter-node RPC), **9042** (CQL), and **10000** (REST API).
Viewing ScyllaDB Server Logs

View File

@@ -77,7 +77,7 @@ seeds IP address of an existing node in the cluster. It allows a new n
-------------- --------------------------------------------------
listen_address IP address that the Scylla use to connect to other Scylla nodes in the cluster
-------------- --------------------------------------------------
rpc_address IP address of the interface for client connections (Thrift, CQL)
rpc_address IP address of the interface for CQL client connections
============== ==================================================
.. _yaml_enabling_experimental_features:

View File

@@ -30,9 +30,10 @@ Interfaces
- | Fully compatible with version 3.3.1, with additional features from later CQL versions (for example, :ref:`Duration type <durations>`).
| Fully compatible with protocol v4, with additional features from v5.
- More below
* - Thrift
- Deprecated in ScyllaDB and Cassandra
- Support for the Thrift protocol is deprecated and will be dropped in future releases of ScyllaDB.
* - Thrift
- No longer supported as of ScyllaDB 6.0
- | Deprecated in Apache Cassandra and dropped in 4.0
| Deprecated in ScyllaDB 5.2 and dropped in 6.0
* - SSTable format (all versions)
- 3.11(mc / md / me), 2.2(la), 2.1.8 (ka)
- | ``me`` - supported in ScyllaDB Open Source 5.1 and ScyllaDB Enterprise 2022.2.0 (and later)

View File

@@ -534,7 +534,6 @@ Other limitations are more minor:
* While a non-LWT batch can be UNLOGGED, a conditional batch cannot;
* IF conditions must be a perfect conjunct (... AND ... AND ...);
* Unlike Cassandra, Scylla doesn't have LWT support in Thrift protocol and doesn't plan to add it;
* Conditional batches are always logged in system.paxos table, so UNLOGGED keyword is silently ignored for them.
Additional Information

View File

@@ -40,9 +40,7 @@ debian_base_packages=(
libsnappy-dev
libjsoncpp-dev
rapidjson-dev
scylla-libthrift010-dev
scylla-antlr35-c++-dev
thrift-compiler
git
pigz
libunistring-dev
@@ -61,7 +59,6 @@ fedora_packages=(
gdb
lua-devel
yaml-cpp-devel
thrift-devel
antlr3-tool
antlr3-C++-devel
jsoncpp-devel
@@ -152,7 +149,6 @@ pip_symlinks=(
centos_packages=(
gdb
yaml-cpp-devel
thrift-devel
scylla-antlr35-tool
scylla-antlr35-C++-devel
jsoncpp-devel snappy-devel
@@ -184,7 +180,6 @@ arch_packages=(
python3
rapidjson
snappy
thrift
)
go_arch() {
@@ -318,7 +313,7 @@ if [ "$ID" = "ubuntu" ] || [ "$ID" = "debian" ]; then
else
apt-get -y install libsystemd-dev antlr3 libyaml-cpp-dev
fi
echo -e "Configure example:\n\t./configure.py --enable-dpdk --mode=release --static-thrift --static-boost --static-yaml-cpp --compiler=/opt/scylladb/bin/g++-7 --cflags=\"-I/opt/scylladb/include -L/opt/scylladb/lib/x86-linux-gnu/\" --ldflags=\"-Wl,-rpath=/opt/scylladb/lib\""
echo -e "Configure example:\n\t./configure.py --enable-dpdk --mode=release --static-boost --static-yaml-cpp --compiler=/opt/scylladb/bin/g++-7 --cflags=\"-I/opt/scylladb/include -L/opt/scylladb/lib/x86-linux-gnu/\" --ldflags=\"-Wl,-rpath=/opt/scylladb/lib\""
elif [ "$ID" = "fedora" ]; then
if rpm -q --quiet yum-utils; then
echo

View File

@@ -1,58 +0,0 @@
# Generate C++ source files from thrift definitions
function(scylla_generate_thrift)
set(one_value_args TARGET VAR THRIFT_VERSION IN_FILE OUT_DIR SERVICE)
cmake_parse_arguments(args "" "${one_value_args}" "" ${ARGN})
get_filename_component(in_file_name ${args_IN_FILE} NAME_WE)
set(aux_out_file_name ${args_OUT_DIR}/${in_file_name})
set(outputs
${aux_out_file_name}_types.cpp
${aux_out_file_name}_types.h
${aux_out_file_name}_constants.cpp
${aux_out_file_name}_constants.h
${args_OUT_DIR}/${args_SERVICE}.cpp
${args_OUT_DIR}/${args_SERVICE}.h)
find_program(THRIFT thrift
REQUIRED)
execute_process(
COMMAND "${THRIFT}" -version
OUTPUT_VARIABLE thrift_version_output
OUTPUT_STRIP_TRAILING_WHITESPACE)
string(REGEX MATCH "[0-9]+\.[0-9]+\.[0-9]+$"
thrift_version "${thrift_version_output}")
set(${args_THRIFT_VERSION} ${thrift_version} PARENT_SCOPE)
add_custom_command(
DEPENDS ${args_IN_FILE}
OUTPUT ${outputs}
COMMAND ${CMAKE_COMMAND} -E make_directory ${args_OUT_DIR}
COMMAND ${THRIFT} -gen cpp:cob_style,no_skeleton -out "${args_OUT_DIR}" "${args_IN_FILE}")
add_custom_target(${args_TARGET}
DEPENDS ${outputs})
set(${args_VAR} ${outputs} PARENT_SCOPE)
endfunction()
scylla_generate_thrift(
TARGET scylla_thrift_gen_cassandra
VAR scylla_thrift_gen_cassandra_files
THRIFT_VERSION thrift_version
IN_FILE "${CMAKE_CURRENT_SOURCE_DIR}/cassandra.thrift"
OUT_DIR ${scylla_gen_build_dir}
SERVICE Cassandra)
add_library(interface STATIC)
target_sources(interface
PRIVATE
${scylla_thrift_gen_cassandra_files})
target_include_directories(interface
PUBLIC
${scylla_gen_build_dir})
if(thrift_version VERSION_LESS 0.11.0)
target_compile_definitions(interface
PUBLIC
THRIFT_USES_BOOST)
endif()

View File

@@ -1,941 +0,0 @@
#!/usr/local/bin/thrift --java --php --py
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2014-present ScyllaDB
#
#
# This file has been modified from the Apache distribution
# by ScyllaDB
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# *** PLEASE REMEMBER TO EDIT THE VERSION CONSTANT WHEN MAKING CHANGES ***
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Interface definition for Cassandra Service
#
namespace java org.apache.cassandra.thrift
namespace cpp cassandra
namespace netstd Apache.Cassandra
namespace py cassandra
namespace php cassandra
namespace perl Cassandra
# Thrift.rb has a bug where top-level modules that include modules
# with the same name are not properly referenced, so we can't do
# Cassandra::Cassandra::Client.
namespace rb CassandraThrift
# The API version (NOT the product version), composed as a dot delimited
# string with major, minor, and patch level components.
#
# - Major: Incremented for backward incompatible changes. An example would
# be changes to the number or disposition of method arguments.
# - Minor: Incremented for backward compatible changes. An example would
# be the addition of a new (optional) method.
# - Patch: Incremented for bug fixes. The patch level should be increased
# for every edit that doesn't result in a change to major/minor.
#
# See the Semantic Versioning Specification (SemVer) http://semver.org.
#
# Note that this backwards compatibility is from the perspective of the server,
# not the client. Cassandra should always be able to talk to older client
# software, but client software may not be able to talk to older Cassandra
# instances.
#
# An effort should be made not to break forward-client-compatibility either
# (e.g. one should avoid removing obsolete fields from the IDL), but no
# guarantees in this respect are made by the Cassandra project.
const string VERSION = "20.1.0"
#
# data structures
#
/** Basic unit of data within a ColumnFamily.
* @param name, the name by which this column is set and retrieved. Maximum 64KB long.
* @param value. The data associated with the name. Maximum 2GB long, but in practice you should limit it to small numbers of MB (since Thrift must read the full value into memory to operate on it).
* @param timestamp. The timestamp is used for conflict detection/resolution when two columns with same name need to be compared.
* @param ttl. An optional, positive delay (in seconds) after which the column will be automatically deleted.
*/
struct Column {
1: required binary name,
2: optional binary value,
3: optional i64 timestamp,
4: optional i32 ttl,
}
/** A named list of columns.
* @param name. see Column.name.
 * @param columns. A collection of standard Columns. The columns within a super column are defined in an ad hoc manner.
* Columns within a super column do not have to have matching structures (similarly named child columns).
*/
struct SuperColumn {
1: required binary name,
2: required list<Column> columns,
}
struct CounterColumn {
1: required binary name,
2: required i64 value
}
struct CounterSuperColumn {
1: required binary name,
2: required list<CounterColumn> columns
}
/**
Methods for fetching rows/records from Cassandra will return either a single instance of ColumnOrSuperColumn or a list
of ColumnOrSuperColumns (get_slice()). If you're looking up a SuperColumn (or list of SuperColumns) then the resulting
instances of ColumnOrSuperColumn will have the requested SuperColumn in the attribute super_column. For queries resulting
in Columns, those values will be in the attribute column. This change was made between 0.3 and 0.4 to standardize on
single query methods that may return either a SuperColumn or Column.
If the query was on a counter column family, you will either get a counter_column (instead of a column) or a
counter_super_column (instead of a super_column)
@param column. The Column returned by get() or get_slice().
@param super_column. The SuperColumn returned by get() or get_slice().
@param counter_column. The CounterColumn returned by get() or get_slice().
@param counter_super_column. The CounterSuperColumn returned by get() or get_slice().
*/
struct ColumnOrSuperColumn {
1: optional Column column,
2: optional SuperColumn super_column,
3: optional CounterColumn counter_column,
4: optional CounterSuperColumn counter_super_column
}
#
# Exceptions
# (note that internal server errors will raise a TApplicationException, courtesy of Thrift)
#
/** A specific column was requested that does not exist. */
exception NotFoundException {
}
/** Invalid request could mean keyspace or column family does not exist, required parameters are missing, or a parameter is malformed.
why contains an associated error message.
*/
exception InvalidRequestException {
1: required string why
}
/** Not all the replicas required could be created and/or read. */
exception UnavailableException {
}
/** RPC timeout was exceeded. Either a node failed mid-operation, or load was too high, or the requested op was too large. */
exception TimedOutException {
/**
* if a write operation was acknowledged by some replicas but not by enough to
* satisfy the required ConsistencyLevel, the number of successful
* replies will be given here. In case of atomic_batch_mutate method this field
* will be set to -1 if the batch was written to the batchlog and to 0 if it wasn't.
*/
1: optional i32 acknowledged_by
/**
* in case of atomic_batch_mutate method this field tells if the batch
* was written to the batchlog.
*/
2: optional bool acknowledged_by_batchlog
/**
* for the CAS method, this field tells if we timed out during the paxos
* protocol, as opposed to during the commit of our update
*/
3: optional bool paxos_in_progress
}
/** invalid authentication request (invalid keyspace, user does not exist, or credentials invalid) */
exception AuthenticationException {
1: required string why
}
/** invalid authorization request (user does not have access to keyspace) */
exception AuthorizationException {
1: required string why
}
/**
 * NOTE: This now-outdated exception is left for backward compatibility reasons,
* no actual schema agreement validation is done starting from Cassandra 1.2
*
* schemas are not in agreement across all nodes
*/
exception SchemaDisagreementException {
}
#
# service api
#
/**
* The ConsistencyLevel is an enum that controls both read and write
* behavior based on the ReplicationFactor of the keyspace. The
* different consistency levels have different meanings, depending on
* if you're doing a write or read operation.
*
* If W + R > ReplicationFactor, where W is the number of nodes to
* block for on write, and R the number to block for on reads, you
* will have strongly consistent behavior; that is, readers will
* always see the most recent write. Of these, the most interesting is
* to do QUORUM reads and writes, which gives you consistency while
* still allowing availability in the face of node failures up to half
* of <ReplicationFactor>. Of course if latency is more important than
* consistency then you can use lower values for either or both.
*
* Some ConsistencyLevels (ONE, TWO, THREE) refer to a specific number
* of replicas rather than a logical concept that adjusts
* automatically with the replication factor. Of these, only ONE is
* commonly used; TWO and (even more rarely) THREE are only useful
* when you care more about guaranteeing a certain level of
* durability, than consistency.
*
* Write consistency levels make the following guarantees before reporting success to the client:
* ANY Ensure that the write has been written once somewhere, including possibly being hinted in a non-target node.
* ONE Ensure that the write has been written to at least 1 node's commit log and memory table
* TWO Ensure that the write has been written to at least 2 node's commit log and memory table
* THREE Ensure that the write has been written to at least 3 node's commit log and memory table
* QUORUM Ensure that the write has been written to <ReplicationFactor> / 2 + 1 nodes
* LOCAL_ONE Ensure that the write has been written to 1 node within the local datacenter (requires NetworkTopologyStrategy)
* LOCAL_QUORUM Ensure that the write has been written to <ReplicationFactor> / 2 + 1 nodes, within the local datacenter (requires NetworkTopologyStrategy)
* EACH_QUORUM Ensure that the write has been written to <ReplicationFactor> / 2 + 1 nodes in each datacenter (requires NetworkTopologyStrategy)
* ALL Ensure that the write is written to <code>&lt;ReplicationFactor&gt;</code> nodes before responding to the client.
*
* Read consistency levels make the following guarantees before returning successful results to the client:
* ANY Not supported. You probably want ONE instead.
* ONE Returns the record obtained from a single replica.
* TWO Returns the record with the most recent timestamp once two replicas have replied.
* THREE Returns the record with the most recent timestamp once three replicas have replied.
* QUORUM Returns the record with the most recent timestamp once a majority of replicas have replied.
 * LOCAL_ONE Returns the record with the most recent timestamp once a single replica within the local datacenter has replied.
* LOCAL_QUORUM Returns the record with the most recent timestamp once a majority of replicas within the local datacenter have replied.
* EACH_QUORUM Returns the record with the most recent timestamp once a majority of replicas within each datacenter have replied.
 * ALL Returns the record with the most recent timestamp once all replicas have replied (implies no replica may be down).
*/
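/**
 * Worked example (illustrative; not part of the original IDL): with
 * ReplicationFactor = 3, QUORUM blocks for floor(3/2) + 1 = 2 replicas on
 * both reads and writes, so W + R = 2 + 2 = 4 > 3. Every read quorum then
 * overlaps every write quorum, which is why a QUORUM read is guaranteed to
 * observe the most recent acknowledged QUORUM write.
 */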
enum ConsistencyLevel {
ONE = 1,
QUORUM = 2,
LOCAL_QUORUM = 3,
EACH_QUORUM = 4,
ALL = 5,
ANY = 6,
TWO = 7,
THREE = 8,
SERIAL = 9,
LOCAL_SERIAL = 10,
LOCAL_ONE = 11,
}
/**
ColumnParent is used when selecting groups of columns from the same ColumnFamily. In directory structure terms, imagine
ColumnParent as ColumnPath + '/../'.
See also <a href="cassandra.html#Struct_ColumnPath">ColumnPath</a>
*/
struct ColumnParent {
3: required string column_family,
4: optional binary super_column,
}
/** The ColumnPath is the path to a single column in Cassandra. It might make sense to think of ColumnPath and
* ColumnParent in terms of a directory structure.
*
* ColumnPath is used to looking up a single column.
*
* @param column_family. The name of the CF of the column being looked up.
* @param super_column. The super column name.
* @param column. The column name.
*/
struct ColumnPath {
3: required string column_family,
4: optional binary super_column,
5: optional binary column,
}
/**
A slice range is a structure that stores basic range, ordering and limit information for a query that will return
multiple columns. It could be thought of as Cassandra's version of LIMIT and ORDER BY
@param start. The column name to start the slice with. This attribute is not required, though there is no default value,
and can be safely set to '', i.e., an empty byte array, to start with the first column name. Otherwise, it
must be a valid value under the rules of the Comparator defined for the given ColumnFamily.
@param finish. The column name to stop the slice at. This attribute is not required, though there is no default value,
and can be safely set to an empty byte array to not stop until 'count' results are seen. Otherwise, it
must also be a valid value to the ColumnFamily Comparator.
@param reversed. Whether the results should be ordered in reversed order. Similar to ORDER BY blah DESC in SQL.
@param count. How many columns to return. Similar to LIMIT in SQL. May be arbitrarily large, but Thrift will
materialize the whole result into memory before returning it to the client, so be aware that you may
be better served by iterating through slices by passing the last value of one call in as the 'start'
of the next instead of increasing 'count' arbitrarily large.
*/
struct SliceRange {
1: required binary start,
2: required binary finish,
3: required bool reversed=0,
4: required i32 count=100,
}
/**
A SlicePredicate is similar to a mathematical predicate (see http://en.wikipedia.org/wiki/Predicate_(mathematical_logic)),
which is described as "a property that the elements of a set have in common."
SlicePredicate's in Cassandra are described with either a list of column_names or a SliceRange. If column_names is
specified, slice_range is ignored.
@param column_names. A list of column names to retrieve. This can be used similarly to Memcached's "multi-get" feature
to fetch N known column names. For instance, if you know you wish to fetch columns 'Joe', 'Jack',
and 'Jim' you can pass those column names as a list to fetch all three at once.
@param slice_range. A SliceRange describing how to range, order, and/or limit the slice.
*/
struct SlicePredicate {
1: optional list<binary> column_names,
2: optional SliceRange slice_range,
}
enum IndexOperator {
EQ,
GTE,
GT,
LTE,
LT
}
struct IndexExpression {
1: required binary column_name,
2: required IndexOperator op,
3: required binary value,
}
/**
* @deprecated use a KeyRange with row_filter in get_range_slices instead
*/
struct IndexClause {
1: required list<IndexExpression> expressions,
2: required binary start_key,
3: required i32 count=100,
}
/**
The semantics of start keys and tokens are slightly different.
Keys are start-inclusive; tokens are start-exclusive. Token
ranges may also wrap -- that is, the end token may be less
than the start one. Thus, a range from keyX to keyX is a
one-element range, but a range from tokenY to tokenY is the
full ring.
*/
struct KeyRange {
1: optional binary start_key,
2: optional binary end_key,
3: optional string start_token,
4: optional string end_token,
6: optional list<IndexExpression> row_filter,
5: required i32 count=100
}
/**
A KeySlice is key followed by the data it maps to. A collection of KeySlice is returned by the get_range_slice operation.
@param key. a row key
@param columns. List of data represented by the key. Typically, the list is pared down to only the columns specified by
a SlicePredicate.
*/
struct KeySlice {
1: required binary key,
2: required list<ColumnOrSuperColumn> columns,
}
struct KeyCount {
1: required binary key,
2: required i32 count
}
/**
* Note that the timestamp is only optional in case of counter deletion.
*/
struct Deletion {
1: optional i64 timestamp,
2: optional binary super_column,
3: optional SlicePredicate predicate,
}
/**
A Mutation is either an insert (represented by filling column_or_supercolumn) or a deletion (represented by filling the deletion attribute).
@param column_or_supercolumn. An insert to a column or supercolumn (possibly counter column or supercolumn)
@param deletion. A deletion of a column or supercolumn
*/
struct Mutation {
1: optional ColumnOrSuperColumn column_or_supercolumn,
2: optional Deletion deletion,
}
struct EndpointDetails {
1: string host,
2: string datacenter,
3: optional string rack
}
struct CASResult {
1: required bool success,
2: optional list<Column> current_values,
}
/**
A TokenRange describes part of the Cassandra ring, it is a mapping from a range to
endpoints responsible for that range.
@param start_token The first token in the range
@param end_token The last token in the range
@param endpoints The endpoints responsible for the range (listed by their configured listen_address)
@param rpc_endpoints The endpoints responsible for the range (listed by their configured rpc_address)
*/
struct TokenRange {
1: required string start_token,
2: required string end_token,
3: required list<string> endpoints,
4: optional list<string> rpc_endpoints
5: optional list<EndpointDetails> endpoint_details,
}
/**
Authentication requests can contain any data, dependent on the IAuthenticator used
*/
struct AuthenticationRequest {
1: required map<string, string> credentials
}
enum IndexType {
KEYS,
CUSTOM,
COMPOSITES
}
/* describes a column in a column family. */
struct ColumnDef {
1: required binary name,
2: required string validation_class,
3: optional IndexType index_type,
4: optional string index_name,
5: optional map<string,string> index_options
}
/**
Describes a trigger.
`options` should include at least 'class' param.
Other options are not supported yet.
*/
struct TriggerDef {
1: required string name,
2: required map<string,string> options
}
/* describes a column family. */
struct CfDef {
1: required string keyspace,
2: required string name,
3: optional string column_type="Standard",
5: optional string comparator_type="BytesType",
6: optional string subcomparator_type,
8: optional string comment,
12: optional double read_repair_chance,
13: optional list<ColumnDef> column_metadata,
14: optional i32 gc_grace_seconds,
15: optional string default_validation_class,
16: optional i32 id,
17: optional i32 min_compaction_threshold,
18: optional i32 max_compaction_threshold,
26: optional string key_validation_class,
28: optional binary key_alias,
29: optional string compaction_strategy,
30: optional map<string,string> compaction_strategy_options,
32: optional map<string,string> compression_options,
33: optional double bloom_filter_fp_chance,
34: optional string caching="keys_only",
37: optional double dclocal_read_repair_chance = 0.0,
39: optional i32 memtable_flush_period_in_ms,
40: optional i32 default_time_to_live,
42: optional string speculative_retry="NONE",
43: optional list<TriggerDef> triggers,
44: optional string cells_per_row_to_cache = "100",
45: optional i32 min_index_interval,
46: optional i32 max_index_interval,
/* All of the following are now ignored and unsupplied. */
/** @deprecated */
9: optional double row_cache_size,
/** @deprecated */
11: optional double key_cache_size,
/** @deprecated */
19: optional i32 row_cache_save_period_in_seconds,
/** @deprecated */
20: optional i32 key_cache_save_period_in_seconds,
/** @deprecated */
21: optional i32 memtable_flush_after_mins,
/** @deprecated */
22: optional i32 memtable_throughput_in_mb,
/** @deprecated */
23: optional double memtable_operations_in_millions,
/** @deprecated */
24: optional bool replicate_on_write,
/** @deprecated */
25: optional double merge_shards_chance,
/** @deprecated */
27: optional string row_cache_provider,
/** @deprecated */
31: optional i32 row_cache_keys_to_save,
/** @deprecated */
38: optional bool populate_io_cache_on_flush,
/** @deprecated */
41: optional i32 index_interval,
}
/* describes a keyspace. */
struct KsDef {
1: required string name,
2: required string strategy_class,
3: optional map<string,string> strategy_options,
/** @deprecated ignored */
4: optional i32 replication_factor,
5: required list<CfDef> cf_defs,
6: optional bool durable_writes=1,
}
/** CQL query compression */
enum Compression {
GZIP = 1,
NONE = 2
}
enum CqlResultType {
ROWS = 1,
VOID = 2,
INT = 3
}
/**
Row returned from a CQL query.
This struct is used for both CQL2 and CQL3 queries. For CQL2, the partition key
is special-cased and is always returned. For CQL3, it is not special cased;
it will be included in the columns list if it was included in the SELECT and
the key field is always null.
*/
struct CqlRow {
1: required binary key,
2: required list<Column> columns
}
struct CqlMetadata {
1: required map<binary,string> name_types,
2: required map<binary,string> value_types,
3: required string default_name_type,
4: required string default_value_type
}
struct CqlResult {
1: required CqlResultType type,
2: optional list<CqlRow> rows,
3: optional i32 num,
4: optional CqlMetadata schema
}
struct CqlPreparedResult {
1: required i32 itemId,
2: required i32 count,
3: optional list<string> variable_types,
4: optional list<string> variable_names
}
/** Represents input splits used by hadoop ColumnFamilyRecordReaders */
struct CfSplit {
1: required string start_token,
2: required string end_token,
3: required i64 row_count
}
/** The ColumnSlice is used to select a set of columns from inside a row.
 * If start or finish are unspecified they will default to the start-of
 * or end-of value, respectively.
* @param start. The start of the ColumnSlice inclusive
* @param finish. The end of the ColumnSlice inclusive
*/
struct ColumnSlice {
1: optional binary start,
2: optional binary finish
}
/**
* Used to perform multiple slices on a single row key in one rpc operation
* @param key. The row key to be multi sliced
* @param column_parent. The column family (super columns are unsupported)
 * @param column_slices. 0 to many ColumnSlice objects, each of which will be used to select columns
* @param reversed. Direction of slice
* @param count. Maximum number of columns
* @param consistency_level. Level to perform the operation at
*/
struct MultiSliceRequest {
1: optional binary key,
2: optional ColumnParent column_parent,
3: optional list<ColumnSlice> column_slices,
4: optional bool reversed=false,
5: optional i32 count=1000,
6: optional ConsistencyLevel consistency_level=ConsistencyLevel.ONE
}
service Cassandra {
# auth methods
void login(1: required AuthenticationRequest auth_request) throws (1:AuthenticationException authnx, 2:AuthorizationException authzx),
# set keyspace
void set_keyspace(1: required string keyspace) throws (1:InvalidRequestException ire),
# retrieval methods
/**
Get the Column or SuperColumn at the given column_path. If no value is present, NotFoundException is thrown. (This is
the only method that can throw an exception under non-failure conditions.)
*/
ColumnOrSuperColumn get(1:required binary key,
2:required ColumnPath column_path,
3:required ConsistencyLevel consistency_level=ConsistencyLevel.ONE)
throws (1:InvalidRequestException ire, 2:NotFoundException nfe, 3:UnavailableException ue, 4:TimedOutException te),
/**
Get the group of columns contained by column_parent (either a ColumnFamily name or a ColumnFamily/SuperColumn name
pair) specified by the given SlicePredicate. If no matching values are found, an empty list is returned.
*/
list<ColumnOrSuperColumn> get_slice(1:required binary key,
2:required ColumnParent column_parent,
3:required SlicePredicate predicate,
4:required ConsistencyLevel consistency_level=ConsistencyLevel.ONE)
throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te),
/**
returns the number of columns matching <code>predicate</code> for a particular <code>key</code>,
<code>ColumnFamily</code> and optionally <code>SuperColumn</code>.
*/
i32 get_count(1:required binary key,
2:required ColumnParent column_parent,
3:required SlicePredicate predicate,
4:required ConsistencyLevel consistency_level=ConsistencyLevel.ONE)
throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te),
/**
Performs a get_slice for column_parent and predicate for the given keys in parallel.
*/
map<binary,list<ColumnOrSuperColumn>> multiget_slice(1:required list<binary> keys,
2:required ColumnParent column_parent,
3:required SlicePredicate predicate,
4:required ConsistencyLevel consistency_level=ConsistencyLevel.ONE)
throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te),
/**
Perform a get_count in parallel on the given list<binary> keys. The return value maps keys to the count found.
*/
map<binary, i32> multiget_count(1:required list<binary> keys,
2:required ColumnParent column_parent,
3:required SlicePredicate predicate,
4:required ConsistencyLevel consistency_level=ConsistencyLevel.ONE)
throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te),
/**
returns a subset of columns for a contiguous range of keys.
*/
list<KeySlice> get_range_slices(1:required ColumnParent column_parent,
2:required SlicePredicate predicate,
3:required KeyRange range,
4:required ConsistencyLevel consistency_level=ConsistencyLevel.ONE)
throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te),
/**
returns a range of columns, wrapping to the next rows if necessary to collect max_results.
*/
list<KeySlice> get_paged_slice(1:required string column_family,
2:required KeyRange range,
3:required binary start_column,
4:required ConsistencyLevel consistency_level=ConsistencyLevel.ONE)
throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te),
/**
Returns the subset of columns specified in SlicePredicate for the rows matching the IndexClause
@deprecated use get_range_slices instead with range.row_filter specified
*/
list<KeySlice> get_indexed_slices(1:required ColumnParent column_parent,
2:required IndexClause index_clause,
3:required SlicePredicate column_predicate,
4:required ConsistencyLevel consistency_level=ConsistencyLevel.ONE)
throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te),
# modification methods
/**
* Insert a Column at the given column_parent.column_family and optional column_parent.super_column.
*/
void insert(1:required binary key,
2:required ColumnParent column_parent,
3:required Column column,
4:required ConsistencyLevel consistency_level=ConsistencyLevel.ONE)
throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te),
/**
* Increment or decrement a counter.
*/
void add(1:required binary key,
2:required ColumnParent column_parent,
3:required CounterColumn column,
4:required ConsistencyLevel consistency_level=ConsistencyLevel.ONE)
throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te),
/**
* Atomic compare and set.
*
* If the cas is successful, the success boolean in CASResult will be true and there will be no current_values.
* Otherwise, success will be false and current_values will contain the current values for the columns in
* expected (that, by definition of compare-and-set, will differ from the values in expected).
*
 * A cas operation takes two consistency levels. The first one, serial_consistency_level, simply indicates the
 * level of serialization required. This can be either ConsistencyLevel.SERIAL or ConsistencyLevel.LOCAL_SERIAL.
 * The second one, commit_consistency_level, defines the consistency level for the commit phase of the cas. This
 * is a more traditional consistency level (the same CLs as for traditional writes are accepted) that impacts
 * the visibility for reads of the operation. For instance, if commit_consistency_level is QUORUM, then it is
 * guaranteed that a followup QUORUM read will see the cas write (if that one was successful, obviously). If
* commit_consistency_level is ANY, you will need to use a SERIAL/LOCAL_SERIAL read to be guaranteed to see
* the write.
*/
CASResult cas(1:required binary key,
2:required string column_family,
3:list<Column> expected,
4:list<Column> updates,
5:required ConsistencyLevel serial_consistency_level=ConsistencyLevel.SERIAL,
6:required ConsistencyLevel commit_consistency_level=ConsistencyLevel.QUORUM)
throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te),
/**
Remove data from the row specified by key at the granularity specified by column_path, and the given timestamp. Note
that all the values in column_path besides column_path.column_family are truly optional: you can remove the entire
row by just specifying the ColumnFamily, or you can remove a SuperColumn or a single Column by specifying those levels too.
*/
void remove(1:required binary key,
2:required ColumnPath column_path,
3:required i64 timestamp,
4:ConsistencyLevel consistency_level=ConsistencyLevel.ONE)
throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te),
/**
* Remove a counter at the specified location.
* Note that counters have limited support for deletes: if you remove a counter, you must wait to issue any following update
* until the delete has reached all the nodes and all of them have been fully compacted.
*/
void remove_counter(1:required binary key,
2:required ColumnPath path,
3:required ConsistencyLevel consistency_level=ConsistencyLevel.ONE)
throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te),
/**
Mutate many columns or super columns for many row keys. See also: Mutation.
mutation_map maps key to column family to a list of Mutation objects to take place at that scope.
**/
void batch_mutate(1:required map<binary, map<string, list<Mutation>>> mutation_map,
2:required ConsistencyLevel consistency_level=ConsistencyLevel.ONE)
throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te),
/**
Atomically mutate many columns or super columns for many row keys. See also: Mutation.
mutation_map maps key to column family to a list of Mutation objects to take place at that scope.
**/
void atomic_batch_mutate(1:required map<binary, map<string, list<Mutation>>> mutation_map,
2:required ConsistencyLevel consistency_level=ConsistencyLevel.ONE)
throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te),
/**
Truncate will mark an entire column family as deleted.
From the user's perspective a successful call to truncate will result in complete data deletion from cfname.
Internally, however, disk space will not be immediately released; as with all deletes in cassandra, this one
only marks the data as deleted.
The operation succeeds only if all hosts in the cluster are available and will throw an UnavailableException if
some hosts are down.
*/
void truncate(1:required string cfname)
throws (1: InvalidRequestException ire, 2: UnavailableException ue, 3: TimedOutException te),
/**
* Select multiple slices of a key in a single RPC operation
*/
list<ColumnOrSuperColumn> get_multi_slice(1:required MultiSliceRequest request)
throws (1:InvalidRequestException ire, 2:UnavailableException ue, 3:TimedOutException te),
// Meta-APIs -- APIs to get information about the node or cluster,
// rather than user data. The nodeprobe program provides usage examples.
/**
* for each schema version present in the cluster, returns a list of nodes at that version.
* hosts that do not respond will be under the key DatabaseDescriptor.INITIAL_VERSION.
* the cluster is all on the same version if the size of the map is 1.
*/
map<string, list<string>> describe_schema_versions()
throws (1: InvalidRequestException ire),
/** list the defined keyspaces in this cluster */
list<KsDef> describe_keyspaces()
throws (1:InvalidRequestException ire),
/** get the cluster name */
string describe_cluster_name(),
/** get the thrift api version */
string describe_version(),
/** get the token ring: a map of ranges to host addresses,
represented as a set of TokenRange instead of a map from range
to list of endpoints, because you can't use Thrift structs as
map keys:
https://issues.apache.org/jira/browse/THRIFT-162
for the same reason, we can't return a set here, even though
order is neither important nor predictable. */
list<TokenRange> describe_ring(1:required string keyspace)
throws (1:InvalidRequestException ire),
/** same as describe_ring, but considers only nodes in the local DC */
list<TokenRange> describe_local_ring(1:required string keyspace)
throws (1:InvalidRequestException ire),
/** get the mapping between token->node ip
without taking replication into consideration
https://issues.apache.org/jira/browse/CASSANDRA-4092 */
map<string, string> describe_token_map()
throws (1:InvalidRequestException ire),
/** returns the partitioner used by this cluster */
string describe_partitioner(),
/** returns the snitch used by this cluster */
string describe_snitch(),
/** describe specified keyspace */
KsDef describe_keyspace(1:required string keyspace)
throws (1:NotFoundException nfe, 2:InvalidRequestException ire),
/** experimental API for hadoop/parallel query support.
may change violently and without warning.
returns list of token strings such that first subrange is (list[0], list[1]],
next is (list[1], list[2]], etc. */
list<string> describe_splits(1:required string cfName,
2:required string start_token,
3:required string end_token,
4:required i32 keys_per_split)
throws (1:InvalidRequestException ire),
/** Enables tracing for the next query in this connection and returns the UUID for that trace session
The next query will be traced independently of trace probability and the returned UUID can be used to query the trace keyspace */
binary trace_next_query(),
list<CfSplit> describe_splits_ex(1:required string cfName,
2:required string start_token,
3:required string end_token,
4:required i32 keys_per_split)
throws (1:InvalidRequestException ire),
/** adds a column family. returns the new schema id. */
string system_add_column_family(1:required CfDef cf_def)
throws (1:InvalidRequestException ire, 2:SchemaDisagreementException sde),
/** drops a column family. returns the new schema id. */
string system_drop_column_family(1:required string column_family)
throws (1:InvalidRequestException ire, 2:SchemaDisagreementException sde),
/** adds a keyspace and any column families that are part of it. returns the new schema id. */
string system_add_keyspace(1:required KsDef ks_def)
throws (1:InvalidRequestException ire, 2:SchemaDisagreementException sde),
/** drops a keyspace and any column families that are part of it. returns the new schema id. */
string system_drop_keyspace(1:required string keyspace)
throws (1:InvalidRequestException ire, 2:SchemaDisagreementException sde),
/** updates properties of a keyspace. returns the new schema id. */
string system_update_keyspace(1:required KsDef ks_def)
throws (1:InvalidRequestException ire, 2:SchemaDisagreementException sde),
/** updates properties of a column family. returns the new schema id. */
string system_update_column_family(1:required CfDef cf_def)
throws (1:InvalidRequestException ire, 2:SchemaDisagreementException sde),
/**
* @deprecated Throws InvalidRequestException since 3.0. Please use the CQL3 version instead.
*/
CqlResult execute_cql_query(1:required binary query, 2:required Compression compression)
throws (1:InvalidRequestException ire,
2:UnavailableException ue,
3:TimedOutException te,
4:SchemaDisagreementException sde)
/**
* Executes a CQL3 (Cassandra Query Language) statement and returns a
* CqlResult containing the results.
*/
CqlResult execute_cql3_query(1:required binary query, 2:required Compression compression, 3:required ConsistencyLevel consistency)
throws (1:InvalidRequestException ire,
2:UnavailableException ue,
3:TimedOutException te,
4:SchemaDisagreementException sde)
/**
* @deprecated Throws InvalidRequestException since 3.0. Please use the CQL3 version instead.
*/
CqlPreparedResult prepare_cql_query(1:required binary query, 2:required Compression compression)
throws (1:InvalidRequestException ire)
/**
* Prepare a CQL3 (Cassandra Query Language) statement by compiling and returning
* - the type of CQL statement
* - an id token of the compiled CQL stored on the server side.
* - a count of the discovered bound markers in the statement
*/
CqlPreparedResult prepare_cql3_query(1:required binary query, 2:required Compression compression)
throws (1:InvalidRequestException ire)
/**
* @deprecated Throws InvalidRequestException since 3.0. Please use the CQL3 version instead.
*/
CqlResult execute_prepared_cql_query(1:required i32 itemId, 2:required list<binary> values)
throws (1:InvalidRequestException ire,
2:UnavailableException ue,
3:TimedOutException te,
4:SchemaDisagreementException sde)
/**
* Executes a prepared CQL3 (Cassandra Query Language) statement by passing an id token, a list of variables
* to bind, and the consistency level, and returns a CqlResult containing the results.
*/
CqlResult execute_prepared_cql3_query(1:required i32 itemId, 2:required list<binary> values, 3:required ConsistencyLevel consistency)
throws (1:InvalidRequestException ire,
2:UnavailableException ue,
3:TimedOutException te,
4:SchemaDisagreementException sde)
/**
* @deprecated This is now a no-op. Please use the CQL3 specific methods instead.
*/
void set_cql_version(1: required string version) throws (1:InvalidRequestException ire)
}

View File

@@ -83,7 +83,6 @@
#include "sstables_loader.hh"
#include "cql3/cql_config.hh"
#include "transport/controller.hh"
#include "thrift/controller.hh"
#include "service/memory_limiter.hh"
#include "service/endpoint_lifecycle_subscriber.hh"
#include "db/schema_tables.hh"
@@ -2047,7 +2046,6 @@ To start the scylla server proper, simply invoke as: scylla server (or just scyl
// Register controllers after drain_on_shutdown() below, so that even on start
// failure drain is called and stops controllers
cql_transport::controller cql_server_ctl(auth_service, mm_notifier, gossiper, qp, service_memory_limiter, sl_controller, lifecycle_notifier, *cfg, cql_sg_stats_key, maintenance_socket_enabled::no, dbcfg.statement_scheduling_group);
::thrift_controller thrift_ctl(db, auth_service, qp, service_memory_limiter, ss, proxy, dbcfg.statement_scheduling_group);
alternator::controller alternator_ctl(gossiper, proxy, mm, sys_dist_ks, cdc_generation_service, service_memory_limiter, auth_service, sl_controller, *cfg, dbcfg.statement_scheduling_group);
redis::controller redis_ctl(proxy, auth_service, mm, *cfg, gossiper, dbcfg.statement_scheduling_group);
@@ -2062,10 +2060,9 @@ To start the scylla server proper, simply invoke as: scylla server (or just scyl
api::unset_transport_controller(ctx).get();
});
ss.local().register_protocol_server(thrift_ctl, cfg->start_rpc()).get();
api::set_rpc_controller(ctx, thrift_ctl).get();
auto stop_rpc_controller = defer_verbose_shutdown("rpc controller API", [&ctx] {
api::unset_rpc_controller(ctx).get();
api::set_thrift_controller(ctx).get();
auto stop_thrift_controller = defer_verbose_shutdown("thrift controller API", [&ctx] {
api::unset_thrift_controller(ctx).get();
});
ss.local().register_protocol_server(alternator_ctl, cfg->alternator_port() || cfg->alternator_https_port()).get();
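The register-then-defer pairing above is Seastar's usual deferred-shutdown idiom. A minimal self-contained sketch of the pattern, for illustration only (everything except seastar::defer is hypothetical):

#include <seastar/util/defer.hh>
#include <iostream>

// Sketch: "register" something, then arrange for it to be undone when the
// enclosing scope unwinds -- so a failure in a later start-up step still
// runs the teardown, matching the controller registration above.
void start_subsystem() {
    std::cout << "register API\n";          // stand-in for api::set_*_controller()
    auto stop = seastar::defer([] () noexcept {
        std::cout << "unregister API\n";    // stand-in for api::unset_*_controller()
    });
    // ... further start-up steps; if one throws, `stop` still fires ...
}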

View File

@@ -328,9 +328,6 @@ void schema::rebuild() {
_column_mapping = column_mapping(std::move(cm_columns), static_columns_count());
}
thrift()._compound = is_compound();
thrift()._is_dynamic = clustering_key_size() > 0;
if (is_counter()) {
for (auto&& cdef : boost::range::join(static_columns(), regular_columns())) {
if (!cdef.type->is_counter()) {
@@ -415,8 +412,6 @@ schema::schema(private_tag, const raw_schema& raw, std::optional<raw_view_info>
def._dropped_at = std::max(def._dropped_at, dropped_at_it->second.timestamp);
}
def._thrift_bits = column_definition::thrift_bits();
{
// is_on_all_components
// TODO : In origin, this predicate is "componentIndex == null", which is true in
@@ -436,7 +431,6 @@ schema::schema(private_tag, const raw_schema& raw, std::optional<raw_view_info>
[[fallthrough]];
default:
// Or any other column where "comparator" is not compound
def._thrift_bits.is_on_all_components = !thrift().has_compound_comparator();
break;
}
}
@@ -519,18 +513,6 @@ schema::registry_entry() const noexcept {
return _registry_entry;
}
sstring schema::thrift_key_validator() const {
if (partition_key_size() == 1) {
return partition_key_columns().begin()->type->name();
} else {
auto type_params = fmt::join(partition_key_columns()
| boost::adaptors::transformed(std::mem_fn(&column_definition::type))
| boost::adaptors::transformed(std::mem_fn(&abstract_type::name)),
", ");
return format("org.apache.cassandra.db.marshal.CompositeType({})", type_params);
}
}
bool
schema::has_multi_cell_collections() const {
return boost::algorithm::any_of(all_columns(), [] (const column_definition& cdef) {
@@ -699,7 +681,6 @@ auto fmt::formatter<schema>::format(const schema& s, fmt::format_context& ctx) c
out = fmt::format_to(out, ",comment={}", s._raw._comment);
out = fmt::format_to(out, ",tombstoneGcOptions={}", s.tombstone_gc_options().to_sstring());
out = fmt::format_to(out, ",gcGraceSeconds={}", s._raw._gc_grace_seconds);
out = fmt::format_to(out, ",keyValidator={}", s.thrift_key_validator());
out = fmt::format_to(out, ",minCompactionThreshold={}", s._raw._min_compaction_threshold);
out = fmt::format_to(out, ",maxCompactionThreshold={}", s._raw._max_compaction_threshold);
out = fmt::format_to(out, ",columnMetadata=[");
@@ -1053,14 +1034,6 @@ generate_legacy_id(const sstring& ks_name, const sstring& cf_name) {
return table_id(utils::UUID_gen::get_name_UUID(ks_name + cf_name));
}
bool thrift_schema::has_compound_comparator() const {
return _compound;
}
bool thrift_schema::is_dynamic() const {
return _is_dynamic;
}
schema_builder& schema_builder::set_compaction_strategy_options(std::map<sstring, sstring>&& options) {
_raw._compaction_strategy_options = std::move(options);
return *this;

View File

@@ -305,7 +305,6 @@ public:
, _is_counter(other._is_counter)
, _is_view_virtual(other._is_view_virtual)
, _computation(other.get_computation_ptr())
, _thrift_bits(other._thrift_bits)
, type(other.type)
, id(other.id)
, ordinal_id(other.ordinal_id)
@@ -376,18 +375,6 @@ public:
class schema_builder;
/*
* Sub-schema for thrift aspects. Should be kept isolated (and starved)
*/
class thrift_schema {
bool _compound = true;
bool _is_dynamic = false;
public:
bool has_compound_comparator() const;
bool is_dynamic() const;
friend class schema;
};
bool operator==(const column_definition&, const column_definition&);
static constexpr int DEFAULT_MIN_COMPACTION_THRESHOLD = 4;
@@ -610,7 +597,6 @@ private:
};
raw_schema _raw;
schema_static_props _static_props;
thrift_schema _thrift;
v3_columns _v3_columns;
mutable schema_registry_entry* _registry_entry = nullptr;
std::unique_ptr<::view_info> _view_info;
@@ -674,7 +660,6 @@ public:
double bloom_filter_fp_chance() const {
return _raw._bloom_filter_fp_chance;
}
sstring thrift_key_validator() const;
const compression_parameters& get_compressor_params() const {
return _raw._compressor_params;
}
@@ -699,12 +684,6 @@ public:
return !is_super() && !is_dense() && !is_compound();
}
thrift_schema& thrift() {
return _thrift;
}
const thrift_schema& thrift() const {
return _thrift;
}
const table_id& id() const {
return _raw._id;
}

View File

@@ -33,7 +33,6 @@
_long_description_prefix: ["total number of write requests", "number of write requests that failed", "background_replica_writes_failed", "number of write operations in a read repair context"]
_category: "storage_proxy_coordinator"
allowmismatch: true
"thrift/server.cc": skip
"tracing/tracing.cc":
params:
"max_pending_trace_records + write_event_records_threshold": "max_pending_trace_records + write_event_records_threshold"

View File

@@ -69,7 +69,6 @@ private:
, _user(cs->_user)
, _auth_state(cs->_auth_state)
, _is_internal(cs->_is_internal)
, _is_thrift(cs->_is_thrift)
, _remote_address(cs->_remote_address)
, _auth_service(auth_service ? &auth_service->local() : nullptr)
, _sl_controller(sl_controller ? &sl_controller->local() : nullptr)
@@ -110,7 +109,6 @@ private:
// isInternal is used to mark ClientState as used by some internal component
// that should have an ability to modify system keyspace.
bool _is_internal;
bool _is_thrift;
// The biggest timestamp that was returned by getTimestamp/assigned to a query
static thread_local api::timestamp_type _last_timestamp_micros;
@@ -162,10 +160,8 @@ public:
auth::service& auth_service,
qos::service_level_controller* sl_controller,
timeout_config timeout_config,
const socket_address& remote_address = socket_address(),
bool thrift = false)
const socket_address& remote_address = socket_address())
: _is_internal(false)
, _is_thrift(thrift)
, _remote_address(remote_address)
, _auth_service(&auth_service)
, _sl_controller(sl_controller)
@@ -202,7 +198,6 @@ public:
client_state(internal_tag, const timeout_config& config)
: _keyspace("system")
, _is_internal(true)
, _is_thrift(false)
, _default_timeout_config(config)
, _timeout_config(config)
{}
@@ -211,7 +206,6 @@ public:
: _user(auth::authenticated_user(username))
, _auth_state(auth_state::READY)
, _is_internal(true)
, _is_thrift(false)
, _auth_service(&auth_service)
, _sl_controller(&sl_controller)
{}
@@ -226,10 +220,6 @@ public:
return _auth_service;
}
bool is_thrift() const {
return _is_thrift;
}
bool is_internal() const {
return _is_internal;
}
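A hedged sketch of constructing an external client_state after this change; the parameter list is taken from the hunk above, while `external_tag` and the variable names are assumptions made for illustration:

// Hypothetical call site: the trailing `bool thrift` argument is gone, so
// an external client_state is built from the remaining parameters only.
service::client_state cs(service::client_state::external_tag{},  // tag name assumed
                         auth_service,      // auth::service&
                         &sl_controller,    // qos::service_level_controller*
                         timeout_cfg,       // timeout_config
                         remote_addr);      // socket_address (defaulted if omitted)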

View File

@@ -715,7 +715,7 @@ future<> prepare_new_column_family_announcement(std::vector<mutation>& mutations
}
future<std::vector<mutation>> prepare_column_family_update_announcement(storage_proxy& sp,
schema_ptr cfm, bool from_thrift, std::vector<view_ptr> view_updates, api::timestamp_type ts) {
schema_ptr cfm, std::vector<view_ptr> view_updates, api::timestamp_type ts) {
warn(unimplemented::cause::VALIDATION);
#if 0
cfm.validate();
@@ -731,7 +731,7 @@ future<std::vector<mutation>> prepare_column_family_update_announcement(storage_
auto mutations = co_await seastar::async([&] {
// Can call notifier when it creates new indexes, so needs to run in Seastar thread
return db::schema_tables::make_update_table_mutations(db, keyspace, old_schema, cfm, ts, from_thrift);
return db::schema_tables::make_update_table_mutations(db, keyspace, old_schema, cfm, ts);
});
for (auto&& view : view_updates) {
auto& old_view = keyspace->cf_meta_data().at(view->cf_name());
@@ -836,7 +836,7 @@ future<std::vector<mutation>> prepare_column_family_drop_announcement(storage_pr
std::vector<mutation> drop_si_mutations;
if (!schema->all_indices().empty()) {
auto builder = schema_builder(schema).without_indexes();
drop_si_mutations = db::schema_tables::make_update_table_mutations(db, keyspace, schema, builder.build(), ts, false);
drop_si_mutations = db::schema_tables::make_update_table_mutations(db, keyspace, schema, builder.build(), ts);
}
auto mutations = db::schema_tables::make_drop_table_mutations(keyspace, schema, ts);
mutations.insert(mutations.end(), std::make_move_iterator(drop_si_mutations.begin()), std::make_move_iterator(drop_si_mutations.end()));

View File

@@ -215,7 +215,7 @@ std::vector<mutation> prepare_new_keyspace_announcement(replica::database& db, l
// The timestamp parameter can be used to ensure that all nodes update their internal tables' schemas
// with identical timestamps, which can prevent an unneeded schema exchange
future<std::vector<mutation>> prepare_column_family_update_announcement(storage_proxy& sp,
schema_ptr cfm, bool from_thrift, std::vector<view_ptr> view_updates, api::timestamp_type ts);
schema_ptr cfm, std::vector<view_ptr> view_updates, api::timestamp_type ts);
future<std::vector<mutation>> prepare_new_column_family_announcement(storage_proxy& sp, schema_ptr cfm, api::timestamp_type timestamp);
// The ksm parameter can describe a keyspace that hasn't been created yet.
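For illustration, a caller updated for the new signature might look as follows; this is a sketch modeled on the announcement code elsewhere in the tree, and the wrapper function itself is hypothetical:

#include <seastar/core/coroutine.hh>

// Announce a table schema update; the removed from_thrift flag is simply gone.
future<> announce_table_update(service::migration_manager& mm,
                               service::storage_proxy& sp,
                               schema_ptr new_schema) {
    auto guard = co_await mm.start_group0_operation();
    auto ts = guard.write_timestamp();
    auto mutations = co_await service::prepare_column_family_update_announcement(
            sp, std::move(new_schema), std::vector<view_ptr>(), ts);
    co_await mm.announce(std::move(mutations), std::move(guard), "update table");
}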

View File

@@ -76,7 +76,7 @@ future<> table_helper::cache_table_info(cql3::query_processor& qp, service::migr
return now();
}
return qp.prepare(_insert_cql, qs.get_client_state(), false)
return qp.prepare(_insert_cql, qs.get_client_state())
.then([this] (shared_ptr<cql_transport::messages::result_message::prepared> msg_ptr) noexcept {
_prepared_stmt = std::move(msg_ptr->get_prepared());
shared_ptr<cql3::cql_statement> cql_stmt = _prepared_stmt->statement;
@@ -91,7 +91,7 @@ future<> table_helper::cache_table_info(cql3::query_processor& qp, service::migr
// we have already prepared the fallback statement
return now();
}
return qp.prepare(_insert_cql_fallback.value(), qs.get_client_state(), false)
return qp.prepare(_insert_cql_fallback.value(), qs.get_client_state())
.then([this] (shared_ptr<cql_transport::messages::result_message::prepared> msg_ptr) noexcept {
_prepared_stmt = std::move(msg_ptr->get_prepared());
shared_ptr<cql3::cql_statement> cql_stmt = _prepared_stmt->statement;
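A minimal sketch of the new call shape, assuming a `cql3::query_processor& qp` and a `service::query_state qs` as in the hunk above (the statement text is illustrative):

// prepare() no longer takes the trailing "for thrift" bool.
qp.prepare("INSERT INTO ks.t (pk, v) VALUES (?, ?)", qs.get_client_state())
    .then([] (shared_ptr<cql_transport::messages::result_message::prepared> msg) {
        auto prepared = msg->get_prepared();
        // use prepared->statement ...
    });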

View File

@@ -158,10 +158,10 @@ partitioner: org.apache.cassandra.dht.Murmur3Partitioner
# commitlog_directory: /var/lib/cassandra/commitlog
# policy for data disk failures:
# die: shut down gossip and Thrift and kill the JVM for any fs errors or
# die: shut down gossip and kill the JVM for any fs errors or
# single-sstable errors, so the node can be replaced.
# stop_paranoid: shut down gossip and Thrift even for single-sstable errors.
# stop: shut down gossip and Thrift, leaving the node effectively dead, but
# stop_paranoid: shut down gossip even for single-sstable errors.
# stop: shut down gossip, leaving the node effectively dead, but
# can still be inspected via JMX.
# best_effort: stop using the failed disk and respond to requests based on
# remaining available sstables. This means you WILL see obsolete
@@ -170,8 +170,8 @@ partitioner: org.apache.cassandra.dht.Murmur3Partitioner
disk_failure_policy: stop
# policy for commit disk failures:
# die: shut down gossip and Thrift and kill the JVM, so the node can be replaced.
# stop: shut down gossip and Thrift, leaving the node effectively dead, but
# die: shut down gossip and kill the JVM, so the node can be replaced.
# stop: shut down gossip, leaving the node effectively dead, but
# can still be inspected via JMX.
# stop_commit: shutdown the commit log, letting writes collect but
# continuing to service reads, as in pre-2.0.5 Cassandra
@@ -441,10 +441,7 @@ native_transport_port: 9042
# be rejected as invalid. The default is 256MB.
# native_transport_max_frame_size_in_mb: 256
# Whether to start the thrift rpc server.
start_rpc: true
# The address or interface to bind the Thrift RPC service and native transport
# The address or interface to bind the native transport
# server to.
#
# Set rpc_address OR rpc_interface, not both. Interfaces must correspond
@@ -458,9 +455,6 @@ start_rpc: true
rpc_address: localhost
# rpc_interface: eth1
# port for Thrift to listen for clients on
rpc_port: 9160
# RPC address to broadcast to drivers and other Cassandra nodes. This cannot
# be set to 0.0.0.0. If left blank, this will be set to the value of
# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must
@@ -470,43 +464,6 @@ rpc_port: 9160
# enable or disable keepalive on rpc/native connections
rpc_keepalive: true
# Cassandra provides two out-of-the-box options for the RPC Server:
#
# sync -> One thread per thrift connection. For a very large number of clients, memory
# will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size
# per thread, and that will correspond to your use of virtual memory (but physical memory
# may be limited depending on use of stack space).
#
# hsha -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
# asynchronously using a small number of threads that does not vary with the amount
# of thrift clients (and thus scales well to many clients). The rpc requests are still
# synchronous (one thread per active request). If hsha is selected then it is essential
# that rpc_max_threads is changed from the default value of unlimited.
#
# The default is sync because on Windows hsha is about 30% slower. On Linux,
# sync/hsha performance is about the same, with hsha of course using less memory.
#
# Alternatively, can provide your own RPC server by providing the fully-qualified class name
# of an o.a.c.t.TServerFactory that can create an instance of it.
rpc_server_type: sync
# Uncomment rpc_min|max_thread to set request pool size limits.
#
# Regardless of your choice of RPC server (see above), the number of maximum requests in the
# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
# RPC server, it also dictates the number of clients that can be connected at all).
#
# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
#
# rpc_min_threads: 16
# rpc_max_threads: 2048
# uncomment to set socket buffer sizes on rpc connections
# rpc_send_buff_size_in_bytes:
# rpc_recv_buff_size_in_bytes:
# Uncomment to set socket buffer size for internode communication
# Note that when setting this, the buffer size is limited by net.core.wmem_max
# and when not setting it it is defined by net.ipv4.tcp_wmem
@@ -519,9 +476,6 @@ rpc_server_type: sync
# internode_send_buff_size_in_bytes:
# internode_recv_buff_size_in_bytes:
# Frame size for thrift (maximum message length).
thrift_framed_transport_size_in_mb: 15
# Set to true to have Cassandra create a hard link to each sstable
# flushed or streamed locally in a backups/ subdirectory of the
# keyspace data. Removing these links is the operator's
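With the Thrift listener gone, rpc_keepalive only governs native-transport (CQL) sockets. A hedged sketch of how such a setting is typically applied to an accepted Seastar socket (the cfg accessor mirrors the option name above):

seastar::connected_socket fd = std::move(accept_result.connection);
fd.set_nodelay(true);                    // favor latency over batching
fd.set_keepalive(cfg.rpc_keepalive());   // honor the rpc_keepalive setting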

View File

@@ -54,7 +54,6 @@ SEASTAR_TEST_CASE(test_functions) {
virtual void visit(const result_message::void_message&) override { throw "bad"; }
virtual void visit(const result_message::set_keyspace&) override { throw "bad"; }
virtual void visit(const result_message::prepared::cql&) override { throw "bad"; }
virtual void visit(const result_message::prepared::thrift&) override { throw "bad"; }
virtual void visit(const result_message::schema_change&) override { throw "bad"; }
virtual void visit(const result_message::rows& rows) override {
const auto& rs = rows.rs().result_set();

View File

@@ -63,7 +63,7 @@ SEASTAR_TEST_CASE(test_new_schema_with_no_structural_change_is_propagated) {
auto group0_guard = mm.start_group0_operation().get();
auto ts = group0_guard.write_timestamp();
mm.announce(service::prepare_column_family_update_announcement(mm.get_storage_proxy(),
new_schema, false, std::vector<view_ptr>(), ts).get(), std::move(group0_guard), "").get();
new_schema, std::vector<view_ptr>(), ts).get(), std::move(group0_guard), "").get();
BOOST_REQUIRE_NE(e.db().local().find_schema(old_schema->id())->version(), old_table_version);
BOOST_REQUIRE_NE(e.db().local().get_version(), old_node_version);
@@ -100,7 +100,7 @@ SEASTAR_TEST_CASE(test_schema_is_updated_in_keyspace) {
auto group0_guard = mm.start_group0_operation().get();
auto ts = group0_guard.write_timestamp();
mm.announce(service::prepare_column_family_update_announcement(mm.get_storage_proxy(),
new_schema, false, std::vector<view_ptr>(), ts).get(), std::move(group0_guard), "").get();
new_schema, std::vector<view_ptr>(), ts).get(), std::move(group0_guard), "").get();
s = e.local_db().find_schema(old_schema->id());
BOOST_REQUIRE_NE(*old_schema, *s);
@@ -198,7 +198,7 @@ SEASTAR_TEST_CASE(test_concurrent_column_addition) {
auto group0_guard = mm.start_group0_operation().get();
auto&& keyspace = e.db().local().find_keyspace(s0->ks_name()).metadata();
auto muts = db::schema_tables::make_update_table_mutations(e.db().local(), keyspace, s0, s2,
group0_guard.write_timestamp(), false);
group0_guard.write_timestamp());
mm.announce(std::move(muts), std::move(group0_guard), "").get();
}
@@ -364,7 +364,7 @@ SEASTAR_TEST_CASE(test_combined_column_add_and_drop) {
{
auto group0_guard = mm.start_group0_operation().get();
auto muts = db::schema_tables::make_update_table_mutations(e.db().local(), keyspace, s1, s2,
group0_guard.write_timestamp(), false);
group0_guard.write_timestamp());
mm.announce(std::move(muts), std::move(group0_guard), "").get();
}
@@ -382,7 +382,7 @@ SEASTAR_TEST_CASE(test_combined_column_add_and_drop) {
auto group0_guard = mm.start_group0_operation().get();
auto muts = db::schema_tables::make_update_table_mutations(e.db().local(), keyspace, s3, s4,
group0_guard.write_timestamp(), false);
group0_guard.write_timestamp());
mm.announce(std::move(muts), std::move(group0_guard), "").get();
}
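The narrowed signature, for reference; the dropped trailing bool distinguished Thrift from CQL callers, and the remaining arguments are unchanged (this sketch mirrors the tests above):

auto group0_guard = mm.start_group0_operation().get();
auto muts = db::schema_tables::make_update_table_mutations(
    e.db().local(), keyspace, old_schema, new_schema,
    group0_guard.write_timestamp());
mm.announce(std::move(muts), std::move(group0_guard), "").get();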

View File

@@ -100,7 +100,7 @@ def test_info(request, nodetool, display_all_tokens):
expected_requests = [
expected_request('GET', '/storage_service/gossiping', response=True),
expected_request('GET', '/storage_service/hostid/local', response=host_id),
expected_request('GET', '/storage_service/rpc_server', response=True),
expected_request('GET', '/storage_service/rpc_server', response=False),
expected_request('GET', '/storage_service/native_transport', response=True),
expected_request('GET', '/storage_service/load', response=load),
expected_request('GET', '/storage_service/generation_number', response=generation_number),
@@ -175,7 +175,7 @@ def test_info(request, nodetool, display_all_tokens):
expected_output = f'''\
{'ID':<23}: {host_id}
{'Gossip active':<23}: true
{'Thrift active':<23}: true
{'Thrift active':<23}: false
{'Native Transport active':<23}: true
{'Load':<23}: {format_size(load)}
{'Generation No':<23}: {generation_number}
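The REST endpoint itself survives only for compatibility with java-based nodetool and now always reports false. A hedged sketch of such a stub -- the registration helper below is hypothetical, not Scylla's actual generated API:

// register_get_handler is a hypothetical helper, for illustration only.
register_get_handler("/storage_service/rpc_server", [] {
    // Thrift support is gone, so the legacy RPC server can never be running.
    return make_ready_future<json::json_return_type>(false);
});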

View File

@@ -1,21 +0,0 @@
add_library(thrift STATIC)
target_sources(thrift
PRIVATE
controller.cc
handler.cc
server.cc
thrift_validation.cc)
target_include_directories(thrift
PUBLIC
${CMAKE_SOURCE_DIR})
target_link_libraries(thrift
PUBLIC
Seastar::seastar
xxHash::xxhash
PRIVATE
interface
absl::headers
Thrift::thrift)
check_headers(check-headers thrift
GLOB_RECURSE ${CMAKE_CURRENT_SOURCE_DIR}/*.hh)

View File

@@ -1,130 +0,0 @@
/*
* Copyright (C) 2020-present ScyllaDB
*/
/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/
#include "thrift/controller.hh"
#include <seastar/core/sharded.hh>
#include "thrift/server.hh"
#include "replica/database.hh"
#include "db/config.hh"
#include "log.hh"
static logging::logger clogger("thrift_controller");
thrift_controller::thrift_controller(distributed<replica::database>& db, sharded<auth::service>& auth,
sharded<cql3::query_processor>& qp, sharded<service::memory_limiter>& ml,
sharded<service::storage_service>& ss, sharded<service::storage_proxy>& proxy,
seastar::scheduling_group sg)
: protocol_server(sg)
, _ops_sem(1)
, _db(db)
, _auth_service(auth)
, _qp(qp)
, _mem_limiter(ml)
, _ss(ss)
, _proxy(proxy)
{ }
sstring thrift_controller::name() const {
return "rpc";
}
sstring thrift_controller::protocol() const {
return "thrift";
}
sstring thrift_controller::protocol_version() const {
return ::cassandra::thrift_version;
}
std::vector<socket_address> thrift_controller::listen_addresses() const {
if (_server && _addr) {
return {*_addr};
}
return {};
}
future<> thrift_controller::start_server() {
if (!_ops_sem.try_wait()) {
throw std::runtime_error(format("Thrift server is stopping, try again later"));
}
return do_start_server().finally([this] { _ops_sem.signal(); });
}
future<> thrift_controller::do_start_server() {
if (_server) {
return make_ready_future<>();
}
seastar::thread_attributes attr;
attr.sched_group = _sched_group;
return seastar::async(std::move(attr), [this] {
auto tserver = std::make_unique<distributed<thrift_server>>();
_addr.reset();
auto& cfg = _db.local().get_config();
auto preferred = cfg.rpc_interface_prefer_ipv6() ? std::make_optional(net::inet_address::family::INET6) : std::nullopt;
auto family = cfg.enable_ipv6_dns_lookup() || preferred ? std::nullopt : std::make_optional(net::inet_address::family::INET);
auto keepalive = cfg.rpc_keepalive();
auto ip = utils::resolve(cfg.rpc_address, family, preferred).get();
auto port = cfg.rpc_port();
_addr.emplace(ip, port);
auto tsc = sharded_parameter([&cfg] {
return thrift_server_config {
.timeout_config = updateable_timeout_config(cfg),
.max_request_size = cfg.thrift_max_message_length_in_mb() * (uint64_t(1) << 20),
};
});
tserver->start(sharded_parameter([this] { return _db.local().as_data_dictionary(); }), std::ref(_qp), std::ref(_ss), std::ref(_proxy), std::ref(_auth_service), std::ref(_mem_limiter), std::move(tsc)).get();
// #293 - do not stop anything
//engine().at_exit([tserver] {
// return tserver->stop();
//});
tserver->invoke_on_all(&thrift_server::listen, socket_address{ip, port}, keepalive).get();
clogger.info("Thrift server listening on {}:{} ...", ip, port);
_server = std::move(tserver);
});
}
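// Aside (editor's sketch, not part of the original file): the startup above is
// Seastar's standard sharded-service sequence. Hedged minimal form; my_server,
// listen_addr and keepalive stand in for the real types and values.
seastar::future<> start_sharded(seastar::sharded<my_server>& srv,
                                seastar::socket_address listen_addr, bool keepalive) {
    co_await srv.start();                                  // one instance per shard
    co_await srv.invoke_on_all(&my_server::listen, listen_addr, keepalive);
}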
future<> thrift_controller::stop_server() {
assert(this_shard_id() == 0);
if (_stopped) {
return make_ready_future<>();
}
return _ops_sem.wait().then([this] {
_stopped = true;
_ops_sem.broken();
_addr.reset();
return do_stop_server();
});
}
future<> thrift_controller::request_stop_server() {
if (!_ops_sem.try_wait()) {
throw std::runtime_error(format("Thrift server is starting, try again later"));
}
return with_scheduling_group(_sched_group, [this] {
return do_stop_server();
}).finally([this] { _ops_sem.signal(); });
}
future<> thrift_controller::do_stop_server() {
return do_with(std::move(_server), [] (std::unique_ptr<distributed<thrift_server>>& tserver) {
if (tserver) {
return tserver->stop().then([] {
clogger.info("Thrift server stopped");
});
}
return make_ready_future<>();
});
}
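// Aside (editor's sketch, not part of the original file): start_server() and
// stop_server() above share one guard -- a unit semaphore that serializes
// mutually exclusive operations. Distilled:
struct op_guard {
    seastar::semaphore ops{1};                     // at most one start/stop in flight
    seastar::future<> run(std::function<seastar::future<>()> op) {
        if (!ops.try_wait()) {
            throw std::runtime_error("another operation is in flight, try again later");
        }
        return op().finally([this] { ops.signal(); });
    }
};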

View File

@@ -1,57 +0,0 @@
/*
* Copyright (C) 2020-present ScyllaDB
*/
/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/
#pragma once
#include <seastar/core/semaphore.hh>
#include <seastar/core/distributed.hh>
#include <seastar/core/future.hh>
#include "service/memory_limiter.hh"
#include "protocol_server.hh"
using namespace seastar;
class thrift_server;
namespace replica {
class database;
}
namespace auth { class service; }
namespace cql3 { class query_processor; }
namespace service {
class storage_service;
class storage_proxy;
}
class thrift_controller : public protocol_server {
std::unique_ptr<distributed<thrift_server>> _server;
std::optional<socket_address> _addr;
semaphore _ops_sem; /* protects start/stop operations on _server */
bool _stopped = false;
distributed<replica::database>& _db;
sharded<auth::service>& _auth_service;
sharded<cql3::query_processor>& _qp;
sharded<service::memory_limiter>& _mem_limiter;
sharded<service::storage_service>& _ss;
sharded<service::storage_proxy>& _proxy;
future<> do_start_server();
future<> do_stop_server();
public:
thrift_controller(distributed<replica::database>&, sharded<auth::service>&, sharded<cql3::query_processor>&, sharded<service::memory_limiter>&, sharded<service::storage_service>& ss, sharded<service::storage_proxy>& proxy, seastar::scheduling_group sg);
virtual sstring name() const override;
virtual sstring protocol() const override;
virtual sstring protocol_version() const override;
virtual std::vector<socket_address> listen_addresses() const override;
virtual future<> start_server() override;
virtual future<> stop_server() override;
virtual future<> request_stop_server() override;
};

File diff suppressed because it is too large

View File

@@ -1,27 +0,0 @@
/*
* Copyright (C) 2014-present ScyllaDB
*/
/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/
#ifndef APPS_SEASTAR_THRIFT_HANDLER_HH_
#define APPS_SEASTAR_THRIFT_HANDLER_HH_
#include "Cassandra.h"
#include "auth/service.hh"
#include "cql3/query_processor.hh"
#include <memory>
struct timeout_config;
class service_permit;
namespace service { class storage_service; }
namespace data_dictionary {
class database;
}
std::unique_ptr<::cassandra::CassandraCobSvIfFactory> create_handler_factory(data_dictionary::database db, distributed<cql3::query_processor>& qp, sharded<service::storage_service>& ss, sharded<service::storage_proxy>& proxy, auth::service&, const updateable_timeout_config&, service_permit& current_permit);
#endif /* APPS_SEASTAR_THRIFT_HANDLER_HH_ */

View File

@@ -1,364 +0,0 @@
/*
* Copyright (C) 2014-present ScyllaDB
*/
/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/
#include "server.hh"
#include "handler.hh"
#include "db/config.hh"
#include <seastar/core/future-util.hh>
#include <seastar/core/circular_buffer.hh>
#include <seastar/core/metrics.hh>
#include <seastar/net/byteorder.hh>
#include <seastar/core/scattered_message.hh>
#include <seastar/core/sleep.hh>
#include <seastar/core/coroutine.hh>
#include <seastar/core/semaphore.hh>
#include "log.hh"
#include <thrift/server/TServer.h>
#include <thrift/transport/TBufferTransports.h>
#include <thrift/TProcessor.h>
#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/async/TAsyncProcessor.h>
#include <algorithm>
#include <vector>
#ifdef THRIFT_USES_BOOST
#include <boost/make_shared.hpp>
#endif
static logging::logger tlogger("thrift");
using namespace apache::thrift;
using namespace apache::thrift::transport;
using namespace apache::thrift::protocol;
using namespace apache::thrift::async;
using namespace ::cassandra;
using namespace std::chrono_literals;
class thrift_stats {
seastar::metrics::metric_groups _metrics;
public:
thrift_stats(thrift_server& server);
};
thrift_server::thrift_server(data_dictionary::database db,
distributed<cql3::query_processor>& qp,
sharded<service::storage_service>& ss,
sharded<service::storage_proxy>& proxy,
auth::service& auth_service,
service::memory_limiter& ml,
thrift_server_config config)
: _stats(new thrift_stats(*this))
, _config(std::move(config))
, _handler_factory(create_handler_factory(db, qp, ss, proxy, auth_service, _config.timeout_config, _current_permit).release())
, _protocol_factory(new TBinaryProtocolFactoryT<TMemoryBuffer>())
, _processor_factory(new CassandraAsyncProcessorFactory(_handler_factory))
, _memory_available(ml.get_semaphore())
, _max_concurrent_requests(db.get_config().max_concurrent_requests_per_shard) {
}
thrift_server::~thrift_server() {
}
future<> thrift_server::stop() {
auto f = _stop_gate.close();
std::for_each(_listeners.begin(), _listeners.end(), std::mem_fn(&server_socket::abort_accept));
std::for_each(_connections_list.begin(), _connections_list.end(), std::mem_fn(&connection::shutdown));
return f;
}
struct handler_deleter {
CassandraCobSvIfFactory* hf;
void operator()(CassandraCobSvIf* h) const {
hf->releaseHandler(h);
}
};
// thrift uses a shared_ptr to refer to the transport (= connection),
// while we do not, so we can't have connection inherit from TTransport.
struct thrift_server::connection::fake_transport : TTransport {
fake_transport(thrift_server::connection* c) : conn(c) {}
thrift_server::connection* conn;
};
thrift_server::connection::connection(thrift_server& server, connected_socket&& fd, socket_address addr)
: _server(server), _fd(std::move(fd)), _read_buf(_fd.input())
, _write_buf(_fd.output())
, _transport(thrift_std::make_shared<thrift_server::connection::fake_transport>(this))
, _input(thrift_std::make_shared<TMemoryBuffer>())
, _output(thrift_std::make_shared<TMemoryBuffer>())
, _in_proto(_server._protocol_factory->getProtocol(_input))
, _out_proto(_server._protocol_factory->getProtocol(_output))
, _processor(_server._processor_factory->getProcessor({ _in_proto, _out_proto, _transport })) {
++_server._total_connections;
++_server._current_connections;
_server._connections_list.push_back(*this);
}
thrift_server::connection::~connection() {
if (is_linked()) {
--_server._current_connections;
_server._connections_list.erase(_server._connections_list.iterator_to(*this));
}
}
thrift_server::connection::connection(connection&& other)
: _server(other._server)
, _fd(std::move(other._fd))
, _read_buf(std::move(other._read_buf))
, _write_buf(std::move(other._write_buf))
, _transport(std::move(other._transport))
, _input(std::move(other._input))
, _output(std::move(other._output))
, _in_proto(std::move(other._in_proto))
, _out_proto(std::move(other._out_proto))
, _processor(std::move(other._processor)) {
if (other.is_linked()) {
boost::intrusive::list<connection>::node_algorithms::init(this_ptr());
boost::intrusive::list<connection>::node_algorithms::swap_nodes(other.this_ptr(), this_ptr());
}
}
future<>
thrift_server::connection::process() {
return do_until([this] { return _read_buf.eof(); },
[this] { return process_one_request(); })
.finally([this] {
return _write_buf.close();
});
}
future<>
thrift_server::connection::process_one_request() {
_input->resetBuffer();
_output->resetBuffer();
co_await read();
if (_server._requests_serving >= _server._max_concurrent_requests) {
_server._requests_shed++;
tlogger.debug("message dropped due to overload");
co_return;
}
++_server._requests_serving;
++_server._requests_served;
auto ret = _processor_promise.get_future().handle_exception([&server = _server] (const std::exception_ptr&) {
server._requests_serving--;
});
// adapt from "continuation object style" to future/promise
auto complete = [this] (bool success) mutable {
// FIXME: look at success?
_server._requests_serving--;
write().forward_to(std::move(_processor_promise));
_processor_promise = promise<>();
};
// Heuristics copied from transport/server.cc
size_t mem_estimate = 8000 + 2 * _input->available_read();
auto fut = get_units(_server._memory_available, mem_estimate);
if (_server._memory_available.waiters()) {
++_server._requests_blocked_memory;
}
auto units = co_await std::move(fut);
// NOTICE: this permit is put in the server under the assumption that no other
// connection will overwrite this permit *until* it's extracted by the code
// which handles the Thrift request (via calling obtain_permit()).
// This assumption is true because there are no preemption points between this
// insertion and the call to obtain_permit(), which was verified both by
// code inspection and confirmed empirically by running manual tests.
if (_server._current_permit.count() > 0) {
tlogger.debug("Current service permit is overwritten while its units are still held ({}). "
"This situation likely means that there's a bug in passing service permits to message handlers.",
_server._current_permit.count());
}
_server._current_permit = make_service_permit(std::move(units));
_processor->process(complete, _in_proto, _out_proto);
co_return co_await std::move(ret);
}
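// Aside (editor's sketch, not part of the original file): the admission logic
// above, distilled -- estimate the request's memory, then hold semaphore units
// for its lifetime; the units release themselves on destruction.
seastar::future<> admit_request(seastar::semaphore& memory_available, size_t request_bytes) {
    size_t mem_estimate = 8000 + 2 * request_bytes;        // heuristic from process_one_request()
    auto units = co_await seastar::get_units(memory_available, mem_estimate);
    // ... serve the request while `units` is held ...
}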
future<>
thrift_server::connection::read() {
return _read_buf.read_exactly(4).then([this] (temporary_buffer<char> size_buf) {
if (size_buf.size() != 4) {
return make_ready_future<>();
}
union {
uint32_t n;
char b[4];
} data;
std::copy_n(size_buf.get(), 4, data.b);
auto n = ntohl(data.n);
if (n > _server._config.max_request_size) {
// Close connection silently, we can't return a response because we did not
// read a complete frame.
tlogger.info("message size {} exceeds configured maximum {}, closing connection", n, _server._config.max_request_size);
return make_ready_future<>();
}
return _read_buf.read_exactly(n).then([this, n] (temporary_buffer<char> buf) {
if (buf.size() != n) {
// FIXME: exception perhaps?
return;
}
_in_tmp = std::move(buf); // keep ownership of the data
auto b = reinterpret_cast<uint8_t*>(_in_tmp.get_write());
_input->resetBuffer(b, _in_tmp.size());
});
});
}
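// Aside (editor's sketch, not part of the original file): read() above parses
// Thrift's framed transport -- a 4-byte big-endian length prefix followed by
// that many payload bytes. Distilled into one coroutine:
seastar::future<seastar::temporary_buffer<char>>
read_frame(seastar::input_stream<char>& in, uint32_t max_frame) {
    auto hdr = co_await in.read_exactly(4);
    if (hdr.size() != 4) {
        co_return seastar::temporary_buffer<char>();       // EOF mid-header
    }
    uint32_t n;
    std::memcpy(&n, hdr.get(), 4);
    n = ntohl(n);                                          // network byte order
    if (n > max_frame) {
        throw std::runtime_error("frame exceeds configured maximum");
    }
    co_return co_await in.read_exactly(n);
}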
future<>
thrift_server::connection::write() {
uint8_t* data;
uint32_t len;
_output->getBuffer(&data, &len);
net::packed<uint32_t> plen = { net::hton(len) };
return _write_buf.write(reinterpret_cast<char*>(&plen), 4).then([this, data, len] {
// FIXME: zero-copy
return _write_buf.write(reinterpret_cast<char*>(data), len);
}).then([this] {
return _write_buf.flush();
});
}
void
thrift_server::connection::shutdown() {
try {
_fd.shutdown_input();
_fd.shutdown_output();
} catch (...) {
}
}
future<>
thrift_server::listen(socket_address addr, bool keepalive) {
listen_options lo;
lo.reuse_address = true;
_listeners.push_back(seastar::listen(addr, lo));
do_accepts(_listeners.size() - 1, keepalive, 0);
return make_ready_future<>();
}
void
thrift_server::do_accepts(int which, bool keepalive, int num_attempts) {
if (_stop_gate.is_closed()) {
return;
}
// Future is waited on indirectly in `stop()` (via `_stop_gate`).
(void)with_gate(_stop_gate, [&, this] {
return _listeners[which].accept().then([this, which, keepalive] (accept_result ar) {
auto&& fd = ar.connection;
auto&& addr = ar.remote_address;
fd.set_nodelay(true);
fd.set_keepalive(keepalive);
// Future is waited on indirectly in `stop()` (via `_stop_gate`).
(void)with_gate(_stop_gate, [&, this] {
return do_with(connection(*this, std::move(fd), addr), [] (auto& conn) {
return conn.process().then_wrapped([&conn] (future<> f) {
conn.shutdown();
try {
f.get();
} catch (std::exception& ex) {
tlogger.debug("request error {}", ex.what());
}
});
});
});
do_accepts(which, keepalive, 0);
}).handle_exception([this, which, keepalive, num_attempts] (auto ex) {
tlogger.debug("accept failed {}", ex);
try {
std::rethrow_exception(std::move(ex));
} catch (const seastar::gate_closed_exception&) {
return;
} catch (...) {
if (_stop_gate.is_closed()) {
return;
}
// Done in the background.
(void)with_gate(_stop_gate, [this, which, keepalive, num_attempts] {
int backoff = 2 << std::max(num_attempts, 10);
tlogger.debug("sleeping for {}ms", backoff);
return sleep(std::chrono::milliseconds(backoff)).then([this, which, keepalive, num_attempts] {
tlogger.debug("retrying accept after failure");
do_accepts(which, keepalive, num_attempts + 1);
});
});
}
});
});
}
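// Aside (editor's sketch, not part of the original file): note that the backoff
// above, 2 << std::max(num_attempts, 10), gives a ~2s floor that then grows
// without bound on further attempts; a conventional capped exponential backoff
// would use std::min instead:
seastar::future<> backoff_before_retry(int num_attempts) {
    auto backoff = std::chrono::milliseconds(2 << std::min(num_attempts, 10));
    co_await seastar::sleep(backoff);                      // caps at 2 << 10 = 2048ms
}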
uint64_t
thrift_server::total_connections() const {
return _total_connections;
}
uint64_t
thrift_server::current_connections() const {
return _current_connections;
}
uint64_t
thrift_server::requests_served() const {
return _requests_served;
}
uint64_t
thrift_server::requests_serving() const {
return _requests_serving;
}
size_t
thrift_server::max_request_size() const {
return _config.max_request_size;
}
const semaphore&
thrift_server::memory_available() const {
return _memory_available;
}
uint64_t
thrift_server::requests_blocked_memory() const {
return _requests_blocked_memory;
}
uint64_t
thrift_server::requests_shed() const {
return _requests_shed;
}
thrift_stats::thrift_stats(thrift_server& server) {
namespace sm = seastar::metrics;
_metrics.add_group("thrift", {
sm::make_counter("thrift-connections", [&server] { return server.total_connections(); },
sm::description("Rate of creation of new Thrift connections.")),
sm::make_gauge("current_connections", [&server] { return server.current_connections(); },
sm::description("Holds a current number of opened Thrift connections.")),
sm::make_counter("served", [&server] { return server.requests_served(); },
sm::description("Rate of serving Thrift requests.")),
sm::make_gauge("serving", [&server] { return server.requests_serving(); },
sm::description("Number of Thrift requests being currently served.")),
sm::make_gauge("requests_blocked_memory_current", [&server] { return server.memory_available().waiters(); },
sm::description(
seastar::format("Holds the number of Thrift requests that are currently blocked due to reaching the memory quota limit ({}B). "
"Non-zero value indicates that our bottleneck is memory and more specifically - the memory quota allocated for the \"Thrift transport\" component.", server.max_request_size()))),
sm::make_counter("requests_blocked_memory", [&server] { return server.requests_blocked_memory(); },
sm::description(
seastar::format("Holds an incrementing counter with the Thrift requests that ever blocked due to reaching the memory quota limit ({}B). "
"The first derivative of this value shows how often we block due to memory exhaustion in the \"Thrift transport\" component.", server.max_request_size()))),
sm::make_counter("requests_shed", [&server] { return server.requests_shed(); },
sm::description("Holds an incrementing counter with the requests that were shed due to exceeding the threshold configured via max_concurrent_requests_per_shard. "
"The first derivative of this value shows how often we shed requests due to exceeding the limit in the \"Thrift transport\" component.")),
sm::make_gauge("requests_memory_available", [&server] { return server.memory_available().current(); },
sm::description(
seastar::format("Holds the amount of available memory for admitting new Thrift requests (max is {}B)."
"Zero value indicates that our bottleneck is memory and more specifically - the memory quota allocated for the \"Thrift transport\" component.", server.max_request_size())))
});
}

View File

@@ -1,140 +0,0 @@
/*
* Copyright (C) 2014-present ScyllaDB
*/
/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/
#ifndef APPS_SEASTAR_THRIFT_SERVER_HH_
#define APPS_SEASTAR_THRIFT_SERVER_HH_
#include <seastar/core/seastar.hh>
#include <seastar/core/distributed.hh>
#include "cql3/query_processor.hh"
#include "timeout_config.hh"
#include "service/memory_limiter.hh"
#include <seastar/core/gate.hh>
#include <memory>
#include <cstdint>
#include <boost/intrusive/list.hpp>
#include "utils/updateable_value.hh"
#include "service_permit.hh"
class thrift_server;
class thrift_stats;
#ifdef THRIFT_USES_BOOST
namespace thrift_std = boost;
#else
namespace thrift_std = std;
#endif
namespace cassandra {
static const sstring thrift_version = "20.1.0";
class CassandraCobSvIfFactory;
}
namespace apache { namespace thrift { namespace protocol {
class TProtocolFactory;
class TProtocol;
}}}
namespace apache { namespace thrift { namespace async {
class TAsyncProcessor;
class TAsyncProcessorFactory;
}}}
namespace apache { namespace thrift { namespace transport {
class TMemoryBuffer;
}}}
namespace auth {
class service;
}
namespace service { class storage_service; }
namespace data_dictionary {
class database;
}
struct thrift_server_config {
::updateable_timeout_config timeout_config;
uint64_t max_request_size;
std::function<semaphore& ()> get_service_memory_limiter_semaphore;
};
class thrift_server {
class connection : public boost::intrusive::list_base_hook<> {
struct fake_transport;
thrift_server& _server;
connected_socket _fd;
input_stream<char> _read_buf;
output_stream<char> _write_buf;
temporary_buffer<char> _in_tmp;
thrift_std::shared_ptr<fake_transport> _transport;
thrift_std::shared_ptr<apache::thrift::transport::TMemoryBuffer> _input;
thrift_std::shared_ptr<apache::thrift::transport::TMemoryBuffer> _output;
thrift_std::shared_ptr<apache::thrift::protocol::TProtocol> _in_proto;
thrift_std::shared_ptr<apache::thrift::protocol::TProtocol> _out_proto;
thrift_std::shared_ptr<apache::thrift::async::TAsyncProcessor> _processor;
promise<> _processor_promise;
public:
connection(thrift_server& server, connected_socket&& fd, socket_address addr);
~connection();
connection(connection&&);
future<> process();
future<> read();
future<> write();
void shutdown();
private:
future<> process_one_request();
};
private:
std::vector<server_socket> _listeners;
std::unique_ptr<thrift_stats> _stats;
service_permit _current_permit = empty_service_permit();
thrift_server_config _config;
thrift_std::shared_ptr<::cassandra::CassandraCobSvIfFactory> _handler_factory;
std::unique_ptr<apache::thrift::protocol::TProtocolFactory> _protocol_factory;
thrift_std::shared_ptr<apache::thrift::async::TAsyncProcessorFactory> _processor_factory;
uint64_t _total_connections = 0;
uint64_t _current_connections = 0;
uint64_t _requests_served = 0;
uint64_t _requests_serving = 0;
uint64_t _requests_blocked_memory = 0;
semaphore& _memory_available;
utils::updateable_value<uint32_t> _max_concurrent_requests;
size_t _requests_shed;
boost::intrusive::list<connection> _connections_list;
seastar::gate _stop_gate;
public:
thrift_server(data_dictionary::database db, distributed<cql3::query_processor>& qp, sharded<service::storage_service>& ss, sharded<service::storage_proxy>& proxy, auth::service&, service::memory_limiter& ml, thrift_server_config config);
~thrift_server();
future<> listen(socket_address addr, bool keepalive);
future<> stop();
void do_accepts(int which, bool keepalive, int num_attempts);
uint64_t total_connections() const;
uint64_t current_connections() const;
uint64_t requests_served() const;
uint64_t requests_serving() const;
size_t max_request_size() const;
const semaphore& memory_available() const;
uint64_t requests_blocked_memory() const;
uint64_t requests_shed() const;
private:
void maybe_retry_accept(int which, bool keepalive, std::exception_ptr ex);
};
#endif /* APPS_SEASTAR_THRIFT_SERVER_HH_ */

View File

@@ -1,95 +0,0 @@
/*
* Copyright (C) 2015-present ScyllaDB
*
* Modified by ScyllaDB
*/
/*
* SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0)
*/
#include <limits>
#include "thrift_validation.hh"
#include "thrift/utils.hh"
#include "db/system_keyspace.hh"
#include <boost/regex.hpp>
using namespace thrift;
using namespace ::apache::thrift;
namespace thrift_validation {
static constexpr uint32_t MAX_UNSIGNED_SHORT = std::numeric_limits<uint16_t>::max();
void validate_key(const schema& s, const bytes_view& k) {
if (k.empty()) {
throw make_exception<InvalidRequestException>("Key may not be empty");
}
auto max = MAX_UNSIGNED_SHORT;
if (k.size() > max) {
throw make_exception<InvalidRequestException>("Key length of {} is longer than maximum of {}", k.size(), max);
}
// FIXME: implement
//s.partition_key_type()->validate(k);
}
void validate_keyspace_not_system(const std::string& keyspace) {
std::string name;
name.resize(keyspace.length());
std::transform(keyspace.begin(), keyspace.end(), name.begin(), ::tolower);
if (is_system_keyspace(name)) {
throw make_exception<InvalidRequestException>("system keyspace is not user-modifiable");
}
}
void validate_ks_def(const KsDef& ks_def) {
validate_keyspace_not_system(ks_def.name);
boost::regex name_regex("\\w+");
if (!boost::regex_match(ks_def.name, name_regex)) {
throw make_exception<InvalidRequestException>("\"{}\" is not a valid keyspace name", ks_def.name);
}
if (ks_def.name.length() > schema::NAME_LENGTH) {
throw make_exception<InvalidRequestException>("Keyspace names shouldn't be more than {} characters long (got \"{}\")", schema::NAME_LENGTH, ks_def.name);
}
}
void validate_cf_def(const CfDef& cf_def) {
boost::regex name_regex("\\w+");
if (!boost::regex_match(cf_def.name, name_regex)) {
throw make_exception<InvalidRequestException>("\"{}\" is not a valid column family name", cf_def.name);
}
if (cf_def.name.length() > schema::NAME_LENGTH) {
throw make_exception<InvalidRequestException>("Keyspace names shouldn't be more than {} characters long (got \"{}\")", schema::NAME_LENGTH, cf_def.name);
}
}
void validate_column_name(const std::string& name) {
auto max_name_length = MAX_UNSIGNED_SHORT;
if (name.size() > max_name_length) {
throw make_exception<InvalidRequestException>("column name length must not be greater than {}", max_name_length);
}
if (name.empty()) {
throw make_exception<InvalidRequestException>("column name must not be empty");
}
}
void validate_column_names(const std::vector<std::string>& names) {
for (auto&& name : names) {
validate_column_name(name);
}
}
void validate_column(const Column& col, const column_definition& def) {
if (!col.__isset.value) {
throw make_exception<InvalidRequestException>("Column value is required");
}
if (!col.__isset.timestamp) {
throw make_exception<InvalidRequestException>("Column timestamp is required");
}
def.type->validate(to_bytes_view(col.value));
}
}

View File

@@ -1,643 +0,0 @@
/*
* Copyright (C) 2015-present ScyllaDB
*
* Modified by ScyllaDB
*/
/*
* SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0)
*/
#pragma once
#include "schema/schema.hh"
#include "bytes.hh"
#include "cassandra_types.h"
using namespace ::cassandra;
#if 0
import java.nio.ByteBuffer;
import java.util.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.cassandra.config.*;
import org.apache.cassandra.cql3.ColumnIdentifier;
import org.apache.cassandra.db.*;
import org.apache.cassandra.db.composites.*;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.db.filter.NamesQueryFilter;
import org.apache.cassandra.db.filter.SliceQueryFilter;
import org.apache.cassandra.db.index.SecondaryIndexManager;
import org.apache.cassandra.db.marshal.AbstractType;
import org.apache.cassandra.db.marshal.ColumnToCollectionType;
import org.apache.cassandra.db.marshal.UTF8Type;
import org.apache.cassandra.dht.IPartitioner;
import org.apache.cassandra.dht.Token;
import org.apache.cassandra.serializers.MarshalException;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.FBUtilities;
#endif
/**
* This has a lot of building blocks for CassandraServer to call to make sure it has valid input
* -- ensuring column names conform to the declared comparator, for instance.
*
* The methods here mostly try to do just one part of the validation so they can be combined
* for different needs -- supercolumns vs regular, range slices vs named, batch vs single-column.
* (ValidateColumnPath is the main exception in that it includes keyspace and CF validation.)
*/
namespace thrift_validation {
#if 0
private static final Logger logger = LoggerFactory.getLogger(ThriftValidation.class);
#endif
void validate_key(const schema& s, const bytes_view& key);
void validate_keyspace_not_system(const std::string& keyspace);
void validate_ks_def(const KsDef& ks_def);
void validate_cf_def(const CfDef& cf_def);
void validate_column_name(const std::string& name);
void validate_column_names(const std::vector<std::string>& names);
void validate_column(const Column& col, const column_definition& def);
#if 0
public static void validateKeyspace(String keyspaceName) throws KeyspaceNotDefinedException
{
if (!Schema.instance.getKeyspaces().contains(keyspaceName))
{
throw new KeyspaceNotDefinedException("Keyspace " + keyspaceName + " does not exist");
}
}
public static CFMetaData validateColumnFamily(String keyspaceName, String cfName, boolean isCommutativeOp) throws org.apache.cassandra.exceptions.InvalidRequestException
{
CFMetaData metadata = validateColumnFamily(keyspaceName, cfName);
if (isCommutativeOp)
{
if (!metadata.isCounter())
throw new org.apache.cassandra.exceptions.InvalidRequestException("invalid operation for non commutative table " + cfName);
}
else
{
if (metadata.isCounter())
throw new org.apache.cassandra.exceptions.InvalidRequestException("invalid operation for commutative table " + cfName);
}
return metadata;
}
/**
* validates all parts of the path to the column, including the column name
*/
public static void validateColumnPath(CFMetaData metadata, ColumnPath column_path) throws org.apache.cassandra.exceptions.InvalidRequestException
{
if (metadata.cfType == ColumnFamilyType.Standard)
{
if (column_path.super_column != null)
{
throw new org.apache.cassandra.exceptions.InvalidRequestException("supercolumn parameter is invalid for standard CF " + metadata.cfName);
}
if (column_path.column == null)
{
throw new org.apache.cassandra.exceptions.InvalidRequestException("column parameter is not optional for standard CF " + metadata.cfName);
}
}
else
{
if (column_path.super_column == null)
throw new org.apache.cassandra.exceptions.InvalidRequestException("supercolumn parameter is not optional for super CF " + metadata.cfName);
}
if (column_path.column != null)
{
validateColumnNames(metadata, column_path.super_column, Arrays.asList(column_path.column));
}
if (column_path.super_column != null)
{
validateColumnNames(metadata, (ByteBuffer)null, Arrays.asList(column_path.super_column));
}
}
public static void validateColumnParent(CFMetaData metadata, ColumnParent column_parent) throws org.apache.cassandra.exceptions.InvalidRequestException
{
if (metadata.cfType == ColumnFamilyType.Standard)
{
if (column_parent.super_column != null)
{
throw new org.apache.cassandra.exceptions.InvalidRequestException("table alone is required for standard CF " + metadata.cfName);
}
}
if (column_parent.super_column != null)
{
validateColumnNames(metadata, (ByteBuffer)null, Arrays.asList(column_parent.super_column));
}
}
// column_path_or_parent is a ColumnPath for remove, where the "column" is optional even for a standard CF
static void validateColumnPathOrParent(CFMetaData metadata, ColumnPath column_path_or_parent) throws org.apache.cassandra.exceptions.InvalidRequestException
{
if (metadata.cfType == ColumnFamilyType.Standard)
{
if (column_path_or_parent.super_column != null)
{
throw new org.apache.cassandra.exceptions.InvalidRequestException("supercolumn may not be specified for standard CF " + metadata.cfName);
}
}
if (metadata.cfType == ColumnFamilyType.Super)
{
if (column_path_or_parent.super_column == null && column_path_or_parent.column != null)
{
throw new org.apache.cassandra.exceptions.InvalidRequestException("A column cannot be specified without specifying a super column for removal on super CF "
+ metadata.cfName);
}
}
if (column_path_or_parent.column != null)
{
validateColumnNames(metadata, column_path_or_parent.super_column, Arrays.asList(column_path_or_parent.column));
}
if (column_path_or_parent.super_column != null)
{
validateColumnNames(metadata, (ByteBuffer)null, Arrays.asList(column_path_or_parent.super_column));
}
}
/**
* Validates the column names but not the parent path or data
*/
private static void validateColumnNames(CFMetaData metadata, ByteBuffer superColumnName, Iterable<ByteBuffer> column_names)
throws org.apache.cassandra.exceptions.InvalidRequestException
{
int maxNameLength = Cell.MAX_NAME_LENGTH;
if (superColumnName != null)
{
if (superColumnName.remaining() > maxNameLength)
throw new org.apache.cassandra.exceptions.InvalidRequestException("supercolumn name length must not be greater than " + maxNameLength);
if (superColumnName.remaining() == 0)
throw new org.apache.cassandra.exceptions.InvalidRequestException("supercolumn name must not be empty");
if (metadata.cfType == ColumnFamilyType.Standard)
throw new org.apache.cassandra.exceptions.InvalidRequestException("supercolumn specified to table " + metadata.cfName + " containing normal columns");
}
AbstractType<?> comparator = SuperColumns.getComparatorFor(metadata, superColumnName);
boolean isCQL3Table = !metadata.isThriftCompatible();
for (ByteBuffer name : column_names)
{
if (name.remaining() > maxNameLength)
throw new org.apache.cassandra.exceptions.InvalidRequestException("column name length must not be greater than " + maxNameLength);
if (name.remaining() == 0)
throw new org.apache.cassandra.exceptions.InvalidRequestException("column name must not be empty");
try
{
comparator.validate(name);
}
catch (MarshalException e)
{
throw new org.apache.cassandra.exceptions.InvalidRequestException(e.getMessage());
}
if (isCQL3Table)
{
// CQL3 tables don't support having only part of their composite column names set
Composite composite = metadata.comparator.fromByteBuffer(name);
int minComponents = metadata.comparator.clusteringPrefixSize() + 1;
if (composite.size() < minComponents)
throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("Not enough components (found %d but %d expected) for column name since %s is a CQL3 table",
composite.size(), minComponents, metadata.cfName));
// Furthermore, the column name must be a declared one.
int columnIndex = metadata.comparator.clusteringPrefixSize();
ByteBuffer CQL3ColumnName = composite.get(columnIndex);
if (!CQL3ColumnName.hasRemaining())
continue; // Row marker, ok
ColumnIdentifier columnId = new ColumnIdentifier(CQL3ColumnName, metadata.comparator.subtype(columnIndex));
if (metadata.getColumnDefinition(columnId) == null)
throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("Invalid cell for CQL3 table %s. The CQL3 column component (%s) does not correspond to a defined CQL3 column",
metadata.cfName, columnId));
// On top of that, if we have a collection component, the (CQL3) column must be a collection
if (metadata.comparator.hasCollections() && composite.size() == metadata.comparator.size())
{
ColumnToCollectionType collectionType = metadata.comparator.collectionType();
if (!collectionType.defined.containsKey(CQL3ColumnName))
throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("Invalid collection component, %s is not a collection", UTF8Type.instance.getString(CQL3ColumnName)));
}
}
}
}
public static void validateColumnNames(CFMetaData metadata, ColumnParent column_parent, Iterable<ByteBuffer> column_names) throws org.apache.cassandra.exceptions.InvalidRequestException
{
validateColumnNames(metadata, column_parent.super_column, column_names);
}
public static void validateRange(CFMetaData metadata, ColumnParent column_parent, SliceRange range) throws org.apache.cassandra.exceptions.InvalidRequestException
{
if (range.count < 0)
throw new org.apache.cassandra.exceptions.InvalidRequestException("get_slice requires non-negative count");
int maxNameLength = Cell.MAX_NAME_LENGTH;
if (range.start.remaining() > maxNameLength)
throw new org.apache.cassandra.exceptions.InvalidRequestException("range start length cannot be larger than " + maxNameLength);
if (range.finish.remaining() > maxNameLength)
throw new org.apache.cassandra.exceptions.InvalidRequestException("range finish length cannot be larger than " + maxNameLength);
AbstractType<?> comparator = SuperColumns.getComparatorFor(metadata, column_parent.super_column);
try
{
comparator.validate(range.start);
comparator.validate(range.finish);
}
catch (MarshalException e)
{
throw new org.apache.cassandra.exceptions.InvalidRequestException(e.getMessage());
}
Comparator<ByteBuffer> orderedComparator = range.isReversed() ? comparator.reverseComparator : comparator;
if (range.start.remaining() > 0
&& range.finish.remaining() > 0
&& orderedComparator.compare(range.start, range.finish) > 0)
{
throw new org.apache.cassandra.exceptions.InvalidRequestException("range finish must come after start in the order of traversal");
}
}
public static void validateColumnOrSuperColumn(CFMetaData metadata, ColumnOrSuperColumn cosc)
throws org.apache.cassandra.exceptions.InvalidRequestException
{
boolean isCommutative = metadata.isCounter();
int nulls = 0;
if (cosc.column == null) nulls++;
if (cosc.super_column == null) nulls++;
if (cosc.counter_column == null) nulls++;
if (cosc.counter_super_column == null) nulls++;
if (nulls != 3)
throw new org.apache.cassandra.exceptions.InvalidRequestException("ColumnOrSuperColumn must have one (and only one) of column, super_column, counter and counter_super_column");
if (cosc.column != null)
{
if (isCommutative)
throw new org.apache.cassandra.exceptions.InvalidRequestException("invalid operation for commutative table " + metadata.cfName);
validateTtl(cosc.column);
validateColumnPath(metadata, new ColumnPath(metadata.cfName).setSuper_column((ByteBuffer)null).setColumn(cosc.column.name));
validateColumnData(metadata, null, cosc.column);
}
if (cosc.super_column != null)
{
if (isCommutative)
throw new org.apache.cassandra.exceptions.InvalidRequestException("invalid operation for commutative table " + metadata.cfName);
for (Column c : cosc.super_column.columns)
{
validateColumnPath(metadata, new ColumnPath(metadata.cfName).setSuper_column(cosc.super_column.name).setColumn(c.name));
validateColumnData(metadata, cosc.super_column.name, c);
}
}
if (cosc.counter_column != null)
{
if (!isCommutative)
throw new org.apache.cassandra.exceptions.InvalidRequestException("invalid operation for non commutative table " + metadata.cfName);
validateColumnPath(metadata, new ColumnPath(metadata.cfName).setSuper_column((ByteBuffer)null).setColumn(cosc.counter_column.name));
}
if (cosc.counter_super_column != null)
{
if (!isCommutative)
throw new org.apache.cassandra.exceptions.InvalidRequestException("invalid operation for non commutative table " + metadata.cfName);
for (CounterColumn c : cosc.counter_super_column.columns)
validateColumnPath(metadata, new ColumnPath(metadata.cfName).setSuper_column(cosc.counter_super_column.name).setColumn(c.name));
}
}
private static void validateTtl(Column column) throws org.apache.cassandra.exceptions.InvalidRequestException
{
if (column.isSetTtl())
{
if (column.ttl <= 0)
throw new org.apache.cassandra.exceptions.InvalidRequestException("ttl must be positive");
if (column.ttl > ExpiringCell.MAX_TTL)
throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("ttl is too large. requested (%d) maximum (%d)", column.ttl, ExpiringCell.MAX_TTL));
}
else
{
// if it's not set, then it should be zero -- here we are just checking to make sure Thrift doesn't change that contract with us.
assert column.ttl == 0;
}
}
public static void validateMutation(CFMetaData metadata, Mutation mut)
throws org.apache.cassandra.exceptions.InvalidRequestException
{
ColumnOrSuperColumn cosc = mut.column_or_supercolumn;
Deletion del = mut.deletion;
int nulls = 0;
if (cosc == null) nulls++;
if (del == null) nulls++;
if (nulls != 1)
{
throw new org.apache.cassandra.exceptions.InvalidRequestException("mutation must have one and only one of column_or_supercolumn or deletion");
}
if (cosc != null)
{
validateColumnOrSuperColumn(metadata, cosc);
}
else
{
validateDeletion(metadata, del);
}
}
public static void validateDeletion(CFMetaData metadata, Deletion del) throws org.apache.cassandra.exceptions.InvalidRequestException
{
if (del.super_column != null)
validateColumnNames(metadata, (ByteBuffer)null, Arrays.asList(del.super_column));
if (del.predicate != null)
validateSlicePredicate(metadata, del.super_column, del.predicate);
if (metadata.cfType == ColumnFamilyType.Standard && del.super_column != null)
{
String msg = String.format("Deletion of super columns is not possible on a standard table (KeySpace=%s Table=%s Deletion=%s)", metadata.ksName, metadata.cfName, del);
throw new org.apache.cassandra.exceptions.InvalidRequestException(msg);
}
if (metadata.isCounter())
{
// forcing server timestamp even if a timestamp was set for coherence with other counter operation
del.timestamp = System.currentTimeMillis();
}
else if (!del.isSetTimestamp())
{
throw new org.apache.cassandra.exceptions.InvalidRequestException("Deletion timestamp is not optional for non commutative table " + metadata.cfName);
}
}
public static void validateSlicePredicate(CFMetaData metadata, ByteBuffer scName, SlicePredicate predicate) throws org.apache.cassandra.exceptions.InvalidRequestException
{
if (predicate.column_names == null && predicate.slice_range == null)
throw new org.apache.cassandra.exceptions.InvalidRequestException("A SlicePredicate must be given a list of Columns, a SliceRange, or both");
if (predicate.slice_range != null)
validateRange(metadata, new ColumnParent(metadata.cfName).setSuper_column(scName), predicate.slice_range);
if (predicate.column_names != null)
validateColumnNames(metadata, scName, predicate.column_names);
}
/**
* Validates the data part of the column (everything in the column object but the name, which is assumed to be valid)
*/
public static void validateColumnData(CFMetaData metadata, ByteBuffer scName, Column column) throws org.apache.cassandra.exceptions.InvalidRequestException
{
validateTtl(column);
if (!column.isSetValue())
throw new org.apache.cassandra.exceptions.InvalidRequestException("Column value is required");
if (!column.isSetTimestamp())
throw new org.apache.cassandra.exceptions.InvalidRequestException("Column timestamp is required");
CellName cn = scName == null
? metadata.comparator.cellFromByteBuffer(column.name)
: metadata.comparator.makeCellName(scName, column.name);
try
{
AbstractType<?> validator = metadata.getValueValidator(cn);
if (validator != null)
validator.validate(column.value);
}
catch (MarshalException me)
{
if (logger.isDebugEnabled())
logger.debug("rejecting invalid value {}", ByteBufferUtil.bytesToHex(summarize(column.value)));
throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("(%s) [%s][%s][%s] failed validation",
me.getMessage(),
metadata.ksName,
metadata.cfName,
(SuperColumns.getComparatorFor(metadata, scName != null)).getString(column.name)));
}
// Indexed column values cannot be larger than 64K. See CASSANDRA-3057/4240 for more details
if (!Keyspace.open(metadata.ksName).getColumnFamilyStore(metadata.cfName).indexManager.validate(asDBColumn(cn, column)))
throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("Can't index column value of size %d for index %s in CF %s of KS %s",
column.value.remaining(),
metadata.getColumnDefinition(cn).getIndexName(),
metadata.cfName,
metadata.ksName));
}
private static Cell asDBColumn(CellName name, Column column)
{
if (column.ttl <= 0)
return new BufferCell(name, column.value, column.timestamp);
else
return new BufferExpiringCell(name, column.value, column.timestamp, column.ttl);
}
/**
* Return, at most, the first 64K of the buffer. This avoids very large column values being
* logged in their entirety.
*/
private static ByteBuffer summarize(ByteBuffer buffer)
{
int MAX = Short.MAX_VALUE;
if (buffer.remaining() <= MAX)
return buffer;
return (ByteBuffer) buffer.slice().limit(buffer.position() + MAX);
}
public static void validatePredicate(CFMetaData metadata, ColumnParent column_parent, SlicePredicate predicate)
throws org.apache.cassandra.exceptions.InvalidRequestException
{
if (predicate.column_names == null && predicate.slice_range == null)
throw new org.apache.cassandra.exceptions.InvalidRequestException("predicate column_names and slice_range may not both be null");
if (predicate.column_names != null && predicate.slice_range != null)
throw new org.apache.cassandra.exceptions.InvalidRequestException("predicate column_names and slice_range may not both be present");
if (predicate.getSlice_range() != null)
validateRange(metadata, column_parent, predicate.slice_range);
else
validateColumnNames(metadata, column_parent, predicate.column_names);
}
public static void validateKeyRange(CFMetaData metadata, ByteBuffer superColumn, KeyRange range) throws org.apache.cassandra.exceptions.InvalidRequestException
{
if ((range.start_key == null) == (range.start_token == null)
|| (range.end_key == null) == (range.end_token == null))
{
throw new org.apache.cassandra.exceptions.InvalidRequestException("exactly one each of {start key, start token} and {end key, end token} must be specified");
}
// (key, token) is supported (for wide-row CFRR) but not (token, key)
if (range.start_token != null && range.end_key != null)
throw new org.apache.cassandra.exceptions.InvalidRequestException("start token + end key is not a supported key range");
IPartitioner p = StorageService.getPartitioner();
if (range.start_key != null && range.end_key != null)
{
Token startToken = p.getToken(range.start_key);
Token endToken = p.getToken(range.end_key);
if (startToken.compareTo(endToken) > 0 && !endToken.isMinimum())
{
if (p.preservesOrder())
throw new org.apache.cassandra.exceptions.InvalidRequestException("start key must sort before (or equal to) finish key in your partitioner!");
else
throw new org.apache.cassandra.exceptions.InvalidRequestException("start key's token sorts after end key's token. this is not allowed; you probably should not specify end key at all except with an ordered partitioner");
}
}
else if (range.start_key != null && range.end_token != null)
{
// start_token/end_token can wrap, but key/token should not
RowPosition stop = p.getTokenFactory().fromString(range.end_token).maxKeyBound();
if (RowPosition.ForKey.get(range.start_key, p).compareTo(stop) > 0 && !stop.isMinimum())
throw new org.apache.cassandra.exceptions.InvalidRequestException("Start key's token sorts after end token");
}
validateFilterClauses(metadata, range.row_filter);
if (!isEmpty(range.row_filter) && superColumn != null)
{
throw new org.apache.cassandra.exceptions.InvalidRequestException("super columns are not supported for indexing");
}
if (range.count <= 0)
{
throw new org.apache.cassandra.exceptions.InvalidRequestException("maxRows must be positive");
}
}
private static boolean isEmpty(List<IndexExpression> clause)
{
return clause == null || clause.isEmpty();
}
public static void validateIndexClauses(CFMetaData metadata, IndexClause index_clause)
throws org.apache.cassandra.exceptions.InvalidRequestException
{
if (index_clause.expressions.isEmpty())
throw new org.apache.cassandra.exceptions.InvalidRequestException("index clause list may not be empty");
if (!validateFilterClauses(metadata, index_clause.expressions))
throw new org.apache.cassandra.exceptions.InvalidRequestException("No indexed columns present in index clause with operator EQ");
}
// return true if index_clause contains an indexed columns with operator EQ
public static boolean validateFilterClauses(CFMetaData metadata, List<IndexExpression> index_clause)
throws org.apache.cassandra.exceptions.InvalidRequestException
{
if (isEmpty(index_clause))
// no filter to apply
return false;
SecondaryIndexManager idxManager = Keyspace.open(metadata.ksName).getColumnFamilyStore(metadata.cfName).indexManager;
AbstractType<?> nameValidator = SuperColumns.getComparatorFor(metadata, null);
boolean isIndexed = false;
for (IndexExpression expression : index_clause)
{
try
{
nameValidator.validate(expression.column_name);
}
catch (MarshalException me)
{
throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("[%s]=[%s] failed name validation (%s)",
ByteBufferUtil.bytesToHex(expression.column_name),
ByteBufferUtil.bytesToHex(expression.value),
me.getMessage()));
}
if (expression.value.remaining() > 0xFFFF)
throw new org.apache.cassandra.exceptions.InvalidRequestException("Index expression values may not be larger than 64K");
CellName name = metadata.comparator.cellFromByteBuffer(expression.column_name);
AbstractType<?> valueValidator = metadata.getValueValidator(name);
try
{
valueValidator.validate(expression.value);
}
catch (MarshalException me)
{
throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("[%s]=[%s] failed value validation (%s)",
ByteBufferUtil.bytesToHex(expression.column_name),
ByteBufferUtil.bytesToHex(expression.value),
me.getMessage()));
}
isIndexed |= (expression.op == IndexOperator.EQ) && idxManager.indexes(name);
}
return isIndexed;
}
public static void validateKeyspaceNotYetExisting(String newKsName) throws org.apache.cassandra.exceptions.InvalidRequestException
{
// keyspace names must be unique case-insensitively because the keyspace name becomes the directory
// where we store CF sstables. Names that differ only in case would thus cause problems on
// case-insensitive filesystems (NTFS, most installations of HFS+).
for (String ksName : Schema.instance.getKeyspaces())
{
if (ksName.equalsIgnoreCase(newKsName))
throw new org.apache.cassandra.exceptions.InvalidRequestException(String.format("Keyspace names must be case-insensitively unique (\"%s\" conflicts with \"%s\")",
newKsName,
ksName));
}
}
public static void validateKeyspaceNotSystem(String modifiedKeyspace) throws org.apache.cassandra.exceptions.InvalidRequestException
{
if (modifiedKeyspace.equalsIgnoreCase(SystemKeyspace.NAME))
throw new org.apache.cassandra.exceptions.InvalidRequestException("system keyspace is not user-modifiable");
}
public static IDiskAtomFilter asIFilter(SlicePredicate sp, CFMetaData metadata, ByteBuffer superColumn)
{
SliceRange sr = sp.slice_range;
IDiskAtomFilter filter;
CellNameType comparator = metadata.isSuper()
? new SimpleDenseCellNameType(metadata.comparator.subtype(superColumn == null ? 0 : 1))
: metadata.comparator;
if (sr == null)
{
SortedSet<CellName> ss = new TreeSet<CellName>(comparator);
for (ByteBuffer bb : sp.column_names)
ss.add(comparator.cellFromByteBuffer(bb));
filter = new NamesQueryFilter(ss);
}
else
{
filter = new SliceQueryFilter(comparator.fromByteBuffer(sr.start),
comparator.fromByteBuffer(sr.finish),
sr.reversed,
sr.count);
}
if (metadata.isSuper())
filter = SuperColumns.fromSCFilter(metadata.comparator, superColumn, filter);
return filter;
}
}
#endif
}

View File

@@ -1,24 +0,0 @@
/*
* Copyright (C) 2016-present ScyllaDB
*/
/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/
#pragma once
#include <utility>
#include "utils/fmt-compat.hh"
namespace thrift {
template <typename Ex, typename... Args>
Ex
make_exception(const char* fmt, Args&&... args) {
Ex ex;
ex.why = fmt::format(fmt::runtime(fmt), std::forward<Args>(args)...);
return ex;
}
}
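For reference, this variadic helper was used throughout the removed Thrift code like so (example lifted from thrift_validation.cc above):

throw thrift::make_exception<InvalidRequestException>(
    "Key length of {} is longer than maximum of {}", k.size(), max);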

View File

@@ -18,7 +18,7 @@ class updateable_timeout_config;
/// timeout_config represents a snapshot of the options stored in it when
/// an instance of this class is created. so far this class is only used by
/// client_state and thrift_handler. so either these classes are obliged to
/// client_state. so either these classes are obliged to
/// update it by themselves, or they are fine with using the maybe-updated
/// options in the lifecycle of a client / connection even if some of these
/// options are changed when the client / connection is still alive.
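
To make the snapshot semantics described in this comment concrete, here is a minimal self-contained sketch of the pattern; all names are hypothetical and simplified, not ScyllaDB's actual definitions:

    #include <atomic>
    #include <cstdint>

    // Live options that an operator may change at runtime.
    struct updateable_timeout_config_sketch {
        std::atomic<uint32_t> read_timeout_ms{5000};
    };

    // Frozen copy taken at construction; later changes to the live
    // object are not observed unless the owner re-snapshots.
    struct timeout_config_sketch {
        uint32_t read_timeout_ms;
        explicit timeout_config_sketch(const updateable_timeout_config_sketch& live)
            : read_timeout_ms(live.read_timeout_ms.load()) {}
    };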

View File

@@ -34,11 +34,6 @@ std::ostream& operator<<(std::ostream& os, const result_message::set_keyspace& m
return os;
}
std::ostream& operator<<(std::ostream& os, const result_message::prepared::thrift& msg) {
fmt::print(os, "{{result_message::prepared::thrift {:d}}}", msg.get_id());
return os;
}
std::ostream& operator<<(std::ostream& os, const result_message::prepared::cql& msg) {
fmt::print(os, "{{result_message::prepared::cql {}}}", to_hex(msg.get_id()));
return os;
@@ -58,7 +53,6 @@ std::ostream& operator<<(std::ostream& os, const result_message& msg) {
void visit(const result_message::void_message& m) override { _os << m; };
void visit(const result_message::set_keyspace& m) override { _os << m; };
void visit(const result_message::prepared::cql& m) override { _os << m; };
void visit(const result_message::prepared::thrift& m) override { _os << m; };
void visit(const result_message::schema_change& m) override { _os << m; };
void visit(const result_message::rows& m) override { fmt::print(_os, "{}", m); };
void visit(const result_message::bounce_to_shard& m) override { _os << m; };
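
One detail worth noting in these format strings: fmt treats doubled braces as escapes, so the {{ ... }} pairs above emit literal braces around the printed message. A standalone illustration:

    #include <fmt/core.h>

    int main() {
        // {{ and }} print literal braces; {} is a replacement field.
        fmt::print("{{id={}}}\n", 42);  // prints {id=42}
    }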

View File

@@ -47,7 +47,6 @@ public:
}
class cql;
class thrift;
private:
static ::shared_ptr<const cql3::metadata> extract_result_metadata(::shared_ptr<cql3::cql_statement> statement);
};
@@ -57,7 +56,6 @@ public:
virtual void visit(const result_message::void_message&) = 0;
virtual void visit(const result_message::set_keyspace&) = 0;
virtual void visit(const result_message::prepared::cql&) = 0;
virtual void visit(const result_message::prepared::thrift&) = 0;
virtual void visit(const result_message::schema_change&) = 0;
virtual void visit(const result_message::rows&) = 0;
virtual void visit(const result_message::bounce_to_shard&) = 0;
@@ -69,7 +67,6 @@ public:
void visit(const result_message::void_message&) override {};
void visit(const result_message::set_keyspace&) override {};
void visit(const result_message::prepared::cql&) override {};
void visit(const result_message::prepared::thrift&) override {};
void visit(const result_message::schema_change&) override {};
void visit(const result_message::rows&) override {};
void visit(const result_message::bounce_to_shard&) override { assert(false); };
@@ -187,24 +184,6 @@ public:
std::ostream& operator<<(std::ostream& os, const result_message::prepared::cql& msg);
class result_message::prepared::thrift : public result_message::prepared {
int32_t _id;
public:
thrift(int32_t id, cql3::statements::prepared_statement::checked_weak_ptr prepared, bool support_lwt_opt)
: result_message::prepared(std::move(prepared), support_lwt_opt)
, _id{id}
{ }
int32_t get_id() const {
return _id;
}
virtual void accept(result_message::visitor& v) const override {
v.visit(*this);
}
};
std::ostream& operator<<(std::ostream& os, const result_message::prepared::thrift& msg);
class result_message::schema_change : public result_message {
private:
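
The paired edits above (dropping a visit() overload from every visitor along with the message class and its accept() override) follow from the double-dispatch idiom result_message uses. A stripped-down sketch of that idiom, with simplified names rather than the real hierarchy:

    // Each concrete message's accept() calls the matching visit()
    // overload, so removing a message type means touching every visitor.
    struct rows_msg;

    struct visitor {
        virtual void visit(const rows_msg&) = 0;
        virtual ~visitor() = default;
    };

    struct result_message_sketch {
        virtual void accept(visitor& v) const = 0;
        virtual ~result_message_sketch() = default;
    };

    struct rows_msg : result_message_sketch {
        void accept(visitor& v) const override { v.visit(*this); }
    };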

View File

@@ -616,15 +616,14 @@ void cql_server::connection::on_connection_close()
_server._notifier->unregister_connection(this);
}
std::tuple<net::inet_address, int, client_type> cql_server::connection::make_client_key(const service::client_state& cli_state) {
return std::make_tuple(cli_state.get_client_address().addr(),
cli_state.get_client_port(),
cli_state.is_thrift() ? client_type::thrift : client_type::cql);
std::pair<net::inet_address, int> cql_server::connection::make_client_key(const service::client_state& cli_state) {
return {cli_state.get_client_address().addr(),
cli_state.get_client_port()};
}
client_data cql_server::connection::make_client_data() const {
client_data cd;
std::tie(cd.ip, cd.port, cd.ct) = make_client_key(_client_state);
std::tie(cd.ip, cd.port) = make_client_key(_client_state);
cd.shard_id = this_shard_id();
cd.protocol_version = _version;
cd.driver_name = _client_state.get_driver_name();
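
Side note on the new pair return: std::tie stays here because the code assigns into existing members of client_data, but a caller introducing fresh locals could now unpack the key with structured bindings (hypothetical snippet, not part of the patch):

    // Hypothetical caller of the pair-returning make_client_key().
    auto [ip, port] = make_client_key(cli_state);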
@@ -1064,14 +1063,14 @@ future<std::unique_ptr<cql_server::response>> cql_server::connection::process_pr
return parallel_for_each(cpus.begin(), cpus.end(), [this, query, cpu_id, &client_state] (unsigned int c) mutable {
if (c != cpu_id) {
return smp::submit_to(c, [this, query, &client_state] () mutable {
return _server._query_processor.local().prepare(std::move(query), client_state, false).discard_result();
return _server._query_processor.local().prepare(std::move(query), client_state).discard_result();
});
} else {
return make_ready_future<>();
}
}).then([this, query, stream, &client_state, trace_state] () mutable {
tracing::trace(trace_state, "Done preparing on remote shards");
return _server._query_processor.local().prepare(std::move(query), client_state, false).then([this, stream, trace_state] (auto msg) {
return _server._query_processor.local().prepare(std::move(query), client_state).then([this, stream, trace_state] (auto msg) {
tracing::trace(trace_state, "Done preparing on a local shard - preparing a result. ID is [{}]", seastar::value_of([&msg] {
return messages::result_message::prepared::cql::get_id(msg);
}));
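
The loop above follows a common Seastar pattern: run the preparation on every other shard first, then on the local shard that will answer. A minimal self-contained sketch of that pattern; prepare_one() is a hypothetical stand-in for query_processor::prepare(), and only the smp/parallel_for_each plumbing mirrors the code above:

    #include <seastar/core/smp.hh>
    #include <seastar/core/loop.hh>
    #include <seastar/core/future.hh>

    // Hypothetical per-shard work; placeholder for the real prepare step.
    static seastar::future<> prepare_one() {
        return seastar::make_ready_future<>();
    }

    seastar::future<> prepare_on_all_shards() {
        return seastar::parallel_for_each(seastar::smp::all_cpus(),
                [] (unsigned c) -> seastar::future<> {
            if (c == seastar::this_shard_id()) {
                return seastar::make_ready_future<>();  // local shard goes last
            }
            return seastar::smp::submit_to(c, [] { return prepare_one(); });
        }).then([] {
            return prepare_one();  // finally prepare locally and build the reply
        });
    }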

View File

@@ -223,7 +223,7 @@ private:
future<> process_request() override;
void handle_error(future<>&& f) override;
void on_connection_close() override;
static std::tuple<net::inet_address, int, client_type> make_client_key(const service::client_state& cli_state);
static std::pair<net::inet_address, int> make_client_key(const service::client_state& cli_state);
client_data make_client_data() const;
const service::client_state& get_client_state() const { return _client_state; }
private:

View File

@@ -35,7 +35,6 @@ std::string_view format_as(cause c) {
case cause::LEGACY_COMPOSITE_KEYS: return "LEGACY_COMPOSITE_KEYS";
case cause::COLLECTION_RANGE_TOMBSTONES: return "COLLECTION_RANGE_TOMBSTONES";
case cause::RANGE_DELETES: return "RANGE_DELETES";
case cause::THRIFT: return "THRIFT";
case cause::VALIDATION: return "VALIDATION";
case cause::REVERSED: return "REVERSED";
case cause::COMPRESSION: return "COMPRESSION";
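
For reference, format_as() is fmt's opt-in extension point: when a function with that name is visible via ADL next to the type, fmt formats the type through the returned value. A tiny standalone illustration (assuming fmt >= 9):

    #include <fmt/core.h>
    #include <string_view>

    enum class colour { red, green };

    // fmt finds this via ADL, exactly like format_as(cause) above.
    constexpr std::string_view format_as(colour c) {
        return c == colour::red ? "red" : "green";
    }

    int main() {
        fmt::print("{}\n", colour::green);  // prints "green"
    }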

View File

@@ -30,7 +30,6 @@ enum class cause {
LEGACY_COMPOSITE_KEYS,
COLLECTION_RANGE_TOMBSTONES,
RANGE_DELETES,
THRIFT,
VALIDATION,
REVERSED,
COMPRESSION,