cql3/restrictions: Use free functions instead of methods
Instead of `restriction` class methods, use the new free functions. Specific replacement actions are listed below. Note that class `restrictions` (plural) remains intact -- both its methods and its type hierarchy remain intact for now. Ensure full test coverage of the replacement code with new file test/boost/restrictions_test.cc and some extra testcases in test/cql/*. Drop some existing tests because they codify buggy behaviour (reference #6369, #6382). Drop others because they forbid relation combinations that are now allowed (eg, mixing equality and inequality, comparing to NULL, etc.). Here are some specific categories of what was replaced: - restriction::is_foo predicates are replaced by using the free function find_if; sometimes it is used transitively (see, eg, has_slice) - restriction::is_multi_column is replaced by dynamic casts (recall that the `restrictions` class hierarchy still exists) - utility methods is_satisfied_by, is_supported_by, to_string, and uses_function are replaced by eponymous free functions; note that restrictions::uses_function still exists - restriction::apply_to is replaced by free function replace_column_def - when checking infinite_bound_range_deletions, the has_bound is replaced by local free function bounded_ck - restriction::bounds and restriction::value are replaced by the more general free function possible_lhs_values - using free functions allows us to simplify the multi_column_restriction and token_restriction hierarchies; their methods merge_with and uses_function became identical in all subclasses, so they were moved to the base class - single_column_primary_key_restrictions<clustering_key>::needs_filtering was changed to reuse num_prefix_columns_that_need_not_be_filtered, which uses free functions Fixes #5799. Fixes #6369. Fixes #6371. Fixes #6372. Fixes #6382. Signed-off-by: Dejan Mircevski <dejan@scylladb.com>
This commit is contained in:
@@ -353,6 +353,7 @@ scylla_tests = set([
|
||||
'test/boost/range_test',
|
||||
'test/boost/range_tombstone_list_test',
|
||||
'test/boost/reusable_buffer_test',
|
||||
'test/boost/restrictions_test',
|
||||
'test/boost/role_manager_test',
|
||||
'test/boost/row_cache_test',
|
||||
'test/boost/schema_change_test',
|
||||
|
||||
@@ -86,9 +86,9 @@ public:
|
||||
}
|
||||
|
||||
virtual void merge_with(::shared_ptr<restriction> other) override {
|
||||
statements::request_validations::check_true(other->is_multi_column(),
|
||||
const auto as_pkr = dynamic_pointer_cast<clustering_key_restrictions>(other);
|
||||
statements::request_validations::check_true(bool(as_pkr),
|
||||
"Mixing single column relations and multi column relations on clustering columns is not allowed");
|
||||
auto as_pkr = static_pointer_cast<clustering_key_restrictions>(other);
|
||||
do_merge_with(as_pkr);
|
||||
update_asc_desc_existence();
|
||||
expression = make_conjunction(std::move(expression), other->expression);
|
||||
@@ -109,6 +109,10 @@ public:
|
||||
});
|
||||
}
|
||||
|
||||
virtual bool uses_function(const sstring& ks_name, const sstring& function_name) const override {
|
||||
return cql3::restrictions::uses_function(expression, ks_name, function_name);
|
||||
}
|
||||
|
||||
protected:
|
||||
virtual void do_merge_with(::shared_ptr<clustering_key_restrictions> other) = 0;
|
||||
|
||||
@@ -217,10 +221,6 @@ public:
|
||||
std::vector<column_value>(_column_defs.cbegin(), _column_defs.cend()), &operator_type::EQ, _value};
|
||||
}
|
||||
|
||||
virtual bool uses_function(const sstring& ks_name, const sstring& function_name) const override {
|
||||
return restriction::term_uses_function(_value, ks_name, function_name);
|
||||
}
|
||||
|
||||
virtual bool is_supported_by(const secondary_index::index& index) const override {
|
||||
for (auto* cdef : _column_defs) {
|
||||
if (index.supports_expression(*cdef, cql3::operator_type::EQ)) {
|
||||
@@ -386,10 +386,6 @@ public:
|
||||
::make_shared<lists::delayed_value>(_values)};
|
||||
}
|
||||
|
||||
virtual bool uses_function(const sstring& ks_name, const sstring& function_name) const override {
|
||||
return restriction::term_uses_function(_values, ks_name, function_name);
|
||||
}
|
||||
|
||||
virtual sstring to_string() const override {
|
||||
return format("IN({})", std::to_string(_values));
|
||||
}
|
||||
@@ -422,10 +418,6 @@ public:
|
||||
std::move(marker)};
|
||||
}
|
||||
|
||||
virtual bool uses_function(const sstring& ks_name, const sstring& function_name) const override {
|
||||
return false;
|
||||
}
|
||||
|
||||
virtual sstring to_string() const override {
|
||||
return "IN ?";
|
||||
}
|
||||
@@ -512,26 +504,21 @@ public:
|
||||
return _slice.has_bound(b);
|
||||
}
|
||||
|
||||
virtual bool uses_function(const sstring& ks_name, const sstring& function_name) const override {
|
||||
return (_slice.has_bound(statements::bound::START) && restriction::term_uses_function(_slice.bound(statements::bound::START), ks_name, function_name))
|
||||
|| (_slice.has_bound(statements::bound::END) && restriction::term_uses_function(_slice.bound(statements::bound::END), ks_name, function_name));
|
||||
}
|
||||
|
||||
virtual bool is_inclusive(statements::bound b) const override {
|
||||
return _slice.is_inclusive(b);
|
||||
}
|
||||
|
||||
virtual void do_merge_with(::shared_ptr<clustering_key_restrictions> other) override {
|
||||
using namespace statements::request_validations;
|
||||
check_true(other->is_slice(),
|
||||
check_true(has_slice(other->expression),
|
||||
"Column \"%s\" cannot be restricted by both an equality and an inequality relation",
|
||||
get_columns_in_commons(other));
|
||||
auto other_slice = static_pointer_cast<slice>(other);
|
||||
|
||||
check_false(has_bound(statements::bound::START) && other_slice->has_bound(statements::bound::START),
|
||||
check_false(_slice.has_bound(statements::bound::START) && other_slice->_slice.has_bound(statements::bound::START),
|
||||
"More than one restriction was found for the start bound on %s",
|
||||
get_columns_in_commons(other));
|
||||
check_false(has_bound(statements::bound::END) && other_slice->has_bound(statements::bound::END),
|
||||
check_false(_slice.has_bound(statements::bound::END) && other_slice->_slice.has_bound(statements::bound::END),
|
||||
"More than one restriction was found for the end bound on %s",
|
||||
get_columns_in_commons(other));
|
||||
|
||||
@@ -560,10 +547,10 @@ private:
|
||||
}
|
||||
|
||||
std::vector<bytes_opt> read_bound_components(const query_options& options, statements::bound b) const {
|
||||
if (!has_bound(b)) {
|
||||
if (!_slice.has_bound(b)) {
|
||||
return {};
|
||||
}
|
||||
auto vals = component_bounds(b, options);
|
||||
auto vals = first_multicolumn_bound(expression, options, b);
|
||||
for (unsigned i = 0; i < vals.size(); i++) {
|
||||
statements::request_validations::check_not_null(vals[i], "Invalid null value in condition for column %s", _column_defs.at(i)->name_as_text());
|
||||
}
|
||||
@@ -578,9 +565,9 @@ private:
|
||||
*/
|
||||
std::vector<bounds_range_type> bounds_ranges_unified_order(const query_options& options) const {
|
||||
auto start_prefix = clustering_key_prefix::from_optional_exploded(*_schema, read_bound_components(options, statements::bound::START));
|
||||
auto start_bound = bounds_range_type::bound(std::move(start_prefix), is_inclusive(statements::bound::START));
|
||||
auto start_bound = bounds_range_type::bound(std::move(start_prefix), _slice.is_inclusive(statements::bound::START));
|
||||
auto end_prefix = clustering_key_prefix::from_optional_exploded(*_schema, read_bound_components(options, statements::bound::END));
|
||||
auto end_bound = bounds_range_type::bound(std::move(end_prefix), is_inclusive(statements::bound::END));
|
||||
auto end_bound = bounds_range_type::bound(std::move(end_prefix), _slice.is_inclusive(statements::bound::END));
|
||||
auto make_range = [&] () {
|
||||
if (is_asc_order()) {
|
||||
return bounds_range_type::make(start_bound, end_bound);
|
||||
@@ -719,8 +706,8 @@ private:
|
||||
std::vector<restriction_shared_ptr> ret;
|
||||
auto start_components = read_bound_components(options, statements::bound::START);
|
||||
auto end_components = read_bound_components(options, statements::bound::END);
|
||||
bool start_inclusive = is_inclusive(statements::bound::START);
|
||||
bool end_inclusive = is_inclusive(statements::bound::END);
|
||||
bool start_inclusive = _slice.is_inclusive(statements::bound::START);
|
||||
bool end_inclusive = _slice.is_inclusive(statements::bound::END);
|
||||
std::optional<std::size_t> first_neq_component = std::nullopt;
|
||||
|
||||
// find the first index of the first component that is not equal between the tuples.
|
||||
|
||||
@@ -93,8 +93,8 @@ public:
|
||||
}
|
||||
|
||||
virtual bool needs_filtering(const schema& schema) const {
|
||||
return !empty() && !is_on_token() &&
|
||||
(has_unrestricted_components(schema) || is_contains() || is_slice() || is_LIKE());
|
||||
return !empty() && !has_token(expression) &&
|
||||
(has_unrestricted_components(schema) || has_slice_or_needs_filtering(expression));
|
||||
}
|
||||
|
||||
// NOTICE(sarna): This function is useless for partition key restrictions,
|
||||
|
||||
@@ -41,6 +41,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <functional>
|
||||
#include <vector>
|
||||
#include "schema_fwd.hh"
|
||||
#include "cartesian_product.hh"
|
||||
@@ -48,6 +49,7 @@
|
||||
#include "cql3/restrictions/single_column_restrictions.hh"
|
||||
#include "cql3/cql_config.hh"
|
||||
#include <boost/algorithm/cxx11/all_of.hpp>
|
||||
#include <boost/algorithm/cxx11/any_of.hpp>
|
||||
#include <boost/range/adaptor/transformed.hpp>
|
||||
#include <boost/range/adaptor/filtered.hpp>
|
||||
#include <boost/range/adaptor/map.hpp>
|
||||
@@ -117,8 +119,10 @@ public:
|
||||
if (!this_cdef) {
|
||||
throw exceptions::invalid_request_exception(format("Base column {} not found in view index schema", other_cdef->name_as_text()));
|
||||
}
|
||||
::shared_ptr<single_column_restriction> restriction = entry.second;
|
||||
_restrictions->add_restriction(restriction->apply_to(*this_cdef));
|
||||
// Make a(ny) concrete subclass, since single_column_restriction is still abstract for now.
|
||||
auto r = ::make_shared<single_column_restriction::EQ>(*this_cdef, nullptr);
|
||||
r->expression = replace_column_def(entry.second->expression, this_cdef);
|
||||
_restrictions->add_restriction(r);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -143,13 +147,13 @@ public:
|
||||
auto last_column = *_restrictions->last_column();
|
||||
auto new_column = restriction->get_column_def();
|
||||
|
||||
if (this->is_slice() && _schema->position(new_column) > _schema->position(last_column)) {
|
||||
if (has_slice(this->expression) && _schema->position(new_column) > _schema->position(last_column)) {
|
||||
throw exceptions::invalid_request_exception(format("Clustering column \"{}\" cannot be restricted (preceding column \"{}\" is restricted by a non-EQ relation)",
|
||||
new_column.name_as_text(), last_column.name_as_text()));
|
||||
}
|
||||
|
||||
if (_schema->position(new_column) < _schema->position(last_column)) {
|
||||
if (restriction->is_slice()) {
|
||||
if (has_slice(restriction->expression)) {
|
||||
throw exceptions::invalid_request_exception(format("PRIMARY KEY column \"{}\" cannot be restricted (preceding column \"{}\" is restricted by a non-EQ relation)",
|
||||
last_column.name_as_text(), new_column.name_as_text()));
|
||||
}
|
||||
@@ -180,11 +184,14 @@ public:
|
||||
}
|
||||
|
||||
virtual void merge_with(::shared_ptr<restriction> restriction) override {
|
||||
if (restriction->is_multi_column()) {
|
||||
if (find_if(restriction->expression, [] (const binary_operator& b) {
|
||||
return std::holds_alternative<std::vector<column_value>>(b.lhs)
|
||||
&& std::get<std::vector<column_value>>(b.lhs).size() > 1;
|
||||
})) {
|
||||
throw exceptions::invalid_request_exception(
|
||||
"Mixing single column relations and multi column relations on clustering columns is not allowed");
|
||||
}
|
||||
if (restriction->is_on_token()) {
|
||||
if (has_token(restriction->expression)) {
|
||||
throw exceptions::invalid_request_exception(
|
||||
format("Columns \"{}\" cannot be restricted by both a normal relation and a token relation",
|
||||
join(", ", get_column_defs())));
|
||||
@@ -196,20 +203,13 @@ public:
|
||||
std::vector<std::vector<bytes_opt>> value_vector;
|
||||
value_vector.reserve(_restrictions->size());
|
||||
for (auto&& e : restrictions()) {
|
||||
const column_definition* def = e.first;
|
||||
auto&& r = e.second;
|
||||
assert(!r->is_slice());
|
||||
|
||||
std::vector<bytes_opt> values = r->values(options);
|
||||
for (auto&& val : values) {
|
||||
if (!val) {
|
||||
throw exceptions::invalid_request_exception(format("Invalid null value for column {}", def->name_as_text()));
|
||||
}
|
||||
}
|
||||
assert(!has_slice(r->expression));
|
||||
auto values = std::get<value_list>(possible_lhs_values(e.first, r->expression, options));
|
||||
if (values.empty()) {
|
||||
return {};
|
||||
}
|
||||
value_vector.emplace_back(std::move(values));
|
||||
value_vector.emplace_back(std::vector<bytes_opt>(values.cbegin(), values.cend()));
|
||||
}
|
||||
|
||||
std::vector<ValueType> result;
|
||||
@@ -230,61 +230,57 @@ private:
|
||||
static constexpr auto invalid_null_msg = std::is_same<ValueType, partition_key>::value
|
||||
? "Invalid null value for partition key part %s" : "Invalid null value for clustering key part %s";
|
||||
|
||||
// TODO: rewrite this to simply invoke possible_lhs_values on each clustering column, find the first
|
||||
// non-list, and take Cartesian product of that prefix. No need for to_range() and std::get() here.
|
||||
if (_restrictions->is_all_eq()) {
|
||||
ranges.reserve(1);
|
||||
if (_restrictions->size() == 1) {
|
||||
auto&& e = *_restrictions->restrictions().begin();
|
||||
const column_definition* def = e.first;
|
||||
auto&& r = e.second;
|
||||
auto&& val = r->value(options);
|
||||
if (!val) {
|
||||
throw exceptions::invalid_request_exception(sprint(invalid_null_msg, def->name_as_text()));
|
||||
auto&& e = *restrictions().begin();
|
||||
const auto b = std::get<binary_operator>(e.second->expression).rhs->bind_and_get(options);
|
||||
if (!b) {
|
||||
throw exceptions::invalid_request_exception(sprint(invalid_null_msg, e.first->name_as_text()));
|
||||
}
|
||||
ranges.emplace_back(range_type::make_singular(ValueType::from_single_value(*_schema, std::move(*val))));
|
||||
return ranges;
|
||||
return {range_type::make_singular(ValueType::from_single_value(*_schema, to_bytes(b)))};
|
||||
}
|
||||
std::vector<bytes> components;
|
||||
components.reserve(_restrictions->size());
|
||||
for (auto&& e : _restrictions->restrictions()) {
|
||||
for (auto&& e : restrictions()) {
|
||||
const column_definition* def = e.first;
|
||||
auto&& r = e.second;
|
||||
assert(components.size() == _schema->position(*def));
|
||||
auto&& val = r->value(options);
|
||||
if (!val) {
|
||||
throw exceptions::invalid_request_exception(sprint(invalid_null_msg, def->name_as_text()));
|
||||
const auto b = std::get<binary_operator>(e.second->expression).rhs->bind_and_get(options);
|
||||
if (!b) {
|
||||
throw exceptions::invalid_request_exception(sprint(invalid_null_msg, e.first->name_as_text()));
|
||||
}
|
||||
components.emplace_back(std::move(*val));
|
||||
components.emplace_back(to_bytes(b));
|
||||
}
|
||||
ranges.emplace_back(range_type::make_singular(ValueType::from_exploded(*_schema, std::move(components))));
|
||||
return ranges;
|
||||
return {range_type::make_singular(ValueType::from_exploded(*_schema, std::move(components)))};
|
||||
}
|
||||
|
||||
std::vector<std::vector<bytes_opt>> vec_of_values;
|
||||
for (auto&& e : _restrictions->restrictions()) {
|
||||
for (auto&& e : restrictions()) {
|
||||
const column_definition* def = e.first;
|
||||
auto&& r = e.second;
|
||||
|
||||
if (vec_of_values.size() != _schema->position(*def) || r->is_contains() || r->is_LIKE()) {
|
||||
if (vec_of_values.size() != _schema->position(*def) || cql3::restrictions::needs_filtering(r->expression)) {
|
||||
// The prefixes built so far are the longest we can build,
|
||||
// the rest of the constraints will have to be applied using filtering.
|
||||
break;
|
||||
}
|
||||
|
||||
if (r->is_slice()) {
|
||||
if (has_slice(r->expression)) {
|
||||
const auto values = possible_lhs_values(def, r->expression, options);
|
||||
if (values == value_set(value_list{})) {
|
||||
return {};
|
||||
}
|
||||
const auto b = to_range(values);
|
||||
if (cartesian_product_is_empty(vec_of_values)) {
|
||||
auto read_bound = [r, &options, this] (statements::bound b) -> std::optional<range_bound> {
|
||||
if (!r->has_bound(b)) {
|
||||
return {};
|
||||
}
|
||||
auto value = r->bounds(b, options)[0];
|
||||
if (!value) {
|
||||
throw exceptions::invalid_request_exception(sprint(invalid_null_msg, r->to_string()));
|
||||
}
|
||||
return {range_bound(ValueType::from_single_value(*_schema, *value), r->is_inclusive(b))};
|
||||
// TODO: use b.transform().
|
||||
const auto make_bound = [&] (const std::optional<::range_bound<bytes>>& bytes_bound) {
|
||||
return bytes_bound ?
|
||||
std::optional(range_bound(ValueType::from_single_value(*_schema, bytes_bound->value()),
|
||||
bytes_bound->is_inclusive())) :
|
||||
std::nullopt;
|
||||
};
|
||||
ranges.emplace_back(range_type(
|
||||
read_bound(statements::bound::START),
|
||||
read_bound(statements::bound::END)));
|
||||
ranges.emplace_back(range_type(make_bound(b.start()), make_bound(b.end())));
|
||||
if (def->type->is_reversed()) {
|
||||
ranges.back().reverse();
|
||||
}
|
||||
@@ -296,25 +292,18 @@ private:
|
||||
restricted_component_name_v<ValueType>);
|
||||
ranges.reserve(size);
|
||||
for (auto&& prefix : make_cartesian_product(vec_of_values)) {
|
||||
auto read_bound = [r, &prefix, &options, this](statements::bound bound) -> range_bound {
|
||||
if (r->has_bound(bound)) {
|
||||
auto value = std::move(r->bounds(bound, options)[0]);
|
||||
if (!value) {
|
||||
throw exceptions::invalid_request_exception(sprint(invalid_null_msg, r->to_string()));
|
||||
}
|
||||
prefix.emplace_back(std::move(value));
|
||||
// TODO: use ranges.transform().
|
||||
auto make_bound = [&prefix, this] (const std::optional<::range_bound<bytes>>& bytes_bound) {
|
||||
if (bytes_bound) {
|
||||
prefix.emplace_back(bytes_bound->value());
|
||||
auto val = ValueType::from_optional_exploded(*_schema, prefix);
|
||||
prefix.pop_back();
|
||||
return range_bound(std::move(val), r->is_inclusive(bound));
|
||||
return range_bound(std::move(val), bytes_bound->is_inclusive());
|
||||
} else {
|
||||
return range_bound(ValueType::from_optional_exploded(*_schema, prefix));
|
||||
}
|
||||
};
|
||||
|
||||
ranges.emplace_back(range_type(
|
||||
read_bound(statements::bound::START),
|
||||
read_bound(statements::bound::END)));
|
||||
|
||||
ranges.emplace_back(range_type(make_bound(b.start()), make_bound(b.end())));
|
||||
if (def->type->is_reversed()) {
|
||||
ranges.back().reverse();
|
||||
}
|
||||
@@ -323,16 +312,11 @@ private:
|
||||
return ranges;
|
||||
}
|
||||
|
||||
auto values = r->values(options);
|
||||
for (auto&& val : values) {
|
||||
if (!val) {
|
||||
throw exceptions::invalid_request_exception(sprint(invalid_null_msg, def->name_as_text()));
|
||||
}
|
||||
}
|
||||
auto values = std::get<value_list>(possible_lhs_values(def, r->expression, options));
|
||||
if (values.empty()) {
|
||||
return {};
|
||||
}
|
||||
vec_of_values.emplace_back(std::move(values));
|
||||
vec_of_values.emplace_back(std::vector<bytes_opt>(values.cbegin(), values.cend()));
|
||||
}
|
||||
|
||||
auto size = cartesian_product_size(vec_of_values);
|
||||
@@ -473,42 +457,25 @@ inline bool single_column_primary_key_restrictions<partition_key>::needs_filteri
|
||||
return primary_key_restrictions<partition_key>::needs_filtering(schema);
|
||||
}
|
||||
|
||||
template<>
|
||||
inline bool single_column_primary_key_restrictions<clustering_key>::needs_filtering(const schema& schema) const {
|
||||
// Restrictions currently need filtering in three cases:
|
||||
// 1. any of them is a CONTAINS restriction
|
||||
// 2. restrictions do not form a contiguous prefix (i.e. there are gaps in it)
|
||||
// 3. a SLICE restriction isn't on a last place
|
||||
column_id position = 0;
|
||||
for (const auto& restriction : _restrictions->restrictions() | boost::adaptors::map_values) {
|
||||
if (restriction->is_contains() || restriction->is_LIKE() || position != restriction->get_column_def().id) {
|
||||
return true;
|
||||
}
|
||||
if (!restriction->is_slice()) {
|
||||
position = restriction->get_column_def().id + 1;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// How many of the restrictions (in column order) do not need filtering
|
||||
// because they are implemented as a slice (potentially, a contiguous disk
|
||||
// read). For example, if we have the filter "c1 < 3 and c2 > 3", c1 does not
|
||||
// need filtering but c2 does so num_prefix_columns_that_need_not_be_filtered
|
||||
// will be 1.
|
||||
// The implementation of num_prefix_columns_that_need_not_be_filtered() is
|
||||
// closely tied to that of needs_filtering() above - basically, if only the
|
||||
// first num_prefix_columns_that_need_not_be_filtered() restrictions existed,
|
||||
// then needs_filtering() would have returned false.
|
||||
template<>
|
||||
inline unsigned single_column_primary_key_restrictions<clustering_key>::num_prefix_columns_that_need_not_be_filtered() const {
|
||||
// Restrictions currently need filtering in three cases:
|
||||
// 1. any of them is a CONTAINS restriction
|
||||
// 2. restrictions do not form a contiguous prefix (i.e. there are gaps in it)
|
||||
// 3. a SLICE restriction isn't on a last place
|
||||
column_id position = 0;
|
||||
unsigned int count = 0;
|
||||
for (const auto& restriction : _restrictions->restrictions() | boost::adaptors::map_values) {
|
||||
if (restriction->is_contains() || position != restriction->get_column_def().id) {
|
||||
for (const auto& restriction : restrictions() | boost::adaptors::map_values) {
|
||||
if (cql3::restrictions::needs_filtering(restriction->expression)
|
||||
|| position != restriction->get_column_def().id) {
|
||||
return count;
|
||||
}
|
||||
if (!restriction->is_slice()) {
|
||||
if (!has_slice(restriction->expression)) {
|
||||
position = restriction->get_column_def().id + 1;
|
||||
}
|
||||
count++;
|
||||
@@ -516,6 +483,11 @@ inline unsigned single_column_primary_key_restrictions<clustering_key>::num_pref
|
||||
return count;
|
||||
}
|
||||
|
||||
template<>
|
||||
inline bool single_column_primary_key_restrictions<clustering_key>::needs_filtering(const schema&) const {
|
||||
return num_prefix_columns_that_need_not_be_filtered() < size();
|
||||
}
|
||||
|
||||
template<>
|
||||
inline unsigned single_column_primary_key_restrictions<partition_key>::num_prefix_columns_that_need_not_be_filtered() const {
|
||||
// skip_filtering() is currently called only for clustering key
|
||||
|
||||
@@ -104,7 +104,13 @@ public:
|
||||
|
||||
virtual bytes_opt value_for(const column_definition& cdef, const query_options& options) const override {
|
||||
auto it = _restrictions.find(std::addressof(cdef));
|
||||
return (it != _restrictions.end()) ? it->second->value(options) : bytes_opt{};
|
||||
if (it == _restrictions.end()) {
|
||||
return bytes_opt{};
|
||||
} else {
|
||||
const auto values = std::get<value_list>(possible_lhs_values(&cdef, it->second->expression, options));
|
||||
assert(values.size() == 1);
|
||||
return values.front();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -123,7 +129,7 @@ public:
|
||||
|
||||
virtual bool uses_function(const sstring& ks_name, const sstring& function_name) const override {
|
||||
for (auto&& e : _restrictions) {
|
||||
if (e.second->uses_function(ks_name, function_name)) {
|
||||
if (cql3::restrictions::uses_function(e.second->expression, ks_name, function_name)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@@ -145,19 +151,22 @@ public:
|
||||
* @throws InvalidRequestException if the new restriction cannot be added
|
||||
*/
|
||||
void add_restriction(::shared_ptr<single_column_restriction> restriction) {
|
||||
_is_all_eq &= restriction->is_EQ();
|
||||
if (!find(restriction->expression, operator_type::EQ)) {
|
||||
_is_all_eq = false;
|
||||
}
|
||||
|
||||
auto i = _restrictions.find(&restriction->get_column_def());
|
||||
if (i == _restrictions.end()) {
|
||||
_restrictions.emplace_hint(i, &restriction->get_column_def(), std::move(restriction));
|
||||
} else {
|
||||
i->second->merge_with(restriction);
|
||||
auto& e = i->second->expression;
|
||||
e = make_conjunction(std::move(e), restriction->expression);
|
||||
}
|
||||
}
|
||||
|
||||
virtual bool has_supporting_index(const secondary_index::secondary_index_manager& index_manager, allow_local_index allow_local) const override {
|
||||
for (auto&& e : _restrictions) {
|
||||
if (e.second->has_supporting_index(index_manager, allow_local)) {
|
||||
if (cql3::restrictions::has_supporting_index(e.second->expression, index_manager, allow_local)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@@ -223,11 +232,9 @@ public:
|
||||
bool has_multiple_contains() const {
|
||||
uint32_t number_of_contains = 0;
|
||||
for (auto&& e : _restrictions) {
|
||||
if (e.second->is_contains()) {
|
||||
auto contains_ = static_pointer_cast<single_column_restriction::contains>(e.second);
|
||||
number_of_contains += contains_->number_of_values();
|
||||
number_of_contains += contains_->number_of_keys();
|
||||
number_of_contains += contains_->number_of_entries();
|
||||
number_of_contains += count_if(e.second->expression, is_on_collection);
|
||||
if (number_of_contains > 1) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return number_of_contains > 1;
|
||||
|
||||
@@ -23,12 +23,14 @@
|
||||
#include <algorithm>
|
||||
#include <boost/algorithm/cxx11/all_of.hpp>
|
||||
#include <boost/algorithm/cxx11/any_of.hpp>
|
||||
#include <boost/range/algorithm/transform.hpp>
|
||||
#include <boost/range/algorithm.hpp>
|
||||
#include <boost/range/adaptors.hpp>
|
||||
#include <boost/range/algorithm.hpp>
|
||||
#include <functional>
|
||||
#include <stdexcept>
|
||||
|
||||
#include "query-result-reader.hh"
|
||||
#include "statement_restrictions.hh"
|
||||
#include "single_column_primary_key_restrictions.hh"
|
||||
#include "multi_column_restriction.hh"
|
||||
#include "token_restriction.hh"
|
||||
#include "database.hh"
|
||||
|
||||
@@ -60,16 +62,10 @@ public:
|
||||
using bounds_range_type = typename primary_key_restrictions<T>::bounds_range_type;
|
||||
|
||||
::shared_ptr<primary_key_restrictions<T>> do_merge_to(schema_ptr schema, ::shared_ptr<restriction> restriction) const {
|
||||
if (restriction->is_multi_column()) {
|
||||
throw std::runtime_error(format("{} not implemented", __PRETTY_FUNCTION__));
|
||||
}
|
||||
return ::make_shared<single_column_primary_key_restrictions<T>>(schema, _allow_filtering)->merge_to(schema, restriction);
|
||||
}
|
||||
::shared_ptr<primary_key_restrictions<T>> merge_to(schema_ptr schema, ::shared_ptr<restriction> restriction) override {
|
||||
if (restriction->is_multi_column()) {
|
||||
throw std::runtime_error(format("{} not implemented", __PRETTY_FUNCTION__));
|
||||
}
|
||||
if (restriction->is_on_token()) {
|
||||
if (has_token(restriction->expression)) {
|
||||
return static_pointer_cast<token_restriction>(restriction);
|
||||
}
|
||||
return ::make_shared<single_column_primary_key_restrictions<T>>(schema, _allow_filtering)->merge_to(restriction);
|
||||
@@ -124,7 +120,7 @@ public:
|
||||
template<>
|
||||
::shared_ptr<primary_key_restrictions<partition_key>>
|
||||
statement_restrictions::initial_key_restrictions<partition_key>::merge_to(schema_ptr schema, ::shared_ptr<restriction> restriction) {
|
||||
if (restriction->is_on_token()) {
|
||||
if (has_token(restriction->expression)) {
|
||||
return static_pointer_cast<token_restriction>(restriction);
|
||||
}
|
||||
return do_merge_to(std::move(schema), std::move(restriction));
|
||||
@@ -133,8 +129,8 @@ statement_restrictions::initial_key_restrictions<partition_key>::merge_to(schema
|
||||
template<>
|
||||
::shared_ptr<primary_key_restrictions<clustering_key_prefix>>
|
||||
statement_restrictions::initial_key_restrictions<clustering_key_prefix>::merge_to(schema_ptr schema, ::shared_ptr<restriction> restriction) {
|
||||
if (restriction->is_multi_column()) {
|
||||
return static_pointer_cast<primary_key_restrictions<clustering_key_prefix>>(restriction);
|
||||
if (auto p = dynamic_pointer_cast<multi_column_restriction>(restriction)) {
|
||||
return p;
|
||||
}
|
||||
return do_merge_to(std::move(schema), std::move(restriction));
|
||||
}
|
||||
@@ -272,7 +268,7 @@ statement_restrictions::statement_restrictions(database& db,
|
||||
|
||||
if (_uses_secondary_indexing || _clustering_columns_restrictions->needs_filtering(*_schema)) {
|
||||
_index_restrictions.push_back(_clustering_columns_restrictions);
|
||||
} else if (_clustering_columns_restrictions->is_contains()) {
|
||||
} else if (find_if(_clustering_columns_restrictions->expression, &is_on_collection)) {
|
||||
fail(unimplemented::cause::INDEXES);
|
||||
#if 0
|
||||
_index_restrictions.push_back(new Forwardingprimary_key_restrictions() {
|
||||
@@ -317,9 +313,9 @@ statement_restrictions::statement_restrictions(database& db,
|
||||
}
|
||||
|
||||
void statement_restrictions::add_restriction(::shared_ptr<restriction> restriction, bool for_view, bool allow_filtering) {
|
||||
if (restriction->is_multi_column()) {
|
||||
if (dynamic_pointer_cast<multi_column_restriction>(restriction)) {
|
||||
_clustering_columns_restrictions = _clustering_columns_restrictions->merge_to(_schema, restriction);
|
||||
} else if (restriction->is_on_token()) {
|
||||
} else if (has_token(restriction->expression)) {
|
||||
_partition_key_restrictions = _partition_key_restrictions->merge_to(_schema, restriction);
|
||||
} else {
|
||||
add_single_column_restriction(::static_pointer_cast<single_column_restriction>(restriction), for_view, allow_filtering);
|
||||
@@ -336,8 +332,8 @@ void statement_restrictions::add_single_column_restriction(::shared_ptr<single_c
|
||||
// However, in a SELECT statement used to define a materialized view,
|
||||
// such a slice is fine - it is used to check whether individual
|
||||
// partitions, match, and does not present a performance problem.
|
||||
assert(!restriction->is_on_token());
|
||||
if (restriction->is_slice() && !for_view && !allow_filtering) {
|
||||
assert(!has_token(restriction->expression));
|
||||
if (has_slice(restriction->expression) && !for_view && !allow_filtering) {
|
||||
throw exceptions::invalid_request_exception(
|
||||
"Only EQ and IN relation are supported on the partition key (unless you use the token() function or allow filtering)");
|
||||
}
|
||||
@@ -398,7 +394,7 @@ std::vector<const column_definition*> statement_restrictions::get_column_defs_fo
|
||||
auto& sim = db.find_column_family(_schema).get_index_manager();
|
||||
auto [opt_idx, _] = find_idx(sim);
|
||||
auto column_uses_indexing = [&opt_idx] (const column_definition* cdef, ::shared_ptr<single_column_restriction> restr) {
|
||||
return opt_idx && restr && restr->is_supported_by(*opt_idx);
|
||||
return opt_idx && restr && is_supported_by(restr->expression, *opt_idx);
|
||||
};
|
||||
auto single_pk_restrs = dynamic_pointer_cast<single_column_partition_key_restrictions>(_partition_key_restrictions);
|
||||
if (_partition_key_restrictions->needs_filtering(*_schema)) {
|
||||
@@ -450,7 +446,7 @@ void statement_restrictions::process_partition_key_restrictions(bool has_queriab
|
||||
// - Is it queriable without 2ndary index, which is always more efficient
|
||||
// If a component of the partition key is restricted by a relation, all preceding
|
||||
// components must have a EQ. Only the last partition key component can be in IN relation.
|
||||
if (_partition_key_restrictions->is_on_token()) {
|
||||
if (has_token(_partition_key_restrictions->expression)) {
|
||||
_is_key_range = true;
|
||||
} else if (_partition_key_restrictions->has_unrestricted_components(*_schema)) {
|
||||
_is_key_range = true;
|
||||
@@ -482,11 +478,12 @@ void statement_restrictions::process_clustering_columns_restrictions(bool has_qu
|
||||
return;
|
||||
}
|
||||
|
||||
if (_clustering_columns_restrictions->is_IN() && select_a_collection) {
|
||||
if (clustering_key_restrictions_has_IN() && select_a_collection) {
|
||||
throw exceptions::invalid_request_exception(
|
||||
"Cannot restrict clustering columns by IN relations when a collection is selected by the query");
|
||||
}
|
||||
if (_clustering_columns_restrictions->is_contains() && !has_queriable_index && !allow_filtering) {
|
||||
if (find_if(_clustering_columns_restrictions->expression, is_on_collection)
|
||||
&& !has_queriable_index && !allow_filtering) {
|
||||
throw exceptions::invalid_request_exception(
|
||||
"Cannot restrict clustering columns by a CONTAINS relation without a secondary index or filtering");
|
||||
}
|
||||
|
||||
@@ -135,7 +135,7 @@ public:
|
||||
* otherwise.
|
||||
*/
|
||||
bool key_is_in_relation() const {
|
||||
return _partition_key_restrictions->is_IN();
|
||||
return find(_partition_key_restrictions->expression, operator_type::IN);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -145,7 +145,7 @@ public:
|
||||
* otherwise.
|
||||
*/
|
||||
bool clustering_key_restrictions_has_IN() const {
|
||||
return _clustering_columns_restrictions->is_IN();
|
||||
return find(_clustering_columns_restrictions->expression, operator_type::IN);
|
||||
}
|
||||
|
||||
bool clustering_key_restrictions_has_only_eq() const {
|
||||
|
||||
@@ -70,6 +70,14 @@ public:
|
||||
return _column_definitions;
|
||||
}
|
||||
|
||||
void merge_with(::shared_ptr<restriction> restriction) override {
|
||||
expression = make_conjunction(std::move(expression), restriction->expression);
|
||||
}
|
||||
|
||||
bool uses_function(const sstring& ks_name, const sstring& function_name) const override {
|
||||
return cql3::restrictions::uses_function(expression, ks_name, function_name);
|
||||
}
|
||||
|
||||
virtual bool has_supporting_index(const secondary_index::secondary_index_manager& index_manager, allow_local_index allow_local) const override {
|
||||
return false;
|
||||
}
|
||||
@@ -86,28 +94,16 @@ public:
|
||||
}
|
||||
|
||||
std::vector<bounds_range_type> bounds_ranges(const query_options& options) const override {
|
||||
auto get_token_bound = [this, &options](statements::bound b) {
|
||||
if (!has_bound(b)) {
|
||||
return is_start(b) ? dht::minimum_token() : dht::maximum_token();
|
||||
}
|
||||
auto buf= bounds(b, options).front();
|
||||
if (!buf) {
|
||||
throw exceptions::invalid_request_exception("Invalid null token value");
|
||||
}
|
||||
auto tk = dht::token::from_bytes(*buf);
|
||||
if (tk.is_minimum() && !is_start(b)) {
|
||||
// The token was parsed as a minimum marker (token::kind::before_all_keys), but
|
||||
// as it appears in the end bound position, it is actually the maximum marker
|
||||
// (token::kind::after_all_keys).
|
||||
return dht::maximum_token();
|
||||
}
|
||||
return tk;
|
||||
};
|
||||
|
||||
const auto start_token = get_token_bound(statements::bound::START);
|
||||
const auto end_token = get_token_bound(statements::bound::END);
|
||||
const auto include_start = this->is_inclusive(statements::bound::START);
|
||||
const auto include_end = this->is_inclusive(statements::bound::END);
|
||||
auto values = possible_lhs_values(nullptr, expression, options);
|
||||
if (values == value_set(value_list{})) {
|
||||
return {};
|
||||
}
|
||||
const auto bounds = to_range(values);
|
||||
const auto start_token = bounds.start() ? dht::token::from_bytes(bounds.start()->value())
|
||||
: dht::minimum_token();
|
||||
auto end_token = bounds.end() ? dht::token::from_bytes(bounds.end()->value()) : dht::maximum_token();
|
||||
const bool include_start = bounds.start() && bounds.start()->is_inclusive();
|
||||
const auto include_end = bounds.end() && bounds.end()->is_inclusive();
|
||||
|
||||
/*
|
||||
* If we ask SP.getRangeSlice() for (token(200), token(200)], it will happily return the whole ring.
|
||||
@@ -149,16 +145,6 @@ public:
|
||||
, _value(std::move(value))
|
||||
{}
|
||||
|
||||
bool uses_function(const sstring& ks_name, const sstring& function_name) const override {
|
||||
return restriction::term_uses_function(_value, ks_name, function_name);
|
||||
}
|
||||
|
||||
void merge_with(::shared_ptr<restriction>) override {
|
||||
throw exceptions::invalid_request_exception(
|
||||
join(", ", get_column_defs())
|
||||
+ " cannot be restricted by more than one relation if it includes an Equal");
|
||||
}
|
||||
|
||||
std::vector<bytes_opt> values(const query_options& options) const override {
|
||||
return { to_bytes_opt(_value->bind_and_get(options)) };
|
||||
}
|
||||
@@ -195,50 +181,9 @@ public:
|
||||
return { to_bytes_opt(_slice.bound(b)->bind_and_get(options)) };
|
||||
}
|
||||
|
||||
bool uses_function(const sstring& ks_name,
|
||||
const sstring& function_name) const override {
|
||||
return (_slice.has_bound(statements::bound::START)
|
||||
&& restriction::term_uses_function(
|
||||
_slice.bound(statements::bound::START), ks_name,
|
||||
function_name))
|
||||
|| (_slice.has_bound(statements::bound::END)
|
||||
&& restriction::term_uses_function(
|
||||
_slice.bound(statements::bound::END),
|
||||
ks_name, function_name));
|
||||
}
|
||||
bool is_inclusive(statements::bound b) const override {
|
||||
return _slice.is_inclusive(b);
|
||||
}
|
||||
void merge_with(::shared_ptr<restriction> restriction) override {
|
||||
try {
|
||||
if (!restriction->is_on_token()) {
|
||||
throw exceptions::invalid_request_exception(
|
||||
"Columns \"%s\" cannot be restricted by both a normal relation and a token relation");
|
||||
}
|
||||
if (!restriction->is_slice()) {
|
||||
throw exceptions::invalid_request_exception(
|
||||
"Columns \"%s\" cannot be restricted by both an equality and an inequality relation");
|
||||
}
|
||||
|
||||
auto* other_slice = static_cast<slice *>(restriction.get());
|
||||
|
||||
if (has_bound(statements::bound::START)
|
||||
&& other_slice->has_bound(statements::bound::START)) {
|
||||
throw exceptions::invalid_request_exception(
|
||||
"More than one restriction was found for the start bound on %s");
|
||||
}
|
||||
if (has_bound(statements::bound::END)
|
||||
&& other_slice->has_bound(statements::bound::END)) {
|
||||
throw exceptions::invalid_request_exception(
|
||||
"More than one restriction was found for the end bound on %s");
|
||||
}
|
||||
_slice.merge(other_slice->_slice);
|
||||
this->expression = make_conjunction(std::move(this->expression), restriction->expression);
|
||||
} catch (exceptions::invalid_request_exception & e) {
|
||||
throw exceptions::invalid_request_exception(
|
||||
sprint(e.what(), join(", ", get_column_defs())));
|
||||
}
|
||||
}
|
||||
sstring to_string() const override {
|
||||
return format("SLICE{}", _slice);
|
||||
}
|
||||
|
||||
@@ -417,10 +417,11 @@ bool result_set_builder::restrictions_filter::do_filter(const selection& selecti
|
||||
}
|
||||
|
||||
auto clustering_columns_restrictions = _restrictions->get_clustering_columns_restrictions();
|
||||
if (clustering_columns_restrictions->is_multi_column()) {
|
||||
auto multi_column_restriction = dynamic_pointer_cast<cql3::restrictions::multi_column_restriction>(clustering_columns_restrictions);
|
||||
if (dynamic_pointer_cast<cql3::restrictions::multi_column_restriction>(clustering_columns_restrictions)) {
|
||||
clustering_key_prefix ckey = clustering_key_prefix::from_exploded(clustering_key);
|
||||
return multi_column_restriction->is_satisfied_by(*_schema, ckey, _options);
|
||||
return restrictions::is_satisfied_by(
|
||||
clustering_columns_restrictions->expression,
|
||||
partition_key, clustering_key, static_row, row, selection, _options);
|
||||
}
|
||||
|
||||
auto static_row_iterator = static_row.iterator();
|
||||
@@ -434,29 +435,13 @@ bool result_set_builder::restrictions_filter::do_filter(const selection& selecti
|
||||
if (cdef->kind == column_kind::regular_column && !row_iterator) {
|
||||
continue;
|
||||
}
|
||||
auto& cell_iterator = (cdef->kind == column_kind::static_column) ? static_row_iterator : *row_iterator;
|
||||
std::optional<query::result_bytes_view> result_view_opt;
|
||||
if (cdef->type->is_multi_cell()) {
|
||||
result_view_opt = cell_iterator.next_collection_cell();
|
||||
} else {
|
||||
auto cell = cell_iterator.next_atomic_cell();
|
||||
if (cell) {
|
||||
result_view_opt = cell->value();
|
||||
}
|
||||
}
|
||||
auto restr_it = non_pk_restrictions_map.find(cdef);
|
||||
if (restr_it == non_pk_restrictions_map.end()) {
|
||||
continue;
|
||||
}
|
||||
restrictions::single_column_restriction& restriction = *restr_it->second;
|
||||
bool regular_restriction_matches;
|
||||
if (result_view_opt) {
|
||||
regular_restriction_matches = result_view_opt->with_linearized([&restriction, this](bytes_view data) {
|
||||
return restriction.is_satisfied_by(data, _options);
|
||||
});
|
||||
} else {
|
||||
regular_restriction_matches = restriction.is_satisfied_by(bytes(), _options);
|
||||
}
|
||||
bool regular_restriction_matches = restrictions::is_satisfied_by(
|
||||
restriction.expression, partition_key, clustering_key, static_row, row, selection, _options);
|
||||
if (!regular_restriction_matches) {
|
||||
_current_static_row_does_not_match = (cdef->kind == column_kind::static_column);
|
||||
return false;
|
||||
@@ -473,9 +458,8 @@ bool result_set_builder::restrictions_filter::do_filter(const selection& selecti
|
||||
continue;
|
||||
}
|
||||
restrictions::single_column_restriction& restriction = *restr_it->second;
|
||||
const bytes& value_to_check = partition_key[cdef->id];
|
||||
bool pk_restriction_matches = restriction.is_satisfied_by(value_to_check, _options);
|
||||
if (!pk_restriction_matches) {
|
||||
if (!restrictions::is_satisfied_by(
|
||||
restriction.expression, partition_key, clustering_key, static_row, row, selection, _options)) {
|
||||
_current_partition_key_does_not_match = true;
|
||||
return false;
|
||||
}
|
||||
@@ -494,9 +478,8 @@ bool result_set_builder::restrictions_filter::do_filter(const selection& selecti
|
||||
return false;
|
||||
}
|
||||
restrictions::single_column_restriction& restriction = *restr_it->second;
|
||||
const bytes& value_to_check = clustering_key[cdef->id];
|
||||
bool pk_restriction_matches = restriction.is_satisfied_by(value_to_check, _options);
|
||||
if (!pk_restriction_matches) {
|
||||
if (!restrictions::is_satisfied_by(
|
||||
restriction.expression, partition_key, clustering_key, static_row, row, selection, _options)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -39,9 +39,14 @@
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <boost/algorithm/cxx11/all_of.hpp>
|
||||
#include <boost/range/adaptors.hpp>
|
||||
|
||||
#include "cql3/tuples.hh"
|
||||
#include "database.hh"
|
||||
#include "delete_statement.hh"
|
||||
#include "raw/delete_statement.hh"
|
||||
#include "database.hh"
|
||||
#include "utils/overloaded_functor.hh"
|
||||
|
||||
namespace cql3 {
|
||||
|
||||
@@ -79,6 +84,60 @@ void delete_statement::add_update_for_key(mutation& m, const query::clustering_r
|
||||
|
||||
namespace raw {
|
||||
|
||||
namespace {
|
||||
|
||||
using namespace restrictions;
|
||||
|
||||
/// True iff parameters indicate a multi-column comparison.
|
||||
bool is_multi_column(size_t column_count, const term* t) {
|
||||
return column_count > 1 || dynamic_cast<const tuples::value*>(t) || dynamic_cast<const tuples::marker*>(t)
|
||||
|| dynamic_cast<const tuples::delayed_value*>(t);
|
||||
}
|
||||
|
||||
/// True iff expr bounds clustering key from both above and below OR it has no clustering-key bounds at all.
|
||||
/// See #6493.
|
||||
bool bounded_ck(const expression& expr) {
|
||||
return std::visit(overloaded_functor{
|
||||
[] (bool b) { return !b; },
|
||||
[] (const binary_operator& oper) {
|
||||
return *oper.op == operator_type::EQ; // Without EQ, one side must be unbounded.
|
||||
},
|
||||
[] (const conjunction& conj) {
|
||||
using bounds_bitvector = int; // Combined using binary OR.
|
||||
static constexpr bounds_bitvector UPPER=1, LOWER=2;
|
||||
std::unordered_map<const column_definition*, bounds_bitvector> found_bounds;
|
||||
for (const auto& child : conj.children) {
|
||||
std::visit(overloaded_functor{
|
||||
[&] (const binary_operator& oper) {
|
||||
if (auto cvs = std::get_if<std::vector<column_value>>(&oper.lhs)) {
|
||||
// The rules of multi-column comparison imply that any multi-column
|
||||
// expression sets a bound for the entire clustering key. Therefore, we
|
||||
// represent any such expression with special pointer value nullptr.
|
||||
auto col = is_multi_column(cvs->size(), oper.rhs.get()) ? nullptr : cvs->front().col;
|
||||
if (col && !col->is_clustering_key()) {
|
||||
return;
|
||||
}
|
||||
if (*oper.op == operator_type::EQ) {
|
||||
found_bounds[col] = UPPER | LOWER;
|
||||
} else if (*oper.op == operator_type::LT || *oper.op == operator_type::LTE) {
|
||||
found_bounds[col] |= UPPER;
|
||||
} else if (*oper.op == operator_type::GTE || *oper.op == operator_type::GT) {
|
||||
found_bounds[col] |= LOWER;
|
||||
}
|
||||
}
|
||||
},
|
||||
[] (const auto& default_case) {}, // Assumes conjunctions are flattened.
|
||||
}, child);
|
||||
}
|
||||
// Since multi-column comparisons can't be mixed with single-column ones, found_bounds will
|
||||
// either have a single entry with key nullptr or one entry per restricted column.
|
||||
return boost::algorithm::all_of_equal(found_bounds | boost::adaptors::map_values, UPPER | LOWER);
|
||||
},
|
||||
}, expr);
|
||||
}
|
||||
|
||||
} // anonymous namespace
|
||||
|
||||
::shared_ptr<cql3::statements::modification_statement>
|
||||
delete_statement::prepare_internal(database& db, schema_ptr schema, variable_specifications& bound_names,
|
||||
std::unique_ptr<attributes> attrs, cql_stats& stats) const {
|
||||
@@ -103,13 +162,12 @@ delete_statement::prepare_internal(database& db, schema_ptr schema, variable_spe
|
||||
}
|
||||
prepare_conditions(db, *schema, bound_names, *stmt);
|
||||
stmt->process_where_clause(db, _where_clause, bound_names);
|
||||
if (!db.supports_infinite_bound_range_deletions()) {
|
||||
if (!stmt->restrictions().get_clustering_columns_restrictions()->has_bound(bound::START)
|
||||
|| !stmt->restrictions().get_clustering_columns_restrictions()->has_bound(bound::END)) {
|
||||
throw exceptions::invalid_request_exception("A range deletion operation needs to specify both bounds for clusters without sstable mc format support");
|
||||
}
|
||||
if (!db.supports_infinite_bound_range_deletions() &&
|
||||
!bounded_ck(stmt->restrictions().get_clustering_columns_restrictions()->expression)) {
|
||||
throw exceptions::invalid_request_exception(
|
||||
"A range deletion operation needs to specify both bounds for clusters without sstable mc format support");
|
||||
}
|
||||
if (stmt->restrictions().get_clustering_columns_restrictions()->is_slice()) {
|
||||
if (has_slice(stmt->restrictions().get_clustering_columns_restrictions()->expression)) {
|
||||
if (!schema->is_compound()) {
|
||||
throw exceptions::invalid_request_exception("Range deletions on \"compact storage\" schemas are not supported");
|
||||
}
|
||||
|
||||
@@ -466,9 +466,9 @@ modification_statement::process_where_clause(database& db, std::vector<relation_
|
||||
_has_regular_column_conditions = true;
|
||||
}
|
||||
}
|
||||
if (_restrictions->get_partition_key_restrictions()->is_on_token()) {
|
||||
if (has_token(_restrictions->get_partition_key_restrictions()->expression)) {
|
||||
throw exceptions::invalid_request_exception(format("The token function cannot be used in WHERE clauses for UPDATE and DELETE statements: {}",
|
||||
_restrictions->get_partition_key_restrictions()->to_string()));
|
||||
to_string(_restrictions->get_partition_key_restrictions()->expression)));
|
||||
}
|
||||
if (!_restrictions->get_non_pk_restriction().empty()) {
|
||||
auto column_names = ::join(", ", _restrictions->get_non_pk_restriction()
|
||||
@@ -478,8 +478,9 @@ modification_statement::process_where_clause(database& db, std::vector<relation_
|
||||
throw exceptions::invalid_request_exception(format("Invalid where clause contains non PRIMARY KEY columns: {}", column_names));
|
||||
}
|
||||
auto ck_restrictions = _restrictions->get_clustering_columns_restrictions();
|
||||
if (ck_restrictions->is_slice() && !allow_clustering_key_slices()) {
|
||||
throw exceptions::invalid_request_exception(format("Invalid operator in where clause {}", ck_restrictions->to_string()));
|
||||
if (has_slice(ck_restrictions->expression) && !allow_clustering_key_slices()) {
|
||||
throw exceptions::invalid_request_exception(
|
||||
format("Invalid operator in where clause {}", to_string(ck_restrictions->expression)));
|
||||
}
|
||||
if (_restrictions->has_unrestricted_clustering_columns() && !applies_only_to_static_columns() && !s->is_dense()) {
|
||||
// Tomek: Origin had "&& s->comparator->is_composite()" in the condition below.
|
||||
@@ -497,7 +498,7 @@ modification_statement::process_where_clause(database& db, std::vector<relation_
|
||||
}
|
||||
// In general, we can't modify specific columns if not all clustering columns have been specified.
|
||||
// However, if we modify only static columns, it's fine since we won't really use the prefix anyway.
|
||||
if (!ck_restrictions->is_slice()) {
|
||||
if (!has_slice(ck_restrictions->expression)) {
|
||||
auto& col = s->column_at(column_kind::clustering_key, ck_restrictions->size());
|
||||
for (auto&& op : _column_operations) {
|
||||
if (!op->column.is_static()) {
|
||||
|
||||
@@ -765,8 +765,8 @@ primary_key_select_statement::primary_key_select_statement(schema_ptr schema, ui
|
||||
if (_ks_sel == ks_selector::NONSYSTEM) {
|
||||
if (_restrictions->need_filtering() ||
|
||||
_restrictions->get_partition_key_restrictions()->empty() ||
|
||||
(_restrictions->get_partition_key_restrictions()->is_on_token() &&
|
||||
!_restrictions->get_partition_key_restrictions()->is_EQ())) {
|
||||
(has_token(_restrictions->get_partition_key_restrictions()->expression) &&
|
||||
!find(_restrictions->get_partition_key_restrictions()->expression, operator_type::EQ))) {
|
||||
_range_scan = true;
|
||||
if (!_parameters->bypass_cache())
|
||||
_range_scan_no_bypass_cache = true;
|
||||
@@ -1066,7 +1066,7 @@ query::partition_slice indexed_table_select_statement::get_partition_slice_for_g
|
||||
auto clustering_restrictions = ::make_shared<restrictions::single_column_clustering_key_restrictions>(_view_schema, *single_pk_restrictions);
|
||||
// Computed token column needs to be added to index view restrictions
|
||||
const column_definition& token_cdef = *_view_schema->clustering_key_columns().begin();
|
||||
auto base_pk = partition_key::from_optional_exploded(*_schema, _restrictions->get_partition_key_restrictions()->values(options));
|
||||
auto base_pk = partition_key::from_optional_exploded(*_schema, single_pk_restrictions->values(options));
|
||||
bytes token_value = dht::get_token(*_schema, base_pk).data();
|
||||
auto token_restriction = ::make_shared<restrictions::single_column_restriction::EQ>(token_cdef, ::make_shared<cql3::constants::value>(cql3::raw_value::make_value(token_value)));
|
||||
token_restriction->expression = restrictions::make_column_op(
|
||||
|
||||
@@ -176,12 +176,15 @@ void stats::register_stats() {
|
||||
}
|
||||
|
||||
bool partition_key_matches(const schema& base, const view_info& view, const dht::decorated_key& key, gc_clock::time_point now) {
|
||||
return view.select_statement().get_restrictions()->get_partition_key_restrictions()->is_satisfied_by(
|
||||
base, key.key(), clustering_key_prefix::make_empty(), row(), cql3::query_options({ }), now);
|
||||
const auto r = view.select_statement().get_restrictions()->get_partition_key_restrictions();
|
||||
return cql3::restrictions::is_satisfied_by(
|
||||
r->expression, base, key.key(), clustering_key_prefix::make_empty(), row(), cql3::query_options({ }), now);
|
||||
}
|
||||
|
||||
bool clustering_prefix_matches(const schema& base, const view_info& view, const partition_key& key, const clustering_key_prefix& ck, gc_clock::time_point now) {
|
||||
return view.select_statement().get_restrictions()->get_clustering_columns_restrictions()->is_satisfied_by(
|
||||
const auto r = view.select_statement().get_restrictions()->get_clustering_columns_restrictions();
|
||||
return cql3::restrictions::is_satisfied_by(
|
||||
r->expression,
|
||||
base, key, ck, row(), cql3::query_options({ }), now);
|
||||
}
|
||||
|
||||
@@ -238,7 +241,8 @@ bool matches_view_filter(const schema& base, const view_info& view, const partit
|
||||
&& boost::algorithm::all_of(
|
||||
view.select_statement().get_restrictions()->get_non_pk_restriction() | boost::adaptors::map_values,
|
||||
[&] (auto&& r) {
|
||||
return r->is_satisfied_by(base, key, update.key(), update.cells(), cql3::query_options({ }), now);
|
||||
return cql3::restrictions::is_satisfied_by(
|
||||
r->expression, base, key, update.key(), update.cells(), cql3::query_options({ }), now);
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -104,10 +104,7 @@ SEASTAR_TEST_CASE(test_like_operator_conjunction) {
|
||||
require_rows(e, "select s1 from t where s1 like 'a%' and s1 like '%' allow filtering", {{T("a")}, {T("abc")}});
|
||||
require_rows(e, "select s1 from t where s1 like 'a%' and s1 like '_b_' and s1 like '%c' allow filtering",
|
||||
{{T("abc")}});
|
||||
BOOST_REQUIRE_EXCEPTION(
|
||||
e.execute_cql("select * from t where s1 like 'a%' and s1 = 'abc' allow filtering").get(),
|
||||
exceptions::invalid_request_exception,
|
||||
exception_predicate::message_contains("LIKE and non-LIKE"));
|
||||
require_rows(e, "select s1 from t where s1 like 'a%' and s1 = 'abc' allow filtering", {{T("abc")}});
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -4542,38 +4542,6 @@ SEASTAR_TEST_CASE(test_alter_table_default_ttl_reset) {
|
||||
});
|
||||
}
|
||||
|
||||
SEASTAR_TEST_CASE(equals_null_is_forbidden) {
|
||||
return do_with_cql_env([](cql_test_env& e) {
|
||||
return seastar::async([&e] {
|
||||
cquery_nofail(
|
||||
e, "create table t (pk int, ck1 int, ck2 int, r int, m map<int, int>, primary key(pk, ck1, ck2))");
|
||||
cquery_nofail(e, "insert into t(pk,ck1,ck2,r,m) values(1,11,21,101,{1:1})");
|
||||
using ire = exceptions::invalid_request_exception;
|
||||
const auto nullerr = exception_predicate::message_contains("null");
|
||||
BOOST_REQUIRE_EXCEPTION(e.execute_cql("select * from t where pk=null").get(), ire, nullerr);
|
||||
BOOST_REQUIRE_EXCEPTION(e.execute_cql("select * from t where token(pk)=null").get(), ire, nullerr);
|
||||
BOOST_REQUIRE_EXCEPTION(e.execute_cql("select * from t where ck1=null allow filtering").get(), ire, nullerr);
|
||||
BOOST_REQUIRE_EXCEPTION(e.execute_cql("select * from t where (ck1,ck2)=(null,1) allow filtering").get(),
|
||||
ire, nullerr);
|
||||
BOOST_REQUIRE_EXCEPTION(e.execute_cql("select * from t where r=null allow filtering").get(), ire, nullerr);
|
||||
BOOST_REQUIRE_EXCEPTION(e.execute_cql("select * from t where m[1]=null allow filtering").get(), ire, nullerr);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
SEASTAR_TEST_CASE(ck_slice_with_null_is_forbidden) {
|
||||
return do_with_cql_env([](cql_test_env& e) {
|
||||
return seastar::async([&e] {
|
||||
cquery_nofail(e, "create table t (p int primary key, r int)");
|
||||
cquery_nofail(e, "insert into t(p,r) values (1,11)");
|
||||
using ire = exceptions::invalid_request_exception;
|
||||
const auto nullerr = exception_predicate::message_contains("null");
|
||||
BOOST_REQUIRE_EXCEPTION(e.execute_cql("select * from t where r<null allow filtering").get(), ire, nullerr);
|
||||
BOOST_REQUIRE_EXCEPTION(e.execute_cql("select * from t where r>null allow filtering").get(), ire, nullerr);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
SEASTAR_TEST_CASE(test_internal_alter_table_on_a_distributed_table) {
|
||||
return do_with_cql_env([](cql_test_env& e) {
|
||||
return seastar::async([&e] {
|
||||
|
||||
828
test/boost/restrictions_test.cc
Normal file
828
test/boost/restrictions_test.cc
Normal file
@@ -0,0 +1,828 @@
|
||||
/*
|
||||
* Copyright (C) 2020 ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <boost/range/adaptors.hpp>
|
||||
#include <experimental/source_location>
|
||||
#include <fmt/format.h>
|
||||
#include <seastar/testing/thread_test_case.hh>
|
||||
|
||||
#include "cql3/cql_config.hh"
|
||||
#include "cql3/values.hh"
|
||||
#include "test/lib/cql_assertions.hh"
|
||||
#include "test/lib/cql_test_env.hh"
|
||||
#include "test/lib/exception_utils.hh"
|
||||
#include "types/list.hh"
|
||||
#include "types/map.hh"
|
||||
#include "types/set.hh"
|
||||
|
||||
namespace {
|
||||
|
||||
using std::experimental::source_location;
|
||||
using boost::adaptors::transformed;
|
||||
|
||||
std::unique_ptr<cql3::query_options> to_options(
|
||||
std::optional<std::vector<sstring_view>> names,
|
||||
std::vector<cql3::raw_value> values) {
|
||||
static auto& d = cql3::query_options::DEFAULT;
|
||||
return std::make_unique<cql3::query_options>(
|
||||
cql3::cql_config{},
|
||||
d.get_consistency(), d.get_timeout_config(), std::move(names), std::move(values), d.skip_metadata(),
|
||||
d.get_specific_options(), d.get_cql_serialization_format());
|
||||
}
|
||||
|
||||
/// Asserts that e.execute_prepared(id, values) contains expected rows, in any order.
|
||||
void require_rows(cql_test_env& e,
|
||||
cql3::prepared_cache_key_type id,
|
||||
std::optional<std::vector<sstring_view>> names,
|
||||
const std::vector<bytes_opt>& values,
|
||||
const std::vector<std::vector<bytes_opt>>& expected,
|
||||
const std::experimental::source_location& loc = source_location::current()) {
|
||||
// This helps compiler pick the right overload for make_value:
|
||||
const auto rvals = values | transformed([] (const bytes_opt& v) { return cql3::raw_value::make_value(v); });
|
||||
auto opts = to_options(std::move(names), std::vector(rvals.begin(), rvals.end()));
|
||||
try {
|
||||
assert_that(e.execute_prepared_with_qo(id, std::move(opts)).get0()).is_rows().with_rows_ignore_order(expected);
|
||||
} catch (const std::exception& e) {
|
||||
BOOST_FAIL(format("execute_prepared failed: {}\n{}:{}: originally from here",
|
||||
e.what(), loc.file_name(), loc.line()));
|
||||
}
|
||||
}
|
||||
|
||||
// Shorthand serializers: turn a C++ literal into its CQL wire representation.
auto I(int32_t x) { return int32_type->decompose(x); }    // int
auto F(float f) { return float_type->decompose(f); }      // float
auto T(const char* t) { return utf8_type->decompose(t); } // text
|
||||
|
||||
/// Serializes a set<int> literal into its CQL wire representation.
auto SI(const set_type_impl::native_type& val) {
    const auto int_set_type = set_type_impl::get_instance(int32_type, true);
    return int_set_type->decompose(make_set_value(int_set_type, val));
} // No trailing semicolon: this is a function definition, not a lambda.
|
||||
|
||||
/// Serializes a set<text> literal into its CQL wire representation.
auto ST(const set_type_impl::native_type& val) {
    const auto text_set_type = set_type_impl::get_instance(utf8_type, true);
    return text_set_type->decompose(make_set_value(text_set_type, val));
} // No trailing semicolon: this is a function definition, not a lambda.
|
||||
|
||||
/// Serializes a list<int> literal into its CQL wire representation.
auto LI(const list_type_impl::native_type& val) {
    const auto elem_list = list_type_impl::get_instance(int32_type, true);
    return elem_list->decompose(make_list_value(elem_list, val));
}

/// Serializes a list<float> literal into its CQL wire representation.
auto LF(const list_type_impl::native_type& val) {
    const auto elem_list = list_type_impl::get_instance(float_type, true);
    return elem_list->decompose(make_list_value(elem_list, val));
}

/// Serializes a list<text> literal into its CQL wire representation.
auto LT(const list_type_impl::native_type& val) {
    const auto elem_list = list_type_impl::get_instance(utf8_type, true);
    return elem_list->decompose(make_list_value(elem_list, val));
}
|
||||
|
||||
/// Creates a table t with int columns p, q, and r. Inserts data (i,10+i,20+i) for i = 0 to n.
|
||||
void create_t_with_p_q_r(cql_test_env& e, size_t n) {
|
||||
cquery_nofail(e, "create table t (p int primary key, q int, r int)");
|
||||
for (size_t i = 0; i <= n; ++i) {
|
||||
cquery_nofail(e, fmt::format("insert into t (p,q,r) values ({},{},{});", i, 10+i, 20+i));
|
||||
}
|
||||
}
|
||||
|
||||
} // anonymous namespace
|
||||
|
||||
// Equality restrictions on a regular (non-key) column, alone and combined with
// other columns, via both inline and prepared statements.
SEASTAR_THREAD_TEST_CASE(regular_col_eq) {
    do_with_cql_env_thread([](cql_test_env& e) {
        create_t_with_p_q_r(e, 3);
        require_rows(e, "select q from t where q=12 allow filtering", {{I(12)}});
        // Repeating the same equality is allowed and redundant.
        require_rows(e, "select q from t where q=12 and q=12 allow filtering", {{I(12)}});
        // Contradictory equalities are allowed and simply match nothing.
        require_rows(e, "select q from t where q=12 and q=13 allow filtering", {});
        require_rows(e, "select r from t where q=12 and p=2 allow filtering", {{I(22), I(12)}});
        require_rows(e, "select p from t where q=12 and r=22 allow filtering", {{I(2), I(12), I(22)}});
        require_rows(e, "select r from t where q=12 and p=2 and r=99 allow filtering", {});
        // A row whose q is null must not match q=12.
        cquery_nofail(e, "insert into t(p) values (100)");
        require_rows(e, "select q from t where q=12 allow filtering", {{I(12)}});
        // Same checks through prepared statements, with positional markers...
        auto stmt = e.prepare("select q from t where q=? allow filtering").get0();
        require_rows(e, stmt, {}, {I(12)}, {{I(12)}});
        require_rows(e, stmt, {}, {I(99)}, {});
        // ...and with named markers.
        stmt = e.prepare("select q from t where q=:q allow filtering").get0();
        require_rows(e, stmt, {{"q"}}, {I(12)}, {{I(12)}});
        require_rows(e, stmt, {{"q"}}, {I(99)}, {});
        stmt = e.prepare("select p from t where q=? and r=? allow filtering").get0();
        require_rows(e, stmt, {}, {I(12), I(22)}, {{I(2), I(12), I(22)}});
        require_rows(e, stmt, {}, {I(11), I(21)}, {{I(1), I(11), I(21)}});
        require_rows(e, stmt, {}, {I(11), I(22)}, {});
    }).get();
}
|
||||
|
||||
// Whole-value equality on a frozen map column: only an exact match of all
// entries qualifies.
SEASTAR_THREAD_TEST_CASE(map_eq) {
    do_with_cql_env_thread([](cql_test_env& e) {
        cquery_nofail(e, "create table t (p int primary key, m frozen<map<int,int>>)");
        cquery_nofail(e, "insert into t (p, m) values (1, {1:11, 2:12, 3:13})");
        cquery_nofail(e, "insert into t (p, m) values (2, {1:21, 2:22, 3:23})");
        const auto my_map_type = map_type_impl::get_instance(int32_type, int32_type, true);
        const auto m1 = my_map_type->decompose(
                make_map_value(my_map_type, map_type_impl::native_type({{1, 11}, {2, 12}, {3, 13}})));
        require_rows(e, "select p from t where m={1:11, 2:12, 3:13} allow filtering", {{I(1), m1}});
        // A proper subset of the stored map does not match.
        require_rows(e, "select p from t where m={1:11, 2:12} allow filtering", {});
    }).get();
}
|
||||
|
||||
// Whole-value equality on a frozen set column.
SEASTAR_THREAD_TEST_CASE(set_eq) {
    do_with_cql_env_thread([](cql_test_env& e) {
        cquery_nofail(e, "create table t (p int primary key, m frozen<set<int>>)");
        cquery_nofail(e, "insert into t (p, m) values (1, {11,12,13})");
        cquery_nofail(e, "insert into t (p, m) values (2, {21,22,23})");
        // Exact set matches; a superset does not.
        require_rows(e, "select p from t where m={21,22,23} allow filtering", {{I(2), SI({21, 22, 23})}});
        require_rows(e, "select p from t where m={21,22,23,24} allow filtering", {});
    }).get();
}
|
||||
|
||||
// Whole-value equality on a frozen list column; element order matters.
SEASTAR_THREAD_TEST_CASE(list_eq) {
    do_with_cql_env_thread([](cql_test_env& e) {
        cquery_nofail(e, "create table t (p int primary key, li frozen<list<int>>)");
        cquery_nofail(e, "insert into t (p, li) values (1, [11,12,13])");
        cquery_nofail(e, "insert into t (p, li) values (2, [21,22,23])");
        require_rows(e, "select p from t where li=[21,22,23] allow filtering", {{I(2), LI({21, 22, 23})}});
        // Same elements in a different order are a different list.
        require_rows(e, "select p from t where li=[23,22,21] allow filtering", {});
    }).get();
}
|
||||
|
||||
// Slice (inequality) restrictions comparing whole frozen-list values
// lexicographically.
SEASTAR_THREAD_TEST_CASE(list_slice) {
    do_with_cql_env_thread([](cql_test_env& e) {
        cquery_nofail(e, "create table t (p int primary key, li frozen<list<int>>)");
        cquery_nofail(e, "insert into t (p, li) values (1, [11,12,13])");
        cquery_nofail(e, "insert into t (p, li) values (2, [21,22,23])");
        require_rows(e, "select li from t where li<[23,22,21] allow filtering",
                     {{LI({11, 12, 13})}, {LI({21, 22, 23})}});
        require_rows(e, "select li from t where li>=[11,12,13] allow filtering",
                     {{LI({11, 12, 13})}, {LI({21, 22, 23})}});
        // Strict > excludes the equal list.
        require_rows(e, "select li from t where li>[11,12,13] allow filtering", {{LI({21, 22, 23})}});
    }).get();
}
|
||||
|
||||
// Multi-column (tuple) slice restrictions where the clustering columns are
// frozen lists.
SEASTAR_THREAD_TEST_CASE(tuple_of_list) {
    do_with_cql_env_thread([](cql_test_env& e) {
        cquery_nofail(e, "create table t (p int, l1 frozen<list<int>>, l2 frozen<list<int>>, primary key(p,l1,l2))");
        cquery_nofail(e, "insert into t (p, l1, l2) values (1, [11,12], [101,102])");
        cquery_nofail(e, "insert into t (p, l1, l2) values (2, [21,22], [201,202])");
        // Empty lists compare below every stored tuple.
        require_rows(e, "select * from t where (l1,l2)<([],[]) allow filtering", {});
        require_rows(e, "select l1 from t where (l1,l2)<([20],[200]) allow filtering", {{LI({11, 12}), LI({101, 102})}});
        require_rows(e, "select l1 from t where (l1,l2)>=([11,12],[101,102]) allow filtering",
                     {{LI({11, 12}), LI({101, 102})}, {LI({21, 22}), LI({201, 202})}});
        require_rows(e, "select l1 from t where (l1,l2)<([11,12],[101,103]) allow filtering",
                     {{LI({11, 12}), LI({101, 102})}});
    }).get();
}
|
||||
|
||||
// Equality restrictions on individual map entries (m[key]=value), including
// behaviour after deletes/updates and through named bind markers.
SEASTAR_THREAD_TEST_CASE(map_entry_eq) {
    do_with_cql_env_thread([](cql_test_env& e) {
        cquery_nofail(e, "create table t (p int primary key, m map<int,int>)");
        cquery_nofail(e, "insert into t (p, m) values (1, {1:11, 2:12, 3:13})");
        cquery_nofail(e, "insert into t (p, m) values (2, {1:21, 2:22, 3:23})");
        cquery_nofail(e, "insert into t (p, m) values (3, {1:31, 2:32, 3:33})");
        const auto mt = map_type_impl::get_instance(int32_type, int32_type, true);
        const auto row2 = mt->decompose(
                make_map_value(mt, map_type_impl::native_type({{1, 21}, {2, 22}, {3, 23}})));
        require_rows(e, "select p from t where m[1]=21 allow filtering", {{I(2), row2}});
        require_rows(e, "select p from t where m[1]=21 and m[3]=23 allow filtering", {{I(2), row2}});
        // Nonexistent key, or existing key with a different value, matches nothing.
        require_rows(e, "select p from t where m[99]=21 allow filtering", {});
        require_rows(e, "select p from t where m[1]=99 allow filtering", {});
        // Entry lookups must reflect a row deletion...
        cquery_nofail(e, "delete from t where p=2");
        require_rows(e, "select p from t where m[1]=21 allow filtering", {});
        require_rows(e, "select p from t where m[1]=21 and m[3]=23 allow filtering", {});
        const auto row3 = mt->decompose(
                make_map_value(mt, map_type_impl::native_type({{1, 31}, {2, 32}, {3, 33}})));
        require_rows(e, "select m from t where m[1]=31 allow filtering", {{row3}});
        // ...and a map overwrite.
        cquery_nofail(e, "update t set m={1:111} where p=3");
        require_rows(e, "select p from t where m[1]=31 allow filtering", {});
        require_rows(e, "select p from t where m[1]=21 allow filtering", {});
        const auto row3_updated = mt->decompose(
                make_map_value(mt, map_type_impl::native_type({{1, 111}})));
        require_rows(e, "select p from t where m[1]=111 allow filtering", {{I(3), row3_updated}});
        // Entry restrictions with named bind markers.
        const auto prepared = e.prepare("select p from t where m[1]=:uno and m[3]=:tres allow filtering").get0();
        const auto row1 = mt->decompose(
                make_map_value(mt, map_type_impl::native_type({{1, 11}, {2, 12}, {3, 13}})));
        require_rows(e, prepared, {{"uno", "tres"}}, {I(11), I(13)}, {{I(1), row1}});
        require_rows(e, prepared, {{"uno", "tres"}}, {I(21), I(99)}, {});
    }).get();
}
|
||||
|
||||
// Slice restrictions on a regular column, including mixed upper/lower
// bounds, empty intersections, and rows with a null value.
SEASTAR_THREAD_TEST_CASE(regular_col_slice) {
    do_with_cql_env_thread([](cql_test_env& e) {
        create_t_with_p_q_r(e, 3);
        require_rows(e, "select q from t where q>12 allow filtering", {{I(13)}});
        require_rows(e, "select q from t where q<12 allow filtering", {{I(10)}, {I(11)}});
        require_rows(e, "select q from t where q>99 allow filtering", {});
        require_rows(e, "select r from t where q<12 and q>=11 allow filtering", {{I(21), I(11)}});
        // Contradictory bounds form an empty range.
        require_rows(e, "select * from t where q<11 and q>11 allow filtering", {});
        require_rows(e, "select q from t where q<=12 and r>=21 allow filtering", {{I(11), I(21)}, {I(12), I(22)}});
        cquery_nofail(e, "insert into t(p) values (4)");
        // NOTE(review): a null q row appears in the q<12 result here — this
        // codifies the current filtering behaviour for nulls in slices.
        require_rows(e, "select q from t where q<12 allow filtering", {{std::nullopt}, {I(10)}, {I(11)}});
        require_rows(e, "select q from t where q>10 allow filtering", {{I(11)}, {I(12)}, {I(13)}});
        require_rows(e, "select q from t where q<12 and q>10 allow filtering", {{I(11)}});
    }).get();
}
|
||||
|
||||
// Slice restrictions on a clustering column stored in descending order;
// results are still reported in the expected (ascending) order.
SEASTAR_THREAD_TEST_CASE(regular_col_slice_reversed) {
    do_with_cql_env_thread([](cql_test_env& e) {
        cquery_nofail(e, "create table t (p int, c int, primary key(p, c)) with clustering order by (c desc)");
        cquery_nofail(e, "insert into t(p,c) values (1,11)");
        require_rows(e, "select c from t where c>10 allow filtering", {{I(11)}});
        cquery_nofail(e, "insert into t(p,c) values (1,12)");
        require_rows(e, "select c from t where c>10 allow filtering", {{I(11)}, {I(12)}});
        require_rows(e, "select c from t where c<100 allow filtering", {{I(11)}, {I(12)}});
    }).get();
}
|
||||
|
||||
#if 0 // TODO: enable when supported.
|
||||
SEASTAR_THREAD_TEST_CASE(regular_col_neq) {
|
||||
do_with_cql_env_thread([](cql_test_env& e) {
|
||||
create_t_with_p_q_r(e, 3);
|
||||
require_rows(e, "select q from t where q!=10 allow filtering", {{I(11)}, {I(12)}, {I(13)}});
|
||||
require_rows(e, "select q from t where q!=10 and q!=13 allow filtering", {{I(11)}, {I(12)}});
|
||||
require_rows(e, "select r from t where q!=11 and r!=22 allow filtering", {{I(10), I(20)}, {I(13), I(23)}});
|
||||
}).get();
|
||||
}
|
||||
#endif // 0
|
||||
|
||||
// Comparisons against NULL: some positions reject null outright with
// invalid_request_exception, others are accepted and simply match no rows.
SEASTAR_THREAD_TEST_CASE(null) {
    do_with_cql_env_thread([](cql_test_env& e) {
        cquery_nofail(e, "create table t (pk1 int, pk2 int, ck1 float, ck2 float, r text, primary key((pk1,pk2),ck1,ck2))");
        const auto exec = [&] (const char* text) { return e.execute_cql(text).get(); };
        using ire = exceptions::invalid_request_exception;
        using exception_predicate::message_contains;
        const char* expected = "Invalid null";
        // TODO: investigate why null comparison isn't allowed here.
        BOOST_REQUIRE_EXCEPTION(exec("select * from t where pk1=0 and pk2=null"), ire, message_contains(expected));
        BOOST_REQUIRE_EXCEPTION(exec("select * from t where pk1=0 and pk2=0 and (ck1,ck2)>=(0,null)"),
                                ire, message_contains(expected));
        BOOST_REQUIRE_EXCEPTION(exec("select * from t where ck1=null allow filtering"), ire, message_contains(expected));
        BOOST_REQUIRE_EXCEPTION(exec("select * from t where r=null and ck1=null allow filtering"),
                                ire, message_contains(expected));
        // These null comparisons are accepted but can never match a row.
        require_rows(e, "select * from t where pk1=0 and pk2=0 and ck1<null", {});
        require_rows(e, "select * from t where r>null and ck1<null allow filtering", {});
        cquery_nofail(e, "insert into t(pk1,pk2,ck1,ck2) values(11,21,101,201)");
        require_rows(e, "select * from t where r=null allow filtering", {});
        cquery_nofail(e, "insert into t(pk1,pk2,ck1,ck2,r) values(11,21,101,202,'2')");
        require_rows(e, "select * from t where r>null allow filtering", {});
        require_rows(e, "select * from t where r<=null allow filtering", {});
        require_rows(e, "select * from t where pk1=null allow filtering", {});

        // A single-column partition key still rejects p=null without filtering.
        cquery_nofail(e, "create table tb (p int primary key)");
        BOOST_REQUIRE_EXCEPTION(exec("select * from tb where p=null"), ire, message_contains(expected));
    }).get();
}
|
||||
|
||||
/// Creates a tuple value from individual values.
|
||||
bytes make_tuple(std::vector<data_type> types, std::vector<data_value> values) {
|
||||
const auto tuple_type = tuple_type_impl::get_instance(std::move(types));
|
||||
return tuple_type->decompose(
|
||||
make_tuple_value(tuple_type, tuple_type_impl::native_type(std::move(values))));
|
||||
}
|
||||
|
||||
// Multi-column (tuple) equality restrictions on clustering columns, inline
// and through whole-tuple or per-element bind markers.
SEASTAR_THREAD_TEST_CASE(multi_col_eq) {
    do_with_cql_env_thread([](cql_test_env& e) {
        cquery_nofail(e, "create table t (p int, c1 text, c2 float, primary key (p, c1, c2))");
        cquery_nofail(e, "insert into t (p, c1, c2) values (1, 'one', 11);");
        cquery_nofail(e, "insert into t (p, c1, c2) values (2, 'two', 12);");
        require_rows(e, "select c2 from t where p=1 and (c1,c2)=('one',11)", {{F(11)}});
        // A one-element tuple restriction works like a plain equality.
        require_rows(e, "select c1 from t where p=1 and (c1)=('one')", {{T("one")}});
        require_rows(e, "select c2 from t where p=2 and (c1,c2)=('one',11)", {});
        require_rows(e, "select p from t where (c1,c2)=('two',12) allow filtering", {{I(2), T("two"), F(12)}});
        // Partial matches (one element right, the other wrong) yield nothing.
        require_rows(e, "select c2 from t where (c1,c2)=('one',12) allow filtering", {});
        require_rows(e, "select c2 from t where (c1,c2)=('two',11) allow filtering", {});
        require_rows(e, "select c1 from t where (c1)=('one') allow filtering", {{T("one")}});
        require_rows(e, "select c1 from t where (c1)=('x') allow filtering", {});
        // Whole tuple bound as a single named marker.
        auto prepared = e.prepare("select p from t where (c1,c2)=:t allow filtering").get0();
        require_rows(e, prepared, {{"t"}}, {make_tuple({utf8_type, float_type}, {sstring("two"), 12.f})},
                     {{I(2), T("two"), F(12)}});
        require_rows(e, prepared, {{"t"}}, {make_tuple({utf8_type, float_type}, {sstring("x"), 12.f})}, {});
        // Single tuple element bound positionally.
        prepared = e.prepare("select p from t where (c1,c2)=('two',?) allow filtering").get0();
        require_rows(e, prepared, {}, {F(12)}, {{I(2), T("two"), F(12)}});
        require_rows(e, prepared, {}, {F(99)}, {});
        // One-element tuple bound as a tuple marker...
        prepared = e.prepare("select c1 from t where (c1)=? allow filtering").get0();
        require_rows(e, prepared, {}, {make_tuple({utf8_type}, {sstring("one")})}, {{T("one")}});
        require_rows(e, prepared, {}, {make_tuple({utf8_type}, {sstring("two")})}, {{T("two")}});
        require_rows(e, prepared, {}, {make_tuple({utf8_type}, {sstring("three")})}, {});
        // ...and as a named scalar marker inside the tuple.
        prepared = e.prepare("select c1 from t where (c1)=(:c1) allow filtering").get0();
        require_rows(e, prepared, {{"c1"}}, {T("one")}, {{T("one")}});
        require_rows(e, prepared, {{"c1"}}, {T("two")}, {{T("two")}});
        require_rows(e, prepared, {{"c1"}}, {T("three")}, {});
    }).get();
}
|
||||
|
||||
// Multi-column (tuple) slice restrictions: lexicographic comparison over
// clustering prefixes, inline and with bind markers.
SEASTAR_THREAD_TEST_CASE(multi_col_slice) {
    do_with_cql_env_thread([](cql_test_env& e) {
        cquery_nofail(e, "create table t (p int, c1 text, c2 float, primary key (p, c1, c2))");
        cquery_nofail(e, "insert into t (p, c1, c2) values (1, 'a', 11);");
        cquery_nofail(e, "insert into t (p, c1, c2) values (2, 'b', 2);");
        cquery_nofail(e, "insert into t (p, c1, c2) values (3, 'c', 13);");
        require_rows(e, "select c2 from t where (c1,c2)>('a',20) allow filtering", {{F(2), T("b")}, {F(13), T("c")}});
        // Lower and upper bound combined into a half-open window.
        require_rows(e, "select p from t where (c1,c2)>=('a',20) and (c1,c2)<('b',3) allow filtering",
                     {{I(2), T("b"), F(2)}});
        require_rows(e, "select * from t where (c1,c2)<('a',11) allow filtering", {});
        require_rows(e, "select c1 from t where (c1,c2)<('a',12) allow filtering", {{T("a"), F(11)}});
        require_rows(e, "select c1 from t where (c1)>=('c') allow filtering", {{T("c")}});
        require_rows(e, "select c1 from t where (c1,c2)<=('c',13) allow filtering",
                     {{T("a"), F(11)}, {T("b"), F(2)}, {T("c"), F(13)}});
        // Inclusive bounds meeting at a single tuple select exactly that row.
        require_rows(e, "select c1 from t where (c1,c2)>=('b',2) and (c1,c2)<=('b',2) allow filtering",
                     {{T("b"), F(2)}});
        // Whole-tuple positional marker.
        auto prepared = e.prepare("select c1 from t where (c1,c2)<? allow filtering").get0();
        require_rows(e, prepared, {}, {make_tuple({utf8_type, float_type}, {sstring("a"), 12.f})}, {{T("a"), F(11)}});
        require_rows(e, prepared, {}, {make_tuple({utf8_type, float_type}, {sstring("a"), 11.f})}, {});
        // Named marker for one tuple element.
        prepared = e.prepare("select c1 from t where (c1,c2)<('a',:c2) allow filtering").get0();
        require_rows(e, prepared, {{"c2"}}, {F(12)}, {{T("a"), F(11)}});
        require_rows(e, prepared, {{"c2"}}, {F(11)}, {});
        // One-element tuple markers, positional and named.
        prepared = e.prepare("select c1 from t where (c1)>=? allow filtering").get0();
        require_rows(e, prepared, {}, {make_tuple({utf8_type}, {sstring("c")})}, {{T("c")}});
        require_rows(e, prepared, {}, {make_tuple({utf8_type}, {sstring("x")})}, {});
        prepared = e.prepare("select c1 from t where (c1)>=(:c1) allow filtering").get0();
        require_rows(e, prepared, {{"c1"}}, {T("c")}, {{T("c")}});
        require_rows(e, prepared, {{"c1"}}, {T("x")}, {});
    }).get();
}
|
||||
|
||||
// Multi-column slice restrictions over mixed clustering order
// (c1 desc, c2 asc).
SEASTAR_THREAD_TEST_CASE(multi_col_slice_reversed) {
    do_with_cql_env_thread([](cql_test_env& e) {
        cquery_nofail(e, "create table t (p int, c1 int, c2 float, primary key (p, c1, c2)) "
                      "with clustering order by (c1 desc, c2 asc)");
        cquery_nofail(e, "insert into t(p,c1,c2) values (1,11,21)");
        cquery_nofail(e, "insert into t(p,c1,c2) values (1,12,22)");
        cquery_nofail(e, "insert into t(p,c1,c2) values (1,12,23)");
        require_rows(e, "select c1 from t where (c1,c2)>(10,99) allow filtering",
                     {{I(11), F(21)}, {I(12), F(22)}, {I(12), F(23)}});
        require_rows(e, "select c1 from t where (c1,c2)<(12,0) allow filtering", {{I(11), F(21)}});
        require_rows(e, "select c1 from t where (c1,c2)>(12,22) allow filtering", {{I(12), F(23)}});
        // One-element tuple bounds on the descending column.
        require_rows(e, "select c1 from t where (c1)>(12) allow filtering", {});
        require_rows(e, "select c1 from t where (c1)<=(12) allow filtering", {{I(11)}, {I(12)}, {I(12)}});
    }).get();
}
|
||||
|
||||
// CONTAINS restrictions on set columns in every position: partition key,
// clustering key, regular, and static; also CONTAINS with a null operand.
SEASTAR_THREAD_TEST_CASE(set_contains) {
    do_with_cql_env_thread([](cql_test_env& e) {
        cquery_nofail(e, "create table t (p frozen<set<int>>, c frozen<set<int>>, s set<text>, st set<int> static, primary key (p, c))");
        require_rows(e, "select * from t where c contains 222 allow filtering", {});
        cquery_nofail(e, "insert into t (p, c, s) values ({1}, {11, 12}, {'a1', 'b1'})");
        cquery_nofail(e, "insert into t (p, c, s) values ({2}, {21, 22}, {'a2', 'b1'})");
        cquery_nofail(e, "insert into t (p, c, s) values ({1, 3}, {31, 32}, {'a3', 'b3'})");
        // Elements absent from every set match nothing.
        require_rows(e, "select * from t where s contains 'xyz' allow filtering", {});
        require_rows(e, "select * from t where p contains 999 allow filtering", {});
        // Partition-key sets.
        require_rows(e, "select p from t where p contains 3 allow filtering", {{SI({1, 3})}});
        require_rows(e, "select p from t where p contains 1 allow filtering", {{SI({1, 3})}, {SI({1})}});
        // NOTE(review): CONTAINS null currently matches every row — this
        // codifies the present behaviour.
        require_rows(e, "select p from t where p contains null allow filtering",
                     {{SI({1, 3})}, {SI({1})}, {SI({2})}});
        require_rows(e, "select p from t where p contains 1 and s contains 'a1' allow filtering",
                     {{SI({1}), ST({"a1", "b1"})}});
        // Clustering-key sets.
        require_rows(e, "select c from t where c contains 31 allow filtering", {{SI({31, 32})}});
        require_rows(e, "select c from t where c contains null allow filtering",
                     {{SI({11, 12})}, {SI({21, 22})}, {SI({31, 32})}});
        require_rows(e, "select c from t where c contains 11 and p contains 1 allow filtering",
                     {{SI({11, 12}), SI({1})}});
        // Regular-column sets.
        require_rows(e, "select s from t where s contains 'a1' allow filtering", {{ST({"a1", "b1"})}});
        require_rows(e, "select s from t where s contains 'b1' allow filtering",
                     {{ST({"a1", "b1"})}, {ST({"a2", "b1"})}});
        require_rows(e, "select s from t where s contains null allow filtering",
                     {{ST({"a1", "b1"})}, {ST({"a2", "b1"})}, {ST({"a3", "b3"})}});
        // TODO: uncomment when #6797 is fixed:
        // require_rows(e, "select s from t where s contains 'b1' and s contains '' allow filtering", {});
        require_rows(e, "select s from t where s contains 'b1' and p contains 4 allow filtering", {});
        // Static-column sets: one value per partition, repeated per row.
        cquery_nofail(e, "insert into t (p, c, st) values ({4}, {41}, {104})");
        require_rows(e, "select st from t where st contains 4 allow filtering", {});
        require_rows(e, "select st from t where st contains 104 allow filtering", {{SI({104})}});
        cquery_nofail(e, "insert into t (p, c, st) values ({4}, {42}, {105})");
        require_rows(e, "select c from t where st contains 104 allow filtering", {});
        require_rows(e, "select c from t where st contains 105 allow filtering",
                     {{SI({41}), SI({105})}, {SI({42}), SI({105})}});
        cquery_nofail(e, "insert into t (p, c, st) values ({5}, {52}, {104, 105})");
        require_rows(e, "select p from t where st contains 105 allow filtering",
                     {{SI({4}), SI({105})}, {SI({4}), SI({105})}, {SI({5}), SI({104, 105})}});
        require_rows(e, "select p from t where st contains null allow filtering",
                     {{SI({4}), SI({105})}, {SI({4}), SI({105})}, {SI({5}), SI({104, 105})}});
        cquery_nofail(e, "delete from t where p={4}");
        require_rows(e, "select p from t where st contains 105 allow filtering", {{SI({5}), SI({104, 105})}});
        // CONTAINS with a named bind marker.
        const auto prepared = e.prepare("select p from t where p contains :p allow filtering").get0();
        require_rows(e, prepared, {{"p"}}, {I(999)}, {});
        require_rows(e, prepared, {{"p"}}, {I(1)}, {{SI({1})}, {SI({1, 3})}});
        require_rows(e, prepared, {{"p"}}, {I(2)}, {{SI({2})}});
    }).get();
}
|
||||
|
||||
// CONTAINS restrictions on list columns in every position: partition key,
// clustering key, regular, and static.
SEASTAR_THREAD_TEST_CASE(list_contains) {
    do_with_cql_env_thread([](cql_test_env& e) {
        cquery_nofail(e, "create table t (p frozen<list<int>>, c frozen<list<int>>, ls list<int>, st list<text> static,"
                      "primary key(p, c))");
        cquery_nofail(e, "insert into t (p, c) values ([1], [11,12,13])");
        cquery_nofail(e, "insert into t (p, c, ls) values ([2], [21,22,23], [102])");
        cquery_nofail(e, "insert into t (p, c, ls, st) values ([3], [21,32,33], [103], ['a', 'b'])");
        cquery_nofail(e, "insert into t (p, c, st) values ([4], [41,42,43], ['a'])");
        cquery_nofail(e, "insert into t (p, c) values ([4], [41,42])");
        // Absent elements match nothing regardless of column kind.
        require_rows(e, "select p from t where p contains 222 allow filtering", {});
        require_rows(e, "select p from t where c contains 222 allow filtering", {});
        require_rows(e, "select p from t where ls contains 222 allow filtering", {});
        require_rows(e, "select p from t where st contains 'xyz' allow filtering", {});
        // Partition-key lists.
        require_rows(e, "select p from t where p contains 1 allow filtering", {{LI({1})}});
        require_rows(e, "select p from t where p contains 4 allow filtering", {{LI({4})}, {LI({4})}});
        // NOTE(review): CONTAINS null currently matches every row.
        require_rows(e, "select p from t where p contains null allow filtering",
                     {{LI({1})}, {LI({2})}, {LI({3})}, {LI({4})}, {LI({4})}});
        // Clustering-key lists.
        require_rows(e, "select c from t where c contains 22 allow filtering", {{LI({21,22,23})}});
        require_rows(e, "select c from t where c contains 21 allow filtering", {{LI({21,22,23})}, {LI({21,32,33})}});
        require_rows(e, "select c from t where c contains null allow filtering",
                     {{LI({11,12,13})}, {LI({21,22,23})}, {LI({21,32,33})}, {LI({41,42,43})}, {LI({41,42})}});
        require_rows(e, "select c from t where c contains 21 and ls contains 102 allow filtering",
                     {{LI({21,22,23}), LI({102})}});
        // Regular-column lists.
        require_rows(e, "select ls from t where ls contains 102 allow filtering", {{LI({102})}});
        require_rows(e, "select ls from t where ls contains null allow filtering", {{LI({102})}, {LI({103})}});
        // Static-column lists: repeated once per row in the partition.
        require_rows(e, "select st from t where st contains 'a' allow filtering",
                     {{LT({"a"})}, {LT({"a"})}, {LT({"a", "b"})}});
        require_rows(e, "select st from t where st contains null allow filtering",
                     {{LT({"a"})}, {LT({"a"})}, {LT({"a", "b"})}});
        require_rows(e, "select st from t where st contains 'b' allow filtering", {{LT({"a", "b"})}});
        // A deleted partition drops out of CONTAINS results.
        cquery_nofail(e, "delete from t where p=[2]");
        require_rows(e, "select c from t where c contains 21 allow filtering", {{LI({21,32,33})}});
    }).get();
}
|
||||
|
||||
// CONTAINS restrictions on map columns (matching against map *values*) in
// partition-key, clustering-key, regular, and static positions.
SEASTAR_THREAD_TEST_CASE(map_contains) {
    do_with_cql_env_thread([](cql_test_env& e) {
        cquery_nofail(e, "create table t (p frozen<map<int,int>>, c frozen<map<int,int>>, m map<int,int>,"
                      "s map<int,int> static, primary key(p, c))");
        cquery_nofail(e, "insert into t (p, c, m) values ({1:1}, {10:10}, {1:11, 2:12})");
        require_rows(e, "select * from t where m contains 21 allow filtering", {});
        cquery_nofail(e, "insert into t (p, c, m) values ({2:2}, {20:20}, {1:21, 2:12})");
        cquery_nofail(e, "insert into t (p, c) values ({3:3}, {30:30})");
        cquery_nofail(e, "insert into t (p, c, s) values ({3:3}, {31:31}, {3:100})");
        cquery_nofail(e, "insert into t (p, c, s) values ({4:4}, {40:40}, {4:100})");
        const auto mt = map_type_impl::get_instance(int32_type, int32_type, true);
        // Partition-key maps: {3:3} owns two rows.
        const auto p3 = mt->decompose(make_map_value(mt, map_type_impl::native_type({{3, 3}})));
        require_rows(e, "select p from t where p contains 3 allow filtering", {{p3}, {p3}});
        const auto p1 = mt->decompose(make_map_value(mt, map_type_impl::native_type({{1, 1}})));
        const auto p2 = mt->decompose(make_map_value(mt, map_type_impl::native_type({{2, 2}})));
        const auto p4 = mt->decompose(make_map_value(mt, map_type_impl::native_type({{4, 4}})));
        // NOTE(review): CONTAINS null currently matches every row.
        require_rows(e, "select p from t where p contains null allow filtering", {{p1}, {p2}, {p3}, {p3}, {p4}});
        // Clustering-key maps.
        const auto c4 = mt->decompose(make_map_value(mt, map_type_impl::native_type({{40, 40}})));
        require_rows(e, "select c from t where c contains 40 allow filtering", {{c4}});
        // Regular-column maps: CONTAINS matches values, not keys.
        const auto m2 = mt->decompose(
                make_map_value(mt, map_type_impl::native_type({{1, 21}, {2, 12}})));
        require_rows(e, "select m from t where m contains 21 allow filtering", {{m2}});
        const auto m1 = mt->decompose(
                make_map_value(mt, map_type_impl::native_type({{1, 11}, {2, 12}})));
        require_rows(e, "select m from t where m contains 11 allow filtering", {{m1}});
        require_rows(e, "select m from t where m contains 12 allow filtering", {{m1}, {m2}});
        require_rows(e, "select m from t where m contains null allow filtering", {{m1}, {m2}});
        require_rows(e, "select m from t where m contains 11 and m contains 12 allow filtering", {{m1}});
        cquery_nofail(e, "delete from t where p={2:2}");
        require_rows(e, "select m from t where m contains 12 allow filtering", {{m1}});
        // Static-column maps: repeated once per row in the partition.
        const auto s3 = mt->decompose(
                make_map_value(mt, map_type_impl::native_type({{3, 100}})));
        const auto s4 = mt->decompose(
                make_map_value(mt, map_type_impl::native_type({{4, 100}})));
        require_rows(e, "select s from t where s contains 100 allow filtering", {{s3}, {s3}, {s4}});
    }).get();
}
|
||||
|
||||
// CONTAINS KEY restrictions on map columns (matching against map *keys*) in
// partition-key, clustering-key, regular, and static positions, including
// null operands and bind markers.
SEASTAR_THREAD_TEST_CASE(contains_key) {
    do_with_cql_env_thread([](cql_test_env& e) {
        cquery_nofail(e,
                "create table t (p frozen<map<int,int>>, c frozen<map<text,int>>, m map<int,int>, "
                "s map<int,text> static, primary key(p, c))");
        cquery_nofail(e, "insert into t (p,c,m) values ({1:11, 2:12}, {'el':11, 'twel':12}, {11:11, 12:12})");
        // Keys absent from every map match nothing.
        require_rows(e, "select * from t where p contains key 3 allow filtering", {});
        require_rows(e, "select * from t where c contains key 'x' allow filtering", {});
        require_rows(e, "select * from t where m contains key 3 allow filtering", {});
        require_rows(e, "select * from t where s contains key 3 allow filtering", {});
        cquery_nofail(e, "insert into t (p,c,m) values ({3:33}, {'th':33}, {11:33})");
        const auto int_map_type = map_type_impl::get_instance(int32_type, int32_type, true);
        const auto m1 = int_map_type->decompose(
                make_map_value(int_map_type, map_type_impl::native_type({{11, 11}, {12, 12}})));
        const auto m3 = int_map_type->decompose(make_map_value(int_map_type, map_type_impl::native_type({{11, 33}})));
        // Regular-column maps.
        require_rows(e, "select m from t where m contains key 12 allow filtering", {{m1}});
        require_rows(e, "select m from t where m contains key 11 allow filtering", {{m1}, {m3}});
        // NOTE(review): CONTAINS KEY null currently matches every row.
        require_rows(e, "select m from t where m contains key null allow filtering", {{m1}, {m3}});
        // Clustering-key maps (text keys).
        const auto text_map_type = map_type_impl::get_instance(utf8_type, int32_type, true);
        const auto c1 = text_map_type->decompose(
                make_map_value(text_map_type, map_type_impl::native_type({{"el", 11}, {"twel", 12}})));
        require_rows(e, "select c from t where c contains key 'el' allow filtering", {{c1}});
        require_rows(e, "select c from t where c contains key 'twel' allow filtering", {{c1}});
        const auto c3 = text_map_type->decompose(
                make_map_value(text_map_type, map_type_impl::native_type({{"th", 33}})));
        require_rows(e, "select c from t where c contains key null allow filtering", {{c1}, {c3}});
        // Partition-key maps.
        const auto p3 = int_map_type->decompose(make_map_value(int_map_type, map_type_impl::native_type({{3, 33}})));
        require_rows(e, "select p from t where p contains key 3 allow filtering", {{p3}});
        require_rows(e, "select p from t where p contains key 3 and m contains key null allow filtering", {{p3, m3}});
        const auto p1 = int_map_type->decompose(
                make_map_value(int_map_type, map_type_impl::native_type({{1, 11}, {2, 12}})));
        require_rows(e, "select p from t where p contains key null allow filtering", {{p1}, {p3}});
        cquery_nofail(e, "insert into t (p,c) values ({4:44}, {'aaaa':44})");
        require_rows(e, "select m from t where m contains key 12 allow filtering", {{m1}});
        cquery_nofail(e, "delete from t where p={1:11, 2:12}");
        require_rows(e, "select m from t where m contains key 12 allow filtering", {});
        // Static-column maps: repeated once per row in the partition.
        require_rows(e, "select s from t where s contains key 55 allow filtering", {});
        cquery_nofail(e, "insert into t (p,c,s) values ({5:55}, {'aaaa':55}, {55:'aaaa'})");
        cquery_nofail(e, "insert into t (p,c,s) values ({5:55}, {'aaa':55}, {55:'aaaa'})");
        const auto int_text_map_type = map_type_impl::get_instance(int32_type, utf8_type, true);
        const auto s5 = int_text_map_type->decompose(
                make_map_value(int_text_map_type, map_type_impl::native_type({{55, "aaaa"}})));
        require_rows(e, "select s from t where s contains key 55 allow filtering", {{s5}, {s5}});
        require_rows(e, "select s from t where s contains key 55 and s contains key null allow filtering",
                     {{s5}, {s5}});
        const auto c51 = text_map_type->decompose(
                make_map_value(text_map_type, map_type_impl::native_type({{"aaaa", 55}})));
        const auto c52 = text_map_type->decompose(
                make_map_value(text_map_type, map_type_impl::native_type({{"aaa", 55}})));
        require_rows(e, "select c from t where s contains key 55 allow filtering", {{c51, s5}, {c52, s5}});
        cquery_nofail(e, "insert into t (p,c,s) values ({6:66}, {'bbb':66}, {66:'bbbb', 55:'bbbb'})");
        const auto p5 = int_map_type->decompose(make_map_value(int_map_type, map_type_impl::native_type({{5, 55}})));
        const auto p6 = int_map_type->decompose(make_map_value(int_map_type, map_type_impl::native_type({{6, 66}})));
        const auto s6 = int_text_map_type->decompose(
                make_map_value(int_text_map_type, map_type_impl::native_type({{55, "bbbb"}, {66, "bbbb"}})));
        require_rows(e, "select p from t where s contains key 55 allow filtering", {{p5, s5}, {p5, s5}, {p6, s6}});
        // CONTAINS KEY with a named bind marker.
        const auto prepared = e.prepare("select p from t where s contains key :k allow filtering").get0();
        require_rows(e, prepared, {{"k"}}, {I(55)}, {{p5, s5}, {p5, s5}, {p6, s6}});
        require_rows(e, prepared, {{"k"}}, {I(999)}, {});
    }).get();
}
|
||||
|
||||
// LIKE restrictions on partition-key, clustering-key, regular, and static
// text columns, including combinations and bind markers.
SEASTAR_THREAD_TEST_CASE(like) {
    do_with_cql_env_thread([](cql_test_env& e) {
        cquery_nofail(e, "create table t (pk text, ck1 text, ck2 text, r text, s text static, primary key (pk, ck1, ck2))");
        require_rows(e, "select * from t where pk like 'a' allow filtering", {});
        cquery_nofail(e, "insert into t (pk, ck1, ck2) values ('pa', 'c1a', 'c2a');");
        // 'a' has no wildcard, so it only matches the exact string 'a'.
        require_rows(e, "select * from t where pk like 'a' allow filtering", {});
        require_rows(e, "select pk from t where pk like '_a' allow filtering", {{T("pa")}});
        // An empty pattern never matches a non-empty value.
        require_rows(e, "select pk from t where pk like '_a' and ck1 like '' allow filtering", {});
        require_rows(e, "select pk from t where r like '_a' allow filtering", {});
        require_rows(e, "select pk from t where pk like '_a' and ck2 like '_2%' allow filtering",
                     {{T("pa"), T("c2a")}});
        cquery_nofail(e, "insert into t (pk, ck1, ck2, r, s) values ('pb', 'c1b', 'c2b', 'rb', 'sb');");
        require_rows(e, "select pk from t where pk like '_a' allow filtering", {{T("pa")}});
        require_rows(e, "select r from t where r like '_a' allow filtering", {});
        require_rows(e, "select r from t where r like '_b' allow filtering", {{T("rb")}});
        cquery_nofail(e, "insert into t (pk, ck1, ck2, r) values ('pb', 'c1ba', 'c2ba', 'rba');");
        require_rows(e, "select r from t where r like 'rb%' allow filtering", {{T("rb")}, {T("rba")}});
        // Static column: one value per partition, repeated per row.
        require_rows(e, "select pk from t where s like '_b%' allow filtering",
                     {{T("pb"), T("sb")}, {T("pb"), T("sb")}});
        cquery_nofail(e, "insert into t (pk, ck1, ck2, r, s) values ('pc', 'c1c', 'c2c', 'rc', 'sc');");
        require_rows(e, "select s from t where s like 's%' allow filtering", {{T("sb")}, {T("sb")}, {T("sc")}});
        require_rows(e, "select r from t where ck1 like '' allow filtering", {});
        require_rows(e, "select ck1 from t where ck1 like '%c' allow filtering", {{T("c1c")}});
        require_rows(e, "select ck2 from t where ck2 like 'c%' allow filtering",
                     {{T("c2a")}, {T("c2b")}, {T("c2ba")}, {T("c2c")}});
        require_rows(e, "select * from t where ck1 like '' and ck2 like '_2a' allow filtering", {});
        // Mixing LIKE with equality is allowed.
        require_rows(e, "select r from t where r='rb' and ck2 like 'c2_' allow filtering", {{T("rb"), T("c2b")}});
        // LIKE pattern as a positional bind marker.
        const auto prepared = e.prepare("select ck1 from t where ck1 like ? allow filtering").get0();
        require_rows(e, prepared, {}, {T("%c")}, {{T("c1c")}});
        require_rows(e, prepared, {}, {T("%xyxyz")}, {});
    }).get();
}
|
||||
|
||||
// Exercises IN restrictions on scalar columns: clustering (c), regular (r), and
// static (s), with literal IN lists and with the whole list bound via a ? marker.
SEASTAR_THREAD_TEST_CASE(scalar_in) {
    do_with_cql_env_thread([](cql_test_env& e) {
        cquery_nofail(e, "create table t (p int, c int, r float, s text static, primary key (p, c))");
        // IN over an empty table yields no rows.
        require_rows(e, "select c from t where c in (11,12,13) allow filtering", {});
        cquery_nofail(e, "insert into t(p,c) values (1,11)");
        require_rows(e, "select c from t where c in (11,12,13) allow filtering", {{I(11)}});
        cquery_nofail(e, "insert into t(p,c,r) values (1,11,21)");
        cquery_nofail(e, "insert into t(p,c,r) values (2,12,22)");
        cquery_nofail(e, "insert into t(p,c,r) values (3,13,23)");
        cquery_nofail(e, "insert into t(p,c,r) values (4,14,24)");
        cquery_nofail(e, "insert into t(p,c,r,s) values (4,15,24,'34')");
        cquery_nofail(e, "insert into t(p,c,r,s) values (5,15,25,'35')");
        // Clustering-column IN: matching, single-element, no-match, and mixed lists.
        require_rows(e, "select c from t where c in (11,12,13) allow filtering", {{I(11)}, {I(12)}, {I(13)}});
        require_rows(e, "select c from t where c in (11) allow filtering", {{I(11)}});
        require_rows(e, "select c from t where c in (999) allow filtering", {});
        require_rows(e, "select c from t where c in (11,999) allow filtering", {{I(11)}});
        // IN on the clustering column combined with IN on a regular column.
        require_rows(e, "select c from t where c in (11,12,13) and r in (21,24) allow filtering", {{I(11), F(21)}});
        require_rows(e, "select c from t where c in (11,12,13) and r in (21,22) allow filtering",
                     {{I(11), F(21)}, {I(12), F(22)}});
        // Regular-column IN, alone and combined with slice and static-column restrictions.
        require_rows(e, "select r from t where r in (999) allow filtering", {});
        require_rows(e, "select r from t where r in (1,2,3) allow filtering", {});
        require_rows(e, "select r from t where r in (22,25) allow filtering", {{F(22)}, {F(25)}});
        require_rows(e, "select r from t where r in (22,25) and c < 20 allow filtering",
                     {{F(22), I(12)}, {F(25), I(15)}});
        require_rows(e, "select r from t where r in (22,25) and s>='25' allow filtering", {{F(25), T("35")}});
        require_rows(e, "select r from t where r in (25) and s>='25' allow filtering", {{F(25), T("35")}});
        require_rows(e, "select r from t where r in (25) allow filtering", {{F(25)}});
        // A null element in the IN list matches nothing; other elements still match.
        require_rows(e, "select r from t where r in (null,25) allow filtering", {{F(25)}});
        cquery_nofail(e, "delete from t where p=2");
        require_rows(e, "select r from t where r in (22,25) allow filtering", {{F(25)}});
        // Static-column IN: partition 4 has two rows, so its static '34' appears twice.
        require_rows(e, "select s from t where s in ('34','35') allow filtering", {{T("34")}, {T("34")}, {T("35")}});
        require_rows(e, "select s from t where s in ('34','35','999') allow filtering",
                     {{T("34")}, {T("34")}, {T("35")}});
        require_rows(e, "select s from t where s in ('34') allow filtering", {{T("34")}, {T("34")}});
        require_rows(e, "select s from t where s in ('34','35') and r=24 allow filtering",
                     {{T("34"), F(24)}, {T("34"), F(24)}});
        // The whole IN list supplied as a single bound variable.
        const auto stmt = e.prepare("select r from t where r in ? allow filtering").get0();
        require_rows(e, stmt, {}, {LF({99.f, 88.f, 77.f})}, {});
        require_rows(e, stmt, {}, {LF({21.f})}, {{F(21)}});
        require_rows(e, stmt, {}, {LF({21.f, 22.f, 23.f})}, {{F(21)}, {F(23)}});
        require_rows(e, stmt, {}, {LF({24.f, 25.f})}, {{F(24)}, {F(24)}, {F(25)}});
        // Bound lists containing a null element: null matches no row.
        require_rows(e, stmt, {}, {LF({25.f, data_value::make_null(float_type)})}, {{F(25)}});
        require_rows(e, stmt, {}, {LF({99.f, data_value::make_null(float_type)})}, {});
    }).get();
}
|
||||
|
||||
// Exercises IN restrictions on frozen<list<int>> partition and clustering columns.
SEASTAR_THREAD_TEST_CASE(list_in) {
    do_with_cql_env_thread([](cql_test_env& e) {
        cquery_nofail(e, "create table t (p frozen<list<int>>, c frozen<list<int>>, primary key(p, c))");
        cquery_nofail(e, "insert into t (p, c) values ([1], [11,12,13])");
        cquery_nofail(e, "insert into t (p, c) values ([2], [21,22,23])");
        cquery_nofail(e, "insert into t (p, c) values ([3], [31,32,33])");
        cquery_nofail(e, "insert into t (p, c) values ([4], [41,42,43])");
        // The empty list is stored as a second clustering value under partition [4].
        cquery_nofail(e, "insert into t (p, c) values ([4], [])");
        cquery_nofail(e, "insert into t (p, c) values ([5], [51,52,53])");
        // No stored list equals either alternative.
        require_rows(e, "select c from t where c in ([11,12],[11,13]) allow filtering", {});
        // List comparison is element-order sensitive: only [11,12,13] matches.
        require_rows(e, "select c from t where c in ([11,12,13],[11,13,12]) allow filtering",
                     {{LI({11,12,13})}});
        require_rows(e, "select c from t where c in ([11,12,13],[11,13,12],[41,42,43]) allow filtering",
                     {{LI({11,12,13})}, {LI({41,42,43})}});
        // IN on the partition key combined with IN on the clustering key.
        require_rows(e, "select c from t where p in ([1],[2],[4]) and c in ([11,12,13], [41,42,43]) allow filtering",
                     {{LI({11,12,13})}, {LI({41,42,43})}});
        // The empty list is a legitimate clustering value and can be matched by IN.
        require_rows(e, "select c from t where c in ([],[11,13,12]) allow filtering", {{LI({})}});
    }).get();
}
|
||||
|
||||
// Exercises IN restrictions on frozen<set<int>> partition and clustering columns,
// combined with restrictions on a regular text column.
SEASTAR_THREAD_TEST_CASE(set_in) {
    do_with_cql_env_thread([](cql_test_env& e) {
        cquery_nofail(e, "create table t (p frozen<set<int>>, c frozen<set<int>>, r text, primary key (p, c))");
        // IN over an empty table yields no rows.
        require_rows(e, "select * from t where c in ({222}) allow filtering", {});
        cquery_nofail(e, "insert into t (p, c) values ({1,11}, {21,201})");
        cquery_nofail(e, "insert into t (p, c, r) values ({1,11}, {22,202}, '2')");
        // Set comparison is whole-value: {21} does not match the stored {21,201}.
        require_rows(e, "select * from t where c in ({222}, {21}) allow filtering", {});
        require_rows(e, "select c from t where c in ({222}, {21,201}) allow filtering", {{SI({21, 201})}});
        require_rows(e, "select c from t where c in ({22,202}, {21,201}) allow filtering",
                     {{SI({21, 201})}, {SI({22, 202})}});
        // Combined with an equality restriction on the regular column r.
        require_rows(e, "select c from t where c in ({222}, {21,201}) and r='' allow filtering", {});
        require_rows(e, "select c from t where c in ({222}, {21,201}) and r='x' allow filtering", {});
        require_rows(e, "select c from t where c in ({22,202}, {21,201}) and r='2' allow filtering",
                     {{SI({22, 202}), T("2")}});
        // Combined with IN on the partition key; the unmatched {222} alternative is harmless.
        require_rows(e, "select c from t where c in ({22,202}, {21,201}) and p in ({1,11}, {222}) allow filtering",
                     {{SI({21, 201})}, {SI({22, 202})}});
    }).get();
}
|
||||
|
||||
// Exercises IN restrictions on frozen<map<int,int>> partition and clustering
// columns; expected cells are built by serializing map values by hand.
SEASTAR_THREAD_TEST_CASE(map_in) {
    do_with_cql_env_thread([](cql_test_env& e) {
        cquery_nofail(e, "create table t (p frozen<map<int,int>>, c frozen<map<int,int>>, r int, primary key(p, c))");
        cquery_nofail(e, "insert into t (p, c) values ({1:1}, {10:10})");
        cquery_nofail(e, "insert into t (p, c, r) values ({1:1}, {10:10,11:11}, 12)");
        // Map comparison is whole-value: {10:11} and {11:11} match nothing stored.
        require_rows(e, "select * from t where c in ({10:11},{10:11},{11:11}) allow filtering", {});
        const auto my_map_type = map_type_impl::get_instance(int32_type, int32_type, true);
        // Serialized form of the stored map {10:10}, for expected-row comparison.
        const auto c1a = my_map_type->decompose(make_map_value(my_map_type, map_type_impl::native_type({{10, 10}})));
        require_rows(e, "select c from t where c in ({10:11}, {10:10}, {11:11}) allow filtering", {{c1a}});
        // Serialized form of the stored map {10:10,11:11}.
        const auto c1b = my_map_type->decompose(
                make_map_value(my_map_type, map_type_impl::native_type({{10, 10}, {11, 11}})));
        require_rows(e, "select c from t where c in ({10:11}, {10:10}, {10:10,11:11}) allow filtering",
                     {{c1a}, {c1b}});
        // Combined with restrictions on the regular column r, including a null IN element.
        require_rows(e, "select c from t where c in ({10:11}, {10:10}, {10:10,11:11}) and r=12 allow filtering",
                     {{c1b, I(12)}});
        require_rows(e, "select c from t where c in ({10:11}, {10:10}, {10:10,11:11}) and r in (12,null) "
                     "allow filtering", {{c1b, I(12)}});
        // Combined with IN on the partition key; {2:2} matches no stored partition.
        require_rows(e, "select c from t where c in ({10:11}, {10:10}, {10:10,11:11}) and p in ({1:1},{2:2}) "
                     "allow filtering", {{c1a}, {c1b}});
    }).get();
}
|
||||
|
||||
// Exercises multi-column IN restrictions -- (ck1,ck2) in (...) -- with literal
// tuples, a fully-bound tuple list, single-tuple markers, named markers, and
// positional markers inside one tuple of a literal list.
SEASTAR_THREAD_TEST_CASE(multi_col_in) {
    do_with_cql_env_thread([](cql_test_env& e) {
        cquery_nofail(e, "create table t (pk int, ck1 int, ck2 float, r text, primary key (pk, ck1, ck2))");
        require_rows(e, "select ck1 from t where (ck1,ck2) in ((11,21),(12,22)) allow filtering", {});
        cquery_nofail(e, "insert into t(pk,ck1,ck2) values (1,11,21)");
        require_rows(e, "select ck1 from t where (ck1,ck2) in ((11,21),(12,22)) allow filtering", {{I(11), F(21)}});
        require_rows(e, "select ck1 from t where (ck1,ck2) in ((11,21)) allow filtering", {{I(11), F(21)}});
        cquery_nofail(e, "insert into t(pk,ck1,ck2) values (2,12,22)");
        require_rows(e, "select ck1 from t where (ck1,ck2) in ((11,21),(12,22)) allow filtering",
                     {{I(11), F(21)}, {I(12), F(22)}});
        cquery_nofail(e, "insert into t(pk,ck1,ck2) values (3,13,23)");
        require_rows(e, "select ck1 from t where (ck1,ck2) in ((11,21),(12,22)) allow filtering",
                     {{I(11), F(21)}, {I(12), F(22)}});
        require_rows(e, "select ck1 from t where (ck1,ck2) in ((13,23)) allow filtering", {{I(13), F(23)}});
        cquery_nofail(e, "insert into t(pk,ck1,ck2,r) values (4,13,23,'a')");
        // (13,23) now exists in two partitions.
        require_rows(e, "select pk from t where (ck1,ck2) in ((13,23)) allow filtering",
                     {{I(3), I(13), F(23)}, {I(4), I(13), F(23)}});
        require_rows(e, "select pk from t where (ck1) in ((13),(33),(44)) allow filtering",
                     {{I(3), I(13)}, {I(4), I(13)}});
        // TODO: uncomment when #6200 is fixed.
        // require_rows(e, "select pk from t where (ck1,ck2) in ((13,23)) and r='a' allow filtering",
        //              {{I(4), I(13), F(23), T("a")}});
        cquery_nofail(e, "delete from t where pk=4");
        require_rows(e, "select pk from t where (ck1,ck2) in ((13,23)) allow filtering", {{I(3), I(13), F(23)}});
        // The entire IN list bound as a single variable.
        auto prepared = e.prepare("select ck1 from t where (ck1,ck2) in ? allow filtering").get0();
        // Serializes (int32,float) pairs into a CQL list<tuple<int,float>> bind value.
        auto as_tuple_list = [] (std::vector<std::tuple<int32_t, float>> pairs) {
            const auto pair_type = tuple_type_impl::get_instance({int32_type, float_type});
            const auto pairs_type = list_type_impl::get_instance(pair_type, true);
            std::vector<data_value> elements;
            elements.reserve(pairs.size());
            for (const auto& [i, f] : pairs) {
                elements.push_back(make_tuple_value(pair_type, tuple_type_impl::native_type({i, f})));
            }
            return pairs_type->decompose(make_list_value(pairs_type, std::move(elements)));
        };
        require_rows(e, prepared, {}, {as_tuple_list({{11, 21}})}, {{I(11), F(21)}});
        require_rows(e, prepared, {}, {as_tuple_list({{11, 21}, {11, 99}})}, {{I(11), F(21)}});
        require_rows(e, prepared, {}, {as_tuple_list({{12, 22}})}, {{I(12), F(22)}});
        require_rows(e, prepared, {}, {as_tuple_list({{13, 13}, {12, 22}})}, {{I(12), F(22)}});
        require_rows(e, prepared, {}, {as_tuple_list({{12, 21}})}, {});
        require_rows(e, prepared, {}, {as_tuple_list({{12, 21}, {12, 21}, {13, 21}, {14, 21}})}, {});
        // A single IN alternative bound as one tuple variable.
        prepared = e.prepare("select ck1 from t where (ck1,ck2) in (?) allow filtering").get0();
        // Serializes one (int32,float) pair as a CQL tuple bind value.
        auto as_tuple = [] (int32_t i, float f) {
            return make_tuple({int32_type, float_type}, {i, f});
        };
        require_rows(e, prepared, {}, {as_tuple(11, 21)}, {{I(11), F(21)}});
        require_rows(e, prepared, {}, {as_tuple(12, 22)}, {{I(12), F(22)}});
        require_rows(e, prepared, {}, {as_tuple(12, 21)}, {});
        // Named tuple markers.
        prepared = e.prepare("select ck1 from t where (ck1,ck2) in (:t1,:t2) allow filtering").get0();
        require_rows(e, prepared, {{"t1", "t2"}}, {as_tuple(11, 21), as_tuple(12, 22)}, {{I(11), F(21)}, {I(12), F(22)}});
        require_rows(e, prepared, {{"t1", "t2"}}, {as_tuple(11, 21), as_tuple(11, 21)}, {{I(11), F(21)}});
        require_rows(e, prepared, {{"t1", "t2"}}, {as_tuple(11, 21), as_tuple(99, 99)}, {{I(11), F(21)}});
        require_rows(e, prepared, {{"t1", "t2"}}, {as_tuple(9, 9), as_tuple(99, 99)}, {});
        // Parsing error:
        // prepared = e.prepare("select ck1 from t where (ck1,ck2) in ((13,23),:p1) allow filtering").get0();
        // Positional markers inside one tuple of a literal IN list.
        prepared = e.prepare("select ck1 from t where (ck1,ck2) in ((13,23),(?,?)) allow filtering").get0();
        require_rows(e, prepared, {}, {I(0), F(0)}, {{I(13), F(23)}});
        require_rows(e, prepared, {}, {I(11), F(21)}, {{I(11), F(21)}, {I(13), F(23)}});
    }).get();
}
|
||||
|
||||
// Exercises slice and equality bounds on a clustering column, in both
// single-column and one-element tuple notation, plus bound upper limits.
SEASTAR_THREAD_TEST_CASE(bounds) {
    do_with_cql_env_thread([](cql_test_env& e) {
        cquery_nofail(e, "create table t (p int, c int, primary key (p, c))");
        cquery_nofail(e, "insert into t (p, c) values (1, 11);");
        cquery_nofail(e, "insert into t (p, c) values (2, 12);");
        cquery_nofail(e, "insert into t (p, c) values (3, 13);");
        // Single-column restrictions and their one-element tuple equivalents.
        require_rows(e, "select p from t where p=1 and c > 10", {{I(1)}});
        require_rows(e, "select p from t where p=1 and c = 11", {{I(1)}});
        require_rows(e, "select p from t where p=1 and (c) >= (10)", {{I(1)}});
        require_rows(e, "select p from t where p=1 and (c) = (11)", {{I(1)}});
        // Open vs half-closed ranges across several partitions.
        require_rows(e, "select c from t where p in (1,2,3) and c > 11 and c < 13", {{I(12)}});
        require_rows(e, "select c from t where p in (1,2,3) and c >= 11 and c < 13", {{I(11)}, {I(12)}});
        // Upper bound supplied via a bind marker.
        auto stmt = e.prepare("select c from t where p in (1,2,3) and c >= 11 and c < ?").get0();
        require_rows(e, stmt, {}, {I(13)}, {{I(11)}, {I(12)}});
        require_rows(e, stmt, {}, {I(10)}, {});
        // Tuple-notation upper bound supplied via a bind marker.
        stmt = e.prepare("select c from t where p in (1,2,3) and (c) < ?").get0();
        require_rows(e, stmt, {}, {make_tuple({int32_type}, {13})}, {{I(11)}, {I(12)}});
        require_rows(e, stmt, {}, {make_tuple({int32_type}, {11})}, {});
    }).get();
}
|
||||
|
||||
// Exercises clustering bounds on a table whose second clustering column is
// stored in descending order (clustering order by ck1 asc, ck2 desc).
SEASTAR_THREAD_TEST_CASE(bounds_reversed) {
    do_with_cql_env_thread([](cql_test_env& e) {
        cquery_nofail(e, "create table t (pk int, ck1 int, ck2 int, primary key (pk, ck1, ck2)) "
                      "with clustering order by (ck1 asc, ck2 desc)");
        cquery_nofail(e, "insert into t (pk,ck1,ck2) values (1,11,21);");
        cquery_nofail(e, "insert into t (pk,ck1,ck2) values (2,12,22);");
        // Single-column and one-element tuple restrictions on ck1.
        require_rows(e, "select pk from t where pk=1 and ck1>10", {{I(1)}});
        require_rows(e, "select pk from t where pk=1 and ck1=11", {{I(1)}});
        require_rows(e, "select pk from t where pk=1 and (ck1)>=(10)", {{I(1)}});
        // Multi-column slices spanning the reversed ck2.
        require_rows(e, "select pk from t where pk=1 and (ck1,ck2)>=(10,30)", {{I(1)}});
        require_rows(e, "select pk from t where pk=1 and (ck1,ck2)>=(10,30) and (ck1)<(20)", {{I(1)}});
        require_rows(e, "select pk from t where pk=1 and (ck1,ck2)>=(10,20) and (ck1,ck2)<(20,21)", {{I(1)}});
        // The stored (11,21) lies above the upper bound (11,20), so nothing matches.
        require_rows(e, "select pk from t where pk=1 and (ck1,ck2)>=(10,20) and (ck1,ck2)<=(11,20)", {});
        require_rows(e, "select pk from t where pk=1 and (ck1)=(11)", {{I(1)}});
        require_rows(e, "select pk from t where pk=1 and (ck1,ck2)=(11,21)", {{I(1)}});
        cquery_nofail(e, "insert into t (pk,ck1,ck2) values (2,12,23);");
        // Slices on the reversed column alone, under a fixed ck1.
        require_rows(e, "select ck1 from t where pk in (1,2,3) and ck1=12 and ck2<23", {{I(12)}});
        require_rows(e, "select ck1 from t where pk in (1,2,3) and ck1=12 and ck2<24", {{I(12)}, {I(12)}});
    }).get();
}
|
||||
|
||||
// Exercises token() restrictions on a composite partition key (p, q):
// equality, slices, their combinations, and a bind marker inside token().
SEASTAR_THREAD_TEST_CASE(token) {
    do_with_cql_env_thread([](cql_test_env& e) {
        cquery_nofail(e, "create table t (p int, q int, r int, primary key ((p, q)))");
        cquery_nofail(e, "insert into t (p,q,r) values (1,11,101);");
        cquery_nofail(e, "insert into t (p,q,r) values (2,12,102);");
        cquery_nofail(e, "insert into t (p,q,r) values (3,13,103);");
        require_rows(e, "select p from t where token(p,q) = token(1,11)", {{I(1)}});
        // A degenerate closed slice [token, token] behaves like equality.
        require_rows(e, "select p from t where token(p,q) >= token(1,11) and token(p,q) <= token(1,11)", {{I(1)}});

        // WARNING: the following two cases rely on having no token collisions, which cannot be guaranteed.
        // Keeping them because (absent collisions) they complete code coverage, guarding against
        // hard-to-trigger bugs.
        require_rows(e, "select p from t where token(p,q) = token(1,11) and token(p,q) = token(2,12)", {});
        require_rows(e, "select p from t where token(p,q) <= token(1,11) and token(p,q) = token(2,12)", {{I(2)}});

        // A contradictory open range around a single token is empty.
        require_rows(e, "select p from t where token(p,q) > token(9,9) and token(p,q) < token(9,9)", {});
        // Both open bounds at the minimum token value: all three partitions are returned.
        const auto min_bounds = format("select p from t where token(p,q) > {:d} and token(p,q) < {:d}",
                std::numeric_limits<int64_t>::min(), std::numeric_limits<int64_t>::min());
        require_rows(e, min_bounds, {{I(1)}, {I(2)}, {I(3)}});
        // Token restrictions combined with filtering on the regular column r.
        require_rows(e, "select p from t where token(p,q) <= token(1,11) and r<102 allow filtering",
                     {{I(1), I(101)}});
        require_rows(e, "select p from t where token(p,q) = token(2,12) and r<102 allow filtering", {});
        // A bind marker inside the token() argument list.
        const auto stmt = e.prepare("select p from t where token(p,q) = token(1,?)").get0();
        require_rows(e, stmt, {}, {I(11)}, {{I(1)}});
        require_rows(e, stmt, {}, {I(10)}, {});
    }).get();
}
|
||||
@@ -507,11 +507,6 @@ SEASTAR_TEST_CASE(test_nonfrozen_user_types_prepared) {
|
||||
e.execute_prepared(id, vs).discard_result().get();
|
||||
};
|
||||
|
||||
auto query_prepared = [&] (const sstring& cql, const std::vector<cql3::raw_value>& vs) {
|
||||
auto id = e.prepare(cql).get0();
|
||||
return e.execute_prepared(id, vs).get0();
|
||||
};
|
||||
|
||||
auto mk_int = [] (int x) {
|
||||
return cql3::raw_value::make_value(int32_type->decompose(x));
|
||||
};
|
||||
@@ -525,17 +520,6 @@ SEASTAR_TEST_CASE(test_nonfrozen_user_types_prepared) {
|
||||
return cql3::raw_value::make_value(type->decompose(make_tuple_value(type, vs)));
|
||||
};
|
||||
|
||||
auto mk_ut_list = [&] (const std::vector<std::vector<data_value>>& vss) {
|
||||
std::vector<data_value> ut_vs;
|
||||
for (const auto& vs: vss) {
|
||||
ut_vs.push_back(make_user_value(ut, vs));
|
||||
}
|
||||
|
||||
const auto& ut_list_type = list_type_impl::get_instance(ut, true);
|
||||
return cql3::raw_value::make_value(
|
||||
ut_list_type->decompose(make_list_value(ut_list_type, list_type_impl::native_type(ut_vs))));
|
||||
};
|
||||
|
||||
auto text_null = data_value::make_null(utf8_type);
|
||||
auto long_null = data_value::make_null(long_type);
|
||||
|
||||
@@ -560,11 +544,29 @@ SEASTAR_TEST_CASE(test_nonfrozen_user_types_prepared) {
|
||||
mk_null_row(3),
|
||||
});
|
||||
|
||||
#if 0 // TODO: fix dependence on #6369 incorrect behaviour.
|
||||
auto query_prepared = [&] (const sstring& cql, const std::vector<cql3::raw_value>& vs) {
|
||||
auto id = e.prepare(cql).get0();
|
||||
return e.execute_prepared(id, vs).get0();
|
||||
};
|
||||
|
||||
auto mk_ut_list = [&] (const std::vector<std::vector<data_value>>& vss) {
|
||||
std::vector<data_value> ut_vs;
|
||||
for (const auto& vs: vss) {
|
||||
ut_vs.push_back(make_user_value(ut, vs));
|
||||
}
|
||||
|
||||
const auto& ut_list_type = list_type_impl::get_instance(ut, true);
|
||||
return cql3::raw_value::make_value(
|
||||
ut_list_type->decompose(make_list_value(ut_list_type, list_type_impl::native_type(ut_vs))));
|
||||
};
|
||||
|
||||
assert_that(query_prepared("select * from cf where b in ? allow filtering", {mk_ut_list({{1, "text1", long_null}, {}})}))
|
||||
.is_rows().with_rows_ignore_order({
|
||||
mk_row(1, {1, "text1", long_null}),
|
||||
mk_null_row(3),
|
||||
mk_null_row(3), // TODO: drop this element, due to #6369 fix.
|
||||
});
|
||||
#endif // 0
|
||||
|
||||
execute_prepared("insert into cf (a, b) values (?, ?)", {mk_int(4), mk_tuple({4, "text4", int64_t(4)})});
|
||||
assert_that(e.execute_cql("select * from cf where a = 4").get0()).is_rows().with_rows_ignore_order({
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
CREATE TABLE ks.tbl_cnt (pk int PRIMARY KEY, c1 counter, c2 counter);
|
||||
CREATE TABLE ks.tbl_cnt (pk int PRIMARY KEY, c1 counter);
|
||||
|
||||
-- insert some values in one column
|
||||
UPDATE ks.tbl_cnt SET c1 = c1+1 WHERE pk = 1;
|
||||
@@ -17,11 +17,6 @@ SELECT pk, c1 FROM ks.tbl_cnt WHERE c1 in (-1, 2, 3) ALLOW FILTERING;
|
||||
SELECT pk, c1 FROM ks.tbl_cnt WHERE c1 = 0 ALLOW FILTERING;
|
||||
SELECT pk, c1 FROM ks.tbl_cnt WHERE c1 = 1 ALLOW FILTERING;
|
||||
|
||||
-- now filter through untouched counters `c2` - they should appear as NULLs and evaluate as zeros
|
||||
SELECT pk, c1, c2 FROM ks.tbl_cnt WHERE c2 = 0 ALLOW FILTERING;
|
||||
SELECT pk, c2 FROM ks.tbl_cnt WHERE c2 < 0 ALLOW FILTERING;
|
||||
SELECT pk, c2 FROM ks.tbl_cnt WHERE c2 > 0 ALLOW FILTERING;
|
||||
|
||||
-- delete `c1` and make sure it doesn't appear in filtering results
|
||||
DELETE c1 from ks.tbl_cnt WHERE pk = 1;
|
||||
SELECT pk, c1 FROM ks.tbl_cnt WHERE c1 = 1 ALLOW FILTERING;
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
CREATE TABLE ks.tbl_cnt (pk int PRIMARY KEY, c1 counter, c2 counter);
|
||||
CREATE TABLE ks.tbl_cnt (pk int PRIMARY KEY, c1 counter);
|
||||
{
|
||||
"status" : "ok"
|
||||
}
|
||||
@@ -111,38 +111,6 @@ SELECT pk, c1 FROM ks.tbl_cnt WHERE c1 = 1 ALLOW FILTERING;
|
||||
]
|
||||
}
|
||||
|
||||
-- now filter through untouched counters `c2` - they should appear as NULLs and evaluate as zeros
|
||||
SELECT pk, c1, c2 FROM ks.tbl_cnt WHERE c2 = 0 ALLOW FILTERING;
|
||||
{
|
||||
"rows" :
|
||||
[
|
||||
{
|
||||
"c1" : "1",
|
||||
"pk" : "1"
|
||||
},
|
||||
{
|
||||
"c1" : "2",
|
||||
"pk" : "2"
|
||||
},
|
||||
{
|
||||
"c1" : "4",
|
||||
"pk" : "4"
|
||||
},
|
||||
{
|
||||
"c1" : "3",
|
||||
"pk" : "3"
|
||||
}
|
||||
]
|
||||
}
|
||||
SELECT pk, c2 FROM ks.tbl_cnt WHERE c2 < 0 ALLOW FILTERING;
|
||||
{
|
||||
"rows" : null
|
||||
}
|
||||
SELECT pk, c2 FROM ks.tbl_cnt WHERE c2 > 0 ALLOW FILTERING;
|
||||
{
|
||||
"rows" : null
|
||||
}
|
||||
|
||||
-- delete `c1` and make sure it doesn't appear in filtering results
|
||||
DELETE c1 from ks.tbl_cnt WHERE pk = 1;
|
||||
{
|
||||
|
||||
@@ -767,4 +767,6 @@ update lwt set c = 1 where a = 1 and b = 1 and a < 0 if c = 1;
|
||||
update lwt set c = 1 where a > 0 and a < 0 and b = 1 if c = 1;
|
||||
-- error: partition key and IN is not supported
|
||||
update lwt set c = 1 where a in () and b = 1 if c = 1;
|
||||
update lwt set c = 1 where a = 1 and b IN (1, 2) if c = 1;
|
||||
update lwt set c = 1 where a = 1 and (b) IN ((1), (2)) if c = 1;
|
||||
drop table lwt;
|
||||
|
||||
@@ -4868,6 +4868,16 @@ update lwt set c = 1 where a in () and b = 1 if c = 1;
|
||||
"message" : "exceptions::invalid_request_exception (IN on the partition key is not supported with conditional updates)",
|
||||
"status" : "error"
|
||||
}
|
||||
update lwt set c = 1 where a = 1 and b IN (1, 2) if c = 1;
|
||||
{
|
||||
"message" : "exceptions::invalid_request_exception (IN on the clustering key columns is not supported with conditional updates)",
|
||||
"status" : "error"
|
||||
}
|
||||
update lwt set c = 1 where a = 1 and (b) IN ((1), (2)) if c = 1;
|
||||
{
|
||||
"message" : "exceptions::invalid_request_exception (IN on the clustering key columns is not supported with conditional updates)",
|
||||
"status" : "error"
|
||||
}
|
||||
drop table lwt;
|
||||
{
|
||||
"status" : "ok"
|
||||
|
||||
Reference in New Issue
Block a user