Compare commits
177 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4ae9a56466 | ||
|
|
0374c1d040 | ||
|
|
9cb0fe3b33 | ||
|
|
a813ff4da2 | ||
|
|
d5936147f4 | ||
|
|
a3d3b4e185 | ||
|
|
4ca2576c98 | ||
|
|
e99a0c7b89 | ||
|
|
f8c7605657 | ||
|
|
7b9e33dcd4 | ||
|
|
d86a31097a | ||
|
|
bd9d6f8e45 | ||
|
|
11ef23e97a | ||
|
|
2c0eac09ae | ||
|
|
713a7269d0 | ||
|
|
1724301d4d | ||
|
|
9971f2f5db | ||
|
|
ee328c22ca | ||
|
|
3a9c9a8a12 | ||
|
|
c03445871a | ||
|
|
565ac1b092 | ||
|
|
7d1180b98f | ||
|
|
f258e6f6ee | ||
|
|
2708b0d664 | ||
|
|
e31ffbf2e6 | ||
|
|
801994e299 | ||
|
|
3b932078bf | ||
|
|
608f62a0e9 | ||
|
|
d8619d3320 | ||
|
|
4f0c99a187 | ||
|
|
ada79df082 | ||
|
|
1935f2b480 | ||
|
|
44a76ed231 | ||
|
|
aeb49f4915 | ||
|
|
8d6b35ad20 | ||
|
|
b123700ebe | ||
|
|
6786b521f9 | ||
|
|
fda0d1ae8e | ||
|
|
e7cffb978a | ||
|
|
79a1c74921 | ||
|
|
3ee854f9fc | ||
|
|
2b65984d14 | ||
|
|
52d1099d09 | ||
|
|
3a03906377 | ||
|
|
2395a240b4 | ||
|
|
d182c595a1 | ||
|
|
fe9c4611b3 | ||
|
|
29df416720 | ||
|
|
1d3c00572c | ||
|
|
9d6e2c5a71 | ||
|
|
386741e3b7 | ||
|
|
d0fdc3960a | ||
|
|
4035cf4f9f | ||
|
|
09367742b1 | ||
|
|
a18ff57b29 | ||
|
|
4734ba21a7 | ||
|
|
425af4c543 | ||
|
|
55f096d01b | ||
|
|
fc79da5912 | ||
|
|
da9e7080ca | ||
|
|
01b0195c22 | ||
|
|
d05b567a40 | ||
|
|
2c11efbbae | ||
|
|
c60d71dc69 | ||
|
|
79930048db | ||
|
|
82b4f4a6c2 | ||
|
|
5b99195d21 | ||
|
|
edde256228 | ||
|
|
3cf28ac18e | ||
|
|
58b65f61c0 | ||
|
|
466cfb0ca6 | ||
|
|
1cd6f50806 | ||
|
|
3f6fe7328a | ||
|
|
f9dd8608eb | ||
|
|
24a80cbf47 | ||
|
|
6e4edc97ad | ||
|
|
81df28b6f3 | ||
|
|
ea6620e9eb | ||
|
|
19be84dafd | ||
|
|
2ff897d351 | ||
|
|
8fc3300739 | ||
|
|
d2ac7d4b18 | ||
|
|
61706a6789 | ||
|
|
65aa531010 | ||
|
|
4bffd0f522 | ||
|
|
9409fc7290 | ||
|
|
86faf1b3ca | ||
|
|
426295bda9 | ||
|
|
c6fde0e562 | ||
|
|
d9f9e7455b | ||
|
|
e95bcd0f8f | ||
|
|
2ff6e2e122 | ||
|
|
1fcf38abd9 | ||
|
|
3375b8b86c | ||
|
|
586546ab32 | ||
|
|
e1d558cb01 | ||
|
|
b0a8f396b4 | ||
|
|
48e7ee374a | ||
|
|
3e85ecd1bd | ||
|
|
930a4af8b3 | ||
|
|
6a6d36058a | ||
|
|
ce57d0174d | ||
|
|
cd11f210ad | ||
|
|
1e2e203cf0 | ||
|
|
1a98c93a25 | ||
|
|
4f4845c94c | ||
|
|
ef745e1ce7 | ||
|
|
ae32aa970a | ||
|
|
a3eb12c5f1 | ||
|
|
b5cedfc177 | ||
|
|
8d9bc57aca | ||
|
|
1cbda629a2 | ||
|
|
baf0201a6e | ||
|
|
7dcffb963c | ||
|
|
dcfaf4d035 | ||
|
|
f974a54cbd | ||
|
|
30a96cc592 | ||
|
|
faf300382a | ||
|
|
55400598ff | ||
|
|
c177295bce | ||
|
|
d95aa77b62 | ||
|
|
fe54009855 | ||
|
|
bbe82236be | ||
|
|
abd73cab78 | ||
|
|
8fd7cf5cd1 | ||
|
|
dd88b2dd18 | ||
|
|
eee4c00e29 | ||
|
|
85071ceeb1 | ||
|
|
4cf201fc24 | ||
|
|
c6ad5cf556 | ||
|
|
51e3e6c655 | ||
|
|
8ac6579b30 | ||
|
|
3744e66244 | ||
|
|
d3bf349484 | ||
|
|
3e6a8ba5bd | ||
|
|
5f1785b9cf | ||
|
|
e1fd6cf989 | ||
|
|
b7328ff1e4 | ||
|
|
602ed43ac7 | ||
|
|
c42c91c5bb | ||
|
|
cf017b320a | ||
|
|
89e79023ae | ||
|
|
bc67da1a21 | ||
|
|
0c7643f1fe | ||
|
|
c563234f40 | ||
|
|
77b7a48a02 | ||
|
|
b2b1bfb159 | ||
|
|
d72cbe37aa | ||
|
|
9f7b560771 | ||
|
|
06af9c028c | ||
|
|
c74ab3ae80 | ||
|
|
32cd3a070a | ||
|
|
bb1554f09e | ||
|
|
2037d7550e | ||
|
|
c320c3f6da | ||
|
|
0ed70944aa | ||
|
|
89f860d409 | ||
|
|
0819d221f4 | ||
|
|
53f47d4e67 | ||
|
|
21ad12669a | ||
|
|
c812359383 | ||
|
|
1bd79705fb | ||
|
|
7e2ef386cc | ||
|
|
51bad7e72c | ||
|
|
0379d0c031 | ||
|
|
a8ef820f27 | ||
|
|
9908f009a4 | ||
|
|
48d8a075b4 | ||
|
|
e3ddd607bc | ||
|
|
511773d466 | ||
|
|
121cd383fa | ||
|
|
90639f48e5 | ||
|
|
8d029a04aa | ||
|
|
67995db899 | ||
|
|
282cd0df7c | ||
|
|
ce58994d30 | ||
|
|
78f5afec30 |
12
.gitmodules
vendored
12
.gitmodules
vendored
@@ -6,15 +6,15 @@
|
||||
path = swagger-ui
|
||||
url = ../scylla-swagger-ui
|
||||
ignore = dirty
|
||||
[submodule "xxHash"]
|
||||
path = xxHash
|
||||
url = ../xxHash
|
||||
[submodule "libdeflate"]
|
||||
path = libdeflate
|
||||
url = ../libdeflate
|
||||
[submodule "zstd"]
|
||||
path = zstd
|
||||
url = ../zstd
|
||||
[submodule "abseil"]
|
||||
path = abseil
|
||||
url = ../abseil-cpp
|
||||
[submodule "scylla-jmx"]
|
||||
path = scylla-jmx
|
||||
url = ../scylla-jmx
|
||||
[submodule "scylla-tools"]
|
||||
path = scylla-tools
|
||||
url = ../scylla-tools-java
|
||||
|
||||
@@ -134,11 +134,15 @@ add_executable(scylla
|
||||
${SEASTAR_SOURCE_FILES}
|
||||
${SCYLLA_SOURCE_FILES})
|
||||
|
||||
# Note that since CLion does not undestand GCC6 concepts, we always disable them (even if users configure otherwise).
|
||||
# CLion seems to have trouble with `-U` (macro undefinition), so we do it this way instead.
|
||||
list(REMOVE_ITEM SEASTAR_CFLAGS "-DHAVE_GCC6_CONCEPTS")
|
||||
|
||||
# If the Seastar pkg-config information is available, append to the default flags.
|
||||
#
|
||||
# For ease of browsing the source code, we always pretend that DPDK is enabled.
|
||||
target_compile_options(scylla PUBLIC
|
||||
-std=gnu++20
|
||||
-std=gnu++1z
|
||||
-DHAVE_DPDK
|
||||
-DHAVE_HWLOC
|
||||
"${SEASTAR_CFLAGS}")
|
||||
|
||||
@@ -8,4 +8,4 @@ Please use the [Issue Tracker](https://github.com/scylladb/scylla/issues/) to re
|
||||
|
||||
# Contributing Code to Scylla
|
||||
|
||||
To contribute code to Scylla, you need to sign the [Contributor License Agreement](https://www.scylladb.com/open-source/contributor-agreement/) and send your changes as [patches](https://github.com/scylladb/scylla/wiki/Formatting-and-sending-patches) to the [mailing list](https://groups.google.com/forum/#!forum/scylladb-dev). We don't accept pull requests on GitHub.
|
||||
To contribute code to Scylla, you need to sign the [Contributor License Agreement](http://www.scylladb.com/opensource/cla/) and send your changes as [patches](https://github.com/scylladb/scylla/wiki/Formatting-and-sending-patches) to the [mailing list](https://groups.google.com/forum/#!forum/scylladb-dev). We don't accept pull requests on GitHub.
|
||||
|
||||
30
HACKING.md
30
HACKING.md
@@ -18,35 +18,23 @@ $ git submodule update --init --recursive
|
||||
|
||||
### Dependencies
|
||||
|
||||
Scylla is fairly fussy about its build environment, requiring a very recent
|
||||
version of the C++20 compiler and numerous tools and libraries to build.
|
||||
Scylla depends on the system package manager for its development dependencies.
|
||||
|
||||
Run `./install-dependencies.sh` (as root) to use your Linux distributions's
|
||||
package manager to install the appropriate packages on your build machine.
|
||||
However, this will only work on very recent distributions. For example,
|
||||
currently Fedora users must upgrade to Fedora 32 otherwise the C++ compiler
|
||||
will be too old, and not support the new C++20 standard that Scylla uses.
|
||||
Running `./install-dependencies.sh` (as root) installs the appropriate packages based on your Linux distribution.
|
||||
|
||||
Alternatively, to avoid having to upgrade your build machine or install
|
||||
various packages on it, we provide another option - the **frozen toolchain**.
|
||||
This is a script, `./tools/toolchain/dbuild`, that can execute build or run
|
||||
commands inside a Docker image that contains exactly the right build tools and
|
||||
libraries. The `dbuild` technique is useful for beginners, but is also the way
|
||||
in which ScyllaDB produces official releases, so it is highly recommended.
|
||||
On Ubuntu and Debian based Linux distributions, some packages
|
||||
required to build Scylla are missing in the official upstream:
|
||||
|
||||
To use `dbuild`, you simply prefix any build or run command with it. Building
|
||||
and running Scylla becomes as easy as:
|
||||
- libthrift-dev and libthrift
|
||||
- antlr3-c++-dev
|
||||
|
||||
```bash
|
||||
$ ./tools/toolchain/dbuild ./configure.py
|
||||
$ ./tools/toolchain/dbuild ninja build/release/scylla
|
||||
$ ./tools/toolchain/dbuild ./build/release/scylla --developer-mode 1
|
||||
```
|
||||
Try running ```sudo ./scripts/scylla_current_repo``` to add Scylla upstream,
|
||||
and get the missing packages from it.
|
||||
|
||||
### Build system
|
||||
|
||||
**Note**: Compiling Scylla requires, conservatively, 2 GB of memory per native
|
||||
thread, and up to 3 GB per native thread while linking. GCC >= 10 is
|
||||
thread, and up to 3 GB per native thread while linking. GCC >= 8.1.1. is
|
||||
required.
|
||||
|
||||
Scylla is built with [Ninja](https://ninja-build.org/), a low-level rule-based system. A Python script, `configure.py`, generates a Ninja file (`build.ninja`) based on configuration options.
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
This project includes code developed by the Apache Software Foundation (http://www.apache.org/),
|
||||
especially Apache Cassandra.
|
||||
|
||||
It includes files from https://github.com/antonblanchard/crc32-vpmsum (author Anton Blanchard <anton@au.ibm.com>, IBM).
|
||||
It also includes files from https://github.com/antonblanchard/crc32-vpmsum (author Anton Blanchard <anton@au.ibm.com>, IBM).
|
||||
These files are located in utils/arch/powerpc/crc32-vpmsum. Their license may be found in licenses/LICENSE-crc32-vpmsum.TXT.
|
||||
|
||||
It includes modified code from https://gitbox.apache.org/repos/asf?p=cassandra-dtest.git (owned by The Apache Software Foundation)
|
||||
|
||||
33
README.md
33
README.md
@@ -2,24 +2,22 @@
|
||||
|
||||
## Quick-start
|
||||
|
||||
Scylla is fairly fussy about its build environment, requiring very recent
|
||||
versions of the C++20 compiler and of many libraries to build. The document
|
||||
[HACKING.md](HACKING.md) includes detailed information on building and
|
||||
developing Scylla, but to get Scylla building quickly on (almost) any build
|
||||
machine, Scylla offers offers a [frozen toolchain](tools/toolchain/README.md),
|
||||
This is a pre-configured Docker image which includes recent versions of all
|
||||
the required compilers, libraries and build tools. Using the frozen toolchain
|
||||
allows you to avoid changing anything in your build machine to meet Scylla's
|
||||
requirements - you just need to meet the frozen toolchain's prerequisites
|
||||
(mostly, Docker or Podman being available).
|
||||
|
||||
Building and running Scylla with the frozen toolchain is as easy as:
|
||||
To get the build going quickly, Scylla offers a [frozen toolchain](tools/toolchain/README.md)
|
||||
which would build and run Scylla using a pre-configured Docker image.
|
||||
Using the frozen toolchain will also isolate all of the installed
|
||||
dependencies in a Docker container.
|
||||
Assuming you have met the toolchain prerequisites, which is running
|
||||
Docker in user mode, building and running is as easy as:
|
||||
|
||||
```bash
|
||||
$ ./tools/toolchain/dbuild ./configure.py
|
||||
$ ./tools/toolchain/dbuild ninja build/release/scylla
|
||||
$ ./tools/toolchain/dbuild ./build/release/scylla --developer-mode 1
|
||||
```
|
||||
```
|
||||
|
||||
Please see [HACKING.md](HACKING.md) for detailed information on building and developing Scylla.
|
||||
|
||||
**Note**: GCC >= 8.1.1 is required to compile Scylla.
|
||||
|
||||
## Running Scylla
|
||||
|
||||
@@ -69,20 +67,15 @@ The courses are free, self-paced and include hands-on examples. They cover a var
|
||||
administration, architecture, basic NoSQL concepts, using drivers for application development, Scylla setup, failover, compactions,
|
||||
multi-datacenters and how Scylla integrates with third-party applications.
|
||||
|
||||
## Building a CentOS-based Docker image
|
||||
## Building Fedora-based Docker image
|
||||
|
||||
Build a Docker image with:
|
||||
|
||||
```
|
||||
cd dist/docker/redhat
|
||||
cd dist/docker
|
||||
docker build -t <image-name> .
|
||||
```
|
||||
|
||||
This build is based on executables downloaded from downloads.scylladb.com,
|
||||
**not** on the executables built in this source directory. See further
|
||||
instructions in dist/docker/redhat/README.md to build a docker image from
|
||||
your own executables.
|
||||
|
||||
Run the image with:
|
||||
|
||||
```
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
#!/bin/sh
|
||||
|
||||
PRODUCT=scylla
|
||||
VERSION=4.2.4
|
||||
VERSION=4.0.11
|
||||
|
||||
if test -f version
|
||||
then
|
||||
|
||||
@@ -1,26 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2020 ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "absl-flat_hash_map.hh"
|
||||
|
||||
size_t sstring_hash::operator()(std::string_view v) const noexcept {
|
||||
return absl::Hash<std::string_view>{}(v);
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2020 ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <absl/container/flat_hash_map.h>
|
||||
#include <seastar/core/sstring.hh>
|
||||
|
||||
using namespace seastar;
|
||||
|
||||
struct sstring_hash {
|
||||
using is_transparent = void;
|
||||
size_t operator()(std::string_view v) const noexcept;
|
||||
};
|
||||
|
||||
struct sstring_eq {
|
||||
using is_transparent = void;
|
||||
bool operator()(std::string_view a, std::string_view b) const noexcept {
|
||||
return a == b;
|
||||
}
|
||||
};
|
||||
|
||||
template <typename K, typename V, typename... Ts>
|
||||
struct flat_hash_map : public absl::flat_hash_map<K, V, Ts...> {
|
||||
};
|
||||
|
||||
template <typename V>
|
||||
struct flat_hash_map<sstring, V>
|
||||
: public absl::flat_hash_map<sstring, V, sstring_hash, sstring_eq> {};
|
||||
@@ -129,7 +129,7 @@ future<std::string> get_key_from_roles(cql3::query_processor& qp, std::string us
|
||||
auth::meta::roles_table::qualified_name(), auth::meta::roles_table::role_col_name);
|
||||
|
||||
auto cl = auth::password_authenticator::consistency_for_user(username);
|
||||
auto& timeout = auth::internal_distributed_timeout_config();
|
||||
auto timeout = auth::internal_distributed_timeout_config();
|
||||
return qp.execute_internal(query, cl, timeout, {sstring(username)}, true).then_wrapped([username = std::move(username)] (future<::shared_ptr<cql3::untyped_result_set>> f) {
|
||||
auto res = f.get0();
|
||||
auto salted_hash = std::optional<sstring>();
|
||||
|
||||
@@ -77,7 +77,7 @@ std::string base64_encode(bytes_view in) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
static std::string base64_decode_string(std::string_view in) {
|
||||
bytes base64_decode(std::string_view in) {
|
||||
int i = 0;
|
||||
int8_t chunk4[4]; // chunk of input, each byte converted to 0..63;
|
||||
std::string ret;
|
||||
@@ -104,42 +104,8 @@ static std::string base64_decode_string(std::string_view in) {
|
||||
if (i==3)
|
||||
ret += ((chunk4[1] & 0xf) << 4) + ((chunk4[2] & 0x3c) >> 2);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
bytes base64_decode(std::string_view in) {
|
||||
// FIXME: This copy is sad. The problem is we need back "bytes"
|
||||
// but "bytes" doesn't have efficient append and std::string.
|
||||
// To fix this we need to use bytes' "uninitialized" feature.
|
||||
std::string ret = base64_decode_string(in);
|
||||
return bytes(ret.begin(), ret.end());
|
||||
}
|
||||
|
||||
static size_t base64_padding_len(std::string_view str) {
|
||||
size_t padding = 0;
|
||||
padding += (!str.empty() && str.back() == '=');
|
||||
padding += (str.size() > 1 && *(str.end() - 2) == '=');
|
||||
return padding;
|
||||
}
|
||||
|
||||
size_t base64_decoded_len(std::string_view str) {
|
||||
return str.size() / 4 * 3 - base64_padding_len(str);
|
||||
}
|
||||
|
||||
bool base64_begins_with(std::string_view base, std::string_view operand) {
|
||||
if (base.size() < operand.size() || base.size() % 4 != 0 || operand.size() % 4 != 0) {
|
||||
return false;
|
||||
}
|
||||
if (base64_padding_len(operand) == 0) {
|
||||
return base.starts_with(operand);
|
||||
}
|
||||
const std::string_view unpadded_base_prefix = base.substr(0, operand.size() - 4);
|
||||
const std::string_view unpadded_operand = operand.substr(0, operand.size() - 4);
|
||||
if (unpadded_base_prefix != unpadded_operand) {
|
||||
return false;
|
||||
}
|
||||
// Decode and compare last 4 bytes of base64-encoded strings
|
||||
const std::string base_remainder = base64_decode_string(base.substr(operand.size() - 4, operand.size()));
|
||||
const std::string operand_remainder = base64_decode_string(operand.substr(operand.size() - 4));
|
||||
return base_remainder.starts_with(operand_remainder);
|
||||
}
|
||||
|
||||
@@ -32,7 +32,3 @@ bytes base64_decode(std::string_view);
|
||||
inline bytes base64_decode(const rjson::value& v) {
|
||||
return base64_decode(std::string_view(v.GetString(), v.GetStringLength()));
|
||||
}
|
||||
|
||||
size_t base64_decoded_len(std::string_view str);
|
||||
|
||||
bool base64_begins_with(std::string_view base, std::string_view operand);
|
||||
|
||||
@@ -34,7 +34,7 @@
|
||||
#include <boost/algorithm/cxx11/any_of.hpp>
|
||||
#include "utils/overloaded_functor.hh"
|
||||
|
||||
#include "expressions.hh"
|
||||
#include "expressions_eval.hh"
|
||||
|
||||
namespace alternator {
|
||||
|
||||
@@ -67,6 +67,49 @@ comparison_operator_type get_comparison_operator(const rjson::value& comparison_
|
||||
return it->second;
|
||||
}
|
||||
|
||||
static ::shared_ptr<cql3::restrictions::single_column_restriction::contains> make_map_element_restriction(const column_definition& cdef, std::string_view key, const rjson::value& value) {
|
||||
bytes raw_key = utf8_type->from_string(sstring_view(key.data(), key.size()));
|
||||
auto key_value = ::make_shared<cql3::constants::value>(cql3::raw_value::make_value(std::move(raw_key)));
|
||||
bytes raw_value = serialize_item(value);
|
||||
auto entry_value = ::make_shared<cql3::constants::value>(cql3::raw_value::make_value(std::move(raw_value)));
|
||||
return make_shared<cql3::restrictions::single_column_restriction::contains>(cdef, std::move(key_value), std::move(entry_value));
|
||||
}
|
||||
|
||||
static ::shared_ptr<cql3::restrictions::single_column_restriction::EQ> make_key_eq_restriction(const column_definition& cdef, const rjson::value& value) {
|
||||
bytes raw_value = get_key_from_typed_value(value, cdef);
|
||||
auto restriction_value = ::make_shared<cql3::constants::value>(cql3::raw_value::make_value(std::move(raw_value)));
|
||||
return make_shared<cql3::restrictions::single_column_restriction::EQ>(cdef, std::move(restriction_value));
|
||||
}
|
||||
|
||||
::shared_ptr<cql3::restrictions::statement_restrictions> get_filtering_restrictions(schema_ptr schema, const column_definition& attrs_col, const rjson::value& query_filter) {
|
||||
clogger.trace("Getting filtering restrictions for: {}", rjson::print(query_filter));
|
||||
auto filtering_restrictions = ::make_shared<cql3::restrictions::statement_restrictions>(schema, true);
|
||||
for (auto it = query_filter.MemberBegin(); it != query_filter.MemberEnd(); ++it) {
|
||||
std::string_view column_name(it->name.GetString(), it->name.GetStringLength());
|
||||
const rjson::value& condition = it->value;
|
||||
|
||||
const rjson::value& comp_definition = rjson::get(condition, "ComparisonOperator");
|
||||
const rjson::value& attr_list = rjson::get(condition, "AttributeValueList");
|
||||
comparison_operator_type op = get_comparison_operator(comp_definition);
|
||||
|
||||
if (op != comparison_operator_type::EQ) {
|
||||
throw api_error("ValidationException", "Filtering is currently implemented for EQ operator only");
|
||||
}
|
||||
if (attr_list.Size() != 1) {
|
||||
throw api_error("ValidationException", format("EQ restriction needs exactly 1 attribute value: {}", rjson::print(attr_list)));
|
||||
}
|
||||
if (const column_definition* cdef = schema->get_column_definition(to_bytes(column_name.data()))) {
|
||||
// Primary key restriction
|
||||
filtering_restrictions->add_restriction(make_key_eq_restriction(*cdef, attr_list[0]), false, true);
|
||||
} else {
|
||||
// Regular column restriction
|
||||
filtering_restrictions->add_restriction(make_map_element_restriction(attrs_col, column_name, attr_list[0]), false, true);
|
||||
}
|
||||
|
||||
}
|
||||
return filtering_restrictions;
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
struct size_check {
|
||||
@@ -98,11 +141,6 @@ struct nonempty : public size_check {
|
||||
|
||||
// Check that array has the expected number of elements
|
||||
static void verify_operand_count(const rjson::value* array, const size_check& expected, const rjson::value& op) {
|
||||
if (!array && expected(0)) {
|
||||
// If expected() allows an empty AttributeValueList, it is also fine
|
||||
// that it is missing.
|
||||
return;
|
||||
}
|
||||
if (!array || !array->IsArray()) {
|
||||
throw api_error("ValidationException", "With ComparisonOperator, AttributeValueList must be given and an array");
|
||||
}
|
||||
@@ -159,47 +197,36 @@ static bool check_NE(const rjson::value* v1, const rjson::value& v2) {
|
||||
}
|
||||
|
||||
// Check if two JSON-encoded values match with the BEGINS_WITH relation
|
||||
bool check_BEGINS_WITH(const rjson::value* v1, const rjson::value& v2,
|
||||
bool v1_from_query, bool v2_from_query) {
|
||||
bool bad = false;
|
||||
if (!v1 || !v1->IsObject() || v1->MemberCount() != 1) {
|
||||
if (v1_from_query) {
|
||||
throw api_error("ValidationException", "begins_with() encountered malformed argument");
|
||||
} else {
|
||||
bad = true;
|
||||
}
|
||||
} else if (v1->MemberBegin()->name != "S" && v1->MemberBegin()->name != "B") {
|
||||
if (v1_from_query) {
|
||||
throw api_error("ValidationException", format("begins_with supports only string or binary type, got: {}", *v1));
|
||||
} else {
|
||||
bad = true;
|
||||
}
|
||||
}
|
||||
static bool check_BEGINS_WITH(const rjson::value* v1, const rjson::value& v2) {
|
||||
// BEGINS_WITH requires that its single operand (v2) be a string or
|
||||
// binary - otherwise it's a validation error. However, problems with
|
||||
// the stored attribute (v1) will just return false (no match).
|
||||
if (!v2.IsObject() || v2.MemberCount() != 1) {
|
||||
if (v2_from_query) {
|
||||
throw api_error("ValidationException", "begins_with() encountered malformed argument");
|
||||
} else {
|
||||
bad = true;
|
||||
}
|
||||
} else if (v2.MemberBegin()->name != "S" && v2.MemberBegin()->name != "B") {
|
||||
if (v2_from_query) {
|
||||
throw api_error("ValidationException", format("begins_with() supports only string or binary type, got: {}", v2));
|
||||
} else {
|
||||
bad = true;
|
||||
}
|
||||
throw api_error("ValidationException", format("BEGINS_WITH operator encountered malformed AttributeValue: {}", v2));
|
||||
}
|
||||
if (bad) {
|
||||
auto it2 = v2.MemberBegin();
|
||||
if (it2->name != "S" && it2->name != "B") {
|
||||
throw api_error("ValidationException", format("BEGINS_WITH operator requires String or Binary in AttributeValue, got {}", it2->name));
|
||||
}
|
||||
|
||||
|
||||
if (!v1 || !v1->IsObject() || v1->MemberCount() != 1) {
|
||||
return false;
|
||||
}
|
||||
auto it1 = v1->MemberBegin();
|
||||
auto it2 = v2.MemberBegin();
|
||||
if (it1->name != it2->name) {
|
||||
return false;
|
||||
}
|
||||
if (it2->name == "S") {
|
||||
return rjson::to_string_view(it1->value).starts_with(rjson::to_string_view(it2->value));
|
||||
std::string_view val1(it1->value.GetString(), it1->value.GetStringLength());
|
||||
std::string_view val2(it2->value.GetString(), it2->value.GetStringLength());
|
||||
return val1.substr(0, val2.size()) == val2;
|
||||
} else /* it2->name == "B" */ {
|
||||
return base64_begins_with(rjson::to_string_view(it1->value), rjson::to_string_view(it2->value));
|
||||
// TODO (optimization): Check the begins_with condition directly on
|
||||
// the base64-encoded string, without making a decoded copy.
|
||||
bytes val1 = base64_decode(it1->value);
|
||||
bytes val2 = base64_decode(it2->value);
|
||||
return val1.substr(0, val2.size()) == val2;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -214,6 +241,11 @@ bool check_CONTAINS(const rjson::value* v1, const rjson::value& v2) {
|
||||
}
|
||||
const auto& kv1 = *v1->MemberBegin();
|
||||
const auto& kv2 = *v2.MemberBegin();
|
||||
if (kv2.name != "S" && kv2.name != "N" && kv2.name != "B") {
|
||||
throw api_error("ValidationException",
|
||||
format("CONTAINS operator requires a single AttributeValue of type String, Number, or Binary, "
|
||||
"got {} instead", kv2.name));
|
||||
}
|
||||
if (kv1.name == "S" && kv2.name == "S") {
|
||||
return rjson::to_string_view(kv1.value).find(rjson::to_string_view(kv2.value)) != std::string_view::npos;
|
||||
} else if (kv1.name == "B" && kv2.name == "B") {
|
||||
@@ -296,38 +328,24 @@ static bool check_NOT_NULL(const rjson::value* val) {
|
||||
return val != nullptr;
|
||||
}
|
||||
|
||||
// Only types S, N or B (string, number or bytes) may be compared by the
|
||||
// various comparion operators - lt, le, gt, ge, and between.
|
||||
static bool check_comparable_type(const rjson::value& v) {
|
||||
if (!v.IsObject() || v.MemberCount() != 1) {
|
||||
return false;
|
||||
}
|
||||
const rjson::value& type = v.MemberBegin()->name;
|
||||
return type == "S" || type == "N" || type == "B";
|
||||
}
|
||||
|
||||
// Check if two JSON-encoded values match with cmp.
|
||||
template <typename Comparator>
|
||||
bool check_compare(const rjson::value* v1, const rjson::value& v2, const Comparator& cmp,
|
||||
bool v1_from_query, bool v2_from_query) {
|
||||
bool bad = false;
|
||||
if (!v1 || !check_comparable_type(*v1)) {
|
||||
if (v1_from_query) {
|
||||
throw api_error("ValidationException", format("{} allow only the types String, Number, or Binary", cmp.diagnostic));
|
||||
}
|
||||
bad = true;
|
||||
bool check_compare(const rjson::value* v1, const rjson::value& v2, const Comparator& cmp) {
|
||||
if (!v2.IsObject() || v2.MemberCount() != 1) {
|
||||
throw api_error("ValidationException",
|
||||
format("{} requires a single AttributeValue of type String, Number, or Binary",
|
||||
cmp.diagnostic));
|
||||
}
|
||||
if (!check_comparable_type(v2)) {
|
||||
if (v2_from_query) {
|
||||
throw api_error("ValidationException", format("{} allow only the types String, Number, or Binary", cmp.diagnostic));
|
||||
}
|
||||
bad = true;
|
||||
const auto& kv2 = *v2.MemberBegin();
|
||||
if (kv2.name != "S" && kv2.name != "N" && kv2.name != "B") {
|
||||
throw api_error("ValidationException",
|
||||
format("{} requires a single AttributeValue of type String, Number, or Binary",
|
||||
cmp.diagnostic));
|
||||
}
|
||||
if (bad) {
|
||||
if (!v1 || !v1->IsObject() || v1->MemberCount() != 1) {
|
||||
return false;
|
||||
}
|
||||
const auto& kv1 = *v1->MemberBegin();
|
||||
const auto& kv2 = *v2.MemberBegin();
|
||||
if (kv1.name != kv2.name) {
|
||||
return false;
|
||||
}
|
||||
@@ -341,8 +359,7 @@ bool check_compare(const rjson::value* v1, const rjson::value& v2, const Compara
|
||||
if (kv1.name == "B") {
|
||||
return cmp(base64_decode(kv1.value), base64_decode(kv2.value));
|
||||
}
|
||||
// cannot reach here, as check_comparable_type() verifies the type is one
|
||||
// of the above options.
|
||||
clogger.error("check_compare panic: LHS type equals RHS type, but one is in {N,S,B} while the other isn't");
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -373,71 +390,57 @@ struct cmp_gt {
|
||||
static constexpr const char* diagnostic = "GT operator";
|
||||
};
|
||||
|
||||
// True if v is between lb and ub, inclusive. Throws or returns false
|
||||
// (depending on bounds_from_query parameter) if lb > ub.
|
||||
// True if v is between lb and ub, inclusive. Throws if lb > ub.
|
||||
template <typename T>
|
||||
static bool check_BETWEEN(const T& v, const T& lb, const T& ub, bool bounds_from_query) {
|
||||
bool check_BETWEEN(const T& v, const T& lb, const T& ub) {
|
||||
if (cmp_lt()(ub, lb)) {
|
||||
if (bounds_from_query) {
|
||||
throw api_error("ValidationException",
|
||||
format("BETWEEN operator requires lower_bound <= upper_bound, but {} > {}", lb, ub));
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
throw api_error("ValidationException",
|
||||
format("BETWEEN operator requires lower_bound <= upper_bound, but {} > {}", lb, ub));
|
||||
}
|
||||
return cmp_ge()(v, lb) && cmp_le()(v, ub);
|
||||
}
|
||||
|
||||
static bool check_BETWEEN(const rjson::value* v, const rjson::value& lb, const rjson::value& ub,
|
||||
bool v_from_query, bool lb_from_query, bool ub_from_query) {
|
||||
if ((v && v_from_query && !check_comparable_type(*v)) ||
|
||||
(lb_from_query && !check_comparable_type(lb)) ||
|
||||
(ub_from_query && !check_comparable_type(ub))) {
|
||||
throw api_error("ValidationException", "between allow only the types String, Number, or Binary");
|
||||
|
||||
}
|
||||
if (!v || !v->IsObject() || v->MemberCount() != 1 ||
|
||||
!lb.IsObject() || lb.MemberCount() != 1 ||
|
||||
!ub.IsObject() || ub.MemberCount() != 1) {
|
||||
static bool check_BETWEEN(const rjson::value* v, const rjson::value& lb, const rjson::value& ub) {
|
||||
if (!v) {
|
||||
return false;
|
||||
}
|
||||
if (!v->IsObject() || v->MemberCount() != 1) {
|
||||
throw api_error("ValidationException", format("BETWEEN operator encountered malformed AttributeValue: {}", *v));
|
||||
}
|
||||
if (!lb.IsObject() || lb.MemberCount() != 1) {
|
||||
throw api_error("ValidationException", format("BETWEEN operator encountered malformed AttributeValue: {}", lb));
|
||||
}
|
||||
if (!ub.IsObject() || ub.MemberCount() != 1) {
|
||||
throw api_error("ValidationException", format("BETWEEN operator encountered malformed AttributeValue: {}", ub));
|
||||
}
|
||||
|
||||
const auto& kv_v = *v->MemberBegin();
|
||||
const auto& kv_lb = *lb.MemberBegin();
|
||||
const auto& kv_ub = *ub.MemberBegin();
|
||||
bool bounds_from_query = lb_from_query && ub_from_query;
|
||||
if (kv_lb.name != kv_ub.name) {
|
||||
if (bounds_from_query) {
|
||||
throw api_error("ValidationException",
|
||||
throw api_error(
|
||||
"ValidationException",
|
||||
format("BETWEEN operator requires the same type for lower and upper bound; instead got {} and {}",
|
||||
kv_lb.name, kv_ub.name));
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if (kv_v.name != kv_lb.name) { // Cannot compare different types, so v is NOT between lb and ub.
|
||||
return false;
|
||||
}
|
||||
if (kv_v.name == "N") {
|
||||
const char* diag = "BETWEEN operator";
|
||||
return check_BETWEEN(unwrap_number(*v, diag), unwrap_number(lb, diag), unwrap_number(ub, diag), bounds_from_query);
|
||||
return check_BETWEEN(unwrap_number(*v, diag), unwrap_number(lb, diag), unwrap_number(ub, diag));
|
||||
}
|
||||
if (kv_v.name == "S") {
|
||||
return check_BETWEEN(std::string_view(kv_v.value.GetString(), kv_v.value.GetStringLength()),
|
||||
std::string_view(kv_lb.value.GetString(), kv_lb.value.GetStringLength()),
|
||||
std::string_view(kv_ub.value.GetString(), kv_ub.value.GetStringLength()),
|
||||
bounds_from_query);
|
||||
std::string_view(kv_ub.value.GetString(), kv_ub.value.GetStringLength()));
|
||||
}
|
||||
if (kv_v.name == "B") {
|
||||
return check_BETWEEN(base64_decode(kv_v.value), base64_decode(kv_lb.value), base64_decode(kv_ub.value), bounds_from_query);
|
||||
return check_BETWEEN(base64_decode(kv_v.value), base64_decode(kv_lb.value), base64_decode(kv_ub.value));
|
||||
}
|
||||
if (v_from_query) {
|
||||
throw api_error("ValidationException",
|
||||
format("BETWEEN operator requires AttributeValueList elements to be of type String, Number, or Binary; instead got {}",
|
||||
throw api_error("ValidationException",
|
||||
format("BETWEEN operator requires AttributeValueList elements to be of type String, Number, or Binary; instead got {}",
|
||||
kv_lb.name));
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Verify one Expect condition on one attribute (whose content is "got")
|
||||
@@ -484,19 +487,19 @@ static bool verify_expected_one(const rjson::value& condition, const rjson::valu
|
||||
return check_NE(got, (*attribute_value_list)[0]);
|
||||
case comparison_operator_type::LT:
|
||||
verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
|
||||
return check_compare(got, (*attribute_value_list)[0], cmp_lt{}, false, true);
|
||||
return check_compare(got, (*attribute_value_list)[0], cmp_lt{});
|
||||
case comparison_operator_type::LE:
|
||||
verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
|
||||
return check_compare(got, (*attribute_value_list)[0], cmp_le{}, false, true);
|
||||
return check_compare(got, (*attribute_value_list)[0], cmp_le{});
|
||||
case comparison_operator_type::GT:
|
||||
verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
|
||||
return check_compare(got, (*attribute_value_list)[0], cmp_gt{}, false, true);
|
||||
return check_compare(got, (*attribute_value_list)[0], cmp_gt{});
|
||||
case comparison_operator_type::GE:
|
||||
verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
|
||||
return check_compare(got, (*attribute_value_list)[0], cmp_ge{}, false, true);
|
||||
return check_compare(got, (*attribute_value_list)[0], cmp_ge{});
|
||||
case comparison_operator_type::BEGINS_WITH:
|
||||
verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
|
||||
return check_BEGINS_WITH(got, (*attribute_value_list)[0], false, true);
|
||||
return check_BEGINS_WITH(got, (*attribute_value_list)[0]);
|
||||
case comparison_operator_type::IN:
|
||||
verify_operand_count(attribute_value_list, nonempty(), *comparison_operator);
|
||||
return check_IN(got, *attribute_value_list);
|
||||
@@ -508,87 +511,56 @@ static bool verify_expected_one(const rjson::value& condition, const rjson::valu
|
||||
return check_NOT_NULL(got);
|
||||
case comparison_operator_type::BETWEEN:
|
||||
verify_operand_count(attribute_value_list, exact_size(2), *comparison_operator);
|
||||
return check_BETWEEN(got, (*attribute_value_list)[0], (*attribute_value_list)[1],
|
||||
false, true, true);
|
||||
return check_BETWEEN(got, (*attribute_value_list)[0], (*attribute_value_list)[1]);
|
||||
case comparison_operator_type::CONTAINS:
|
||||
{
|
||||
verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
|
||||
// Expected's "CONTAINS" has this artificial limitation.
|
||||
// ConditionExpression's "contains()" does not...
|
||||
const rjson::value& arg = (*attribute_value_list)[0];
|
||||
const auto& argtype = (*arg.MemberBegin()).name;
|
||||
if (argtype != "S" && argtype != "N" && argtype != "B") {
|
||||
throw api_error("ValidationException",
|
||||
format("CONTAINS operator requires a single AttributeValue of type String, Number, or Binary, "
|
||||
"got {} instead", argtype));
|
||||
}
|
||||
return check_CONTAINS(got, arg);
|
||||
}
|
||||
verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
|
||||
return check_CONTAINS(got, (*attribute_value_list)[0]);
|
||||
case comparison_operator_type::NOT_CONTAINS:
|
||||
{
|
||||
verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
|
||||
// Expected's "NOT_CONTAINS" has this artificial limitation.
|
||||
// ConditionExpression's "contains()" does not...
|
||||
const rjson::value& arg = (*attribute_value_list)[0];
|
||||
const auto& argtype = (*arg.MemberBegin()).name;
|
||||
if (argtype != "S" && argtype != "N" && argtype != "B") {
|
||||
throw api_error("ValidationException",
|
||||
format("CONTAINS operator requires a single AttributeValue of type String, Number, or Binary, "
|
||||
"got {} instead", argtype));
|
||||
}
|
||||
return check_NOT_CONTAINS(got, arg);
|
||||
}
|
||||
verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
|
||||
return check_NOT_CONTAINS(got, (*attribute_value_list)[0]);
|
||||
}
|
||||
throw std::logic_error(format("Internal error: corrupted operator enum: {}", int(op)));
|
||||
}
|
||||
}
|
||||
|
||||
conditional_operator_type get_conditional_operator(const rjson::value& req) {
|
||||
const rjson::value* conditional_operator = rjson::find(req, "ConditionalOperator");
|
||||
if (!conditional_operator) {
|
||||
return conditional_operator_type::MISSING;
|
||||
}
|
||||
if (!conditional_operator->IsString()) {
|
||||
throw api_error("ValidationException", "'ConditionalOperator' parameter, if given, must be a string");
|
||||
}
|
||||
auto s = rjson::to_string_view(*conditional_operator);
|
||||
if (s == "AND") {
|
||||
return conditional_operator_type::AND;
|
||||
} else if (s == "OR") {
|
||||
return conditional_operator_type::OR;
|
||||
} else {
|
||||
throw api_error("ValidationException",
|
||||
format("'ConditionalOperator' parameter must be AND, OR or missing. Found {}.", s));
|
||||
}
|
||||
}
|
||||
|
||||
// Check if the existing values of the item (previous_item) match the
|
||||
// conditions given by the Expected and ConditionalOperator parameters
|
||||
// (if they exist) in the request (an UpdateItem, PutItem or DeleteItem).
|
||||
// This function can throw an ValidationException API error if there
|
||||
// are errors in the format of the condition itself.
|
||||
bool verify_expected(const rjson::value& req, const rjson::value* previous_item) {
|
||||
bool verify_expected(const rjson::value& req, const std::unique_ptr<rjson::value>& previous_item) {
|
||||
const rjson::value* expected = rjson::find(req, "Expected");
|
||||
auto conditional_operator = get_conditional_operator(req);
|
||||
if (conditional_operator != conditional_operator_type::MISSING &&
|
||||
(!expected || (expected->IsObject() && expected->GetObject().ObjectEmpty()))) {
|
||||
throw api_error("ValidationException", "'ConditionalOperator' parameter cannot be specified for missing or empty Expression");
|
||||
}
|
||||
if (!expected) {
|
||||
return true;
|
||||
}
|
||||
if (!expected->IsObject()) {
|
||||
throw api_error("ValidationException", "'Expected' parameter, if given, must be an object");
|
||||
}
|
||||
bool require_all = conditional_operator != conditional_operator_type::OR;
|
||||
return verify_condition(*expected, require_all, previous_item);
|
||||
}
|
||||
// ConditionalOperator can be "AND" for requiring all conditions, or
|
||||
// "OR" for requiring one condition, and defaults to "AND" if missing.
|
||||
const rjson::value* conditional_operator = rjson::find(req, "ConditionalOperator");
|
||||
bool require_all = true;
|
||||
if (conditional_operator) {
|
||||
if (!conditional_operator->IsString()) {
|
||||
throw api_error("ValidationException", "'ConditionalOperator' parameter, if given, must be a string");
|
||||
}
|
||||
std::string_view s(conditional_operator->GetString(), conditional_operator->GetStringLength());
|
||||
if (s == "AND") {
|
||||
// require_all is already true
|
||||
} else if (s == "OR") {
|
||||
require_all = false;
|
||||
} else {
|
||||
throw api_error("ValidationException", "'ConditionalOperator' parameter must be AND, OR or missing");
|
||||
}
|
||||
if (expected->GetObject().ObjectEmpty()) {
|
||||
throw api_error("ValidationException", "'ConditionalOperator' parameter cannot be specified for empty Expression");
|
||||
}
|
||||
}
|
||||
|
||||
bool verify_condition(const rjson::value& condition, bool require_all, const rjson::value* previous_item) {
|
||||
for (auto it = condition.MemberBegin(); it != condition.MemberEnd(); ++it) {
|
||||
for (auto it = expected->MemberBegin(); it != expected->MemberEnd(); ++it) {
|
||||
const rjson::value* got = nullptr;
|
||||
if (previous_item) {
|
||||
got = rjson::find(*previous_item, rjson::to_string_view(it->name));
|
||||
if (previous_item && previous_item->IsObject() && previous_item->HasMember("Item")) {
|
||||
got = rjson::find((*previous_item)["Item"], rjson::to_string_view(it->name));
|
||||
}
|
||||
bool success = verify_expected_one(it->value, got);
|
||||
if (success && !require_all) {
|
||||
@@ -604,8 +576,12 @@ bool verify_condition(const rjson::value& condition, bool require_all, const rjs
|
||||
return require_all;
|
||||
}
|
||||
|
||||
static bool calculate_primitive_condition(const parsed::primitive_condition& cond,
|
||||
const rjson::value* previous_item) {
|
||||
bool calculate_primitive_condition(const parsed::primitive_condition& cond,
|
||||
std::unordered_set<std::string>& used_attribute_values,
|
||||
std::unordered_set<std::string>& used_attribute_names,
|
||||
const rjson::value& req,
|
||||
schema_ptr schema,
|
||||
const std::unique_ptr<rjson::value>& previous_item) {
|
||||
std::vector<rjson::value> calculated_values;
|
||||
calculated_values.reserve(cond._values.size());
|
||||
for (const parsed::value& v : cond._values) {
|
||||
@@ -613,7 +589,9 @@ static bool calculate_primitive_condition(const parsed::primitive_condition& con
|
||||
cond._op == parsed::primitive_condition::type::VALUE ?
|
||||
calculate_value_caller::ConditionExpressionAlone :
|
||||
calculate_value_caller::ConditionExpression,
|
||||
previous_item));
|
||||
rjson::find(req, "ExpressionAttributeValues"),
|
||||
used_attribute_names, used_attribute_values,
|
||||
req, schema, previous_item));
|
||||
}
|
||||
switch (cond._op) {
|
||||
case parsed::primitive_condition::type::BETWEEN:
|
||||
@@ -621,8 +599,7 @@ static bool calculate_primitive_condition(const parsed::primitive_condition& con
|
||||
// Shouldn't happen unless we have a bug in the parser
|
||||
throw std::logic_error(format("Wrong number of values {} in BETWEEN primitive_condition", cond._values.size()));
|
||||
}
|
||||
return check_BETWEEN(&calculated_values[0], calculated_values[1], calculated_values[2],
|
||||
cond._values[0].is_constant(), cond._values[1].is_constant(), cond._values[2].is_constant());
|
||||
return check_BETWEEN(&calculated_values[0], calculated_values[1], calculated_values[2]);
|
||||
case parsed::primitive_condition::type::IN:
|
||||
return check_IN(calculated_values);
|
||||
case parsed::primitive_condition::type::VALUE:
|
||||
@@ -653,17 +630,13 @@ static bool calculate_primitive_condition(const parsed::primitive_condition& con
|
||||
case parsed::primitive_condition::type::NE:
|
||||
return check_NE(&calculated_values[0], calculated_values[1]);
|
||||
case parsed::primitive_condition::type::GT:
|
||||
return check_compare(&calculated_values[0], calculated_values[1], cmp_gt{},
|
||||
cond._values[0].is_constant(), cond._values[1].is_constant());
|
||||
return check_compare(&calculated_values[0], calculated_values[1], cmp_gt{});
|
||||
case parsed::primitive_condition::type::GE:
|
||||
return check_compare(&calculated_values[0], calculated_values[1], cmp_ge{},
|
||||
cond._values[0].is_constant(), cond._values[1].is_constant());
|
||||
return check_compare(&calculated_values[0], calculated_values[1], cmp_ge{});
|
||||
case parsed::primitive_condition::type::LT:
|
||||
return check_compare(&calculated_values[0], calculated_values[1], cmp_lt{},
|
||||
cond._values[0].is_constant(), cond._values[1].is_constant());
|
||||
return check_compare(&calculated_values[0], calculated_values[1], cmp_lt{});
|
||||
case parsed::primitive_condition::type::LE:
|
||||
return check_compare(&calculated_values[0], calculated_values[1], cmp_le{},
|
||||
cond._values[0].is_constant(), cond._values[1].is_constant());
|
||||
return check_compare(&calculated_values[0], calculated_values[1], cmp_le{});
|
||||
default:
|
||||
// Shouldn't happen unless we have a bug in the parser
|
||||
throw std::logic_error(format("Unknown type {} in primitive_condition object", (int)(cond._op)));
|
||||
@@ -674,17 +647,23 @@ static bool calculate_primitive_condition(const parsed::primitive_condition& con
|
||||
// conditions given by the given parsed ConditionExpression.
|
||||
bool verify_condition_expression(
|
||||
const parsed::condition_expression& condition_expression,
|
||||
const rjson::value* previous_item) {
|
||||
std::unordered_set<std::string>& used_attribute_values,
|
||||
std::unordered_set<std::string>& used_attribute_names,
|
||||
const rjson::value& req,
|
||||
schema_ptr schema,
|
||||
const std::unique_ptr<rjson::value>& previous_item) {
|
||||
if (condition_expression.empty()) {
|
||||
return true;
|
||||
}
|
||||
bool ret = std::visit(overloaded_functor {
|
||||
[&] (const parsed::primitive_condition& cond) -> bool {
|
||||
return calculate_primitive_condition(cond, previous_item);
|
||||
return calculate_primitive_condition(cond, used_attribute_values,
|
||||
used_attribute_names, req, schema, previous_item);
|
||||
},
|
||||
[&] (const parsed::condition_expression::condition_list& list) -> bool {
|
||||
auto verify_condition = [&] (const parsed::condition_expression& e) {
|
||||
return verify_condition_expression(e, previous_item);
|
||||
return verify_condition_expression(e, used_attribute_values,
|
||||
used_attribute_names, req, schema, previous_item);
|
||||
};
|
||||
switch (list.op) {
|
||||
case '&':
|
||||
|
||||
@@ -33,7 +33,6 @@
|
||||
|
||||
#include "cql3/restrictions/statement_restrictions.hh"
|
||||
#include "serialization.hh"
|
||||
#include "expressions_types.hh"
|
||||
|
||||
namespace alternator {
|
||||
|
||||
@@ -43,19 +42,8 @@ enum class comparison_operator_type {
|
||||
|
||||
comparison_operator_type get_comparison_operator(const rjson::value& comparison_operator);
|
||||
|
||||
enum class conditional_operator_type {
|
||||
AND, OR, MISSING
|
||||
};
|
||||
conditional_operator_type get_conditional_operator(const rjson::value& req);
|
||||
::shared_ptr<cql3::restrictions::statement_restrictions> get_filtering_restrictions(schema_ptr schema, const column_definition& attrs_col, const rjson::value& query_filter);
|
||||
|
||||
bool verify_expected(const rjson::value& req, const rjson::value* previous_item);
|
||||
bool verify_condition(const rjson::value& condition, bool require_all, const rjson::value* previous_item);
|
||||
|
||||
bool check_CONTAINS(const rjson::value* v1, const rjson::value& v2);
|
||||
bool check_BEGINS_WITH(const rjson::value* v1, const rjson::value& v2, bool v1_from_query, bool v2_from_query);
|
||||
|
||||
bool verify_condition_expression(
|
||||
const parsed::condition_expression& condition_expression,
|
||||
const rjson::value* previous_item);
|
||||
bool verify_expected(const rjson::value& req, const std::unique_ptr<rjson::value>& previous_item);
|
||||
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -50,7 +50,6 @@ public:
|
||||
stats _stats;
|
||||
static constexpr auto ATTRS_COLUMN_NAME = ":attrs";
|
||||
static constexpr auto KEYSPACE_NAME_PREFIX = "alternator_";
|
||||
static constexpr std::string_view INTERNAL_TABLE_PREFIX = ".scylla.alternator.";
|
||||
|
||||
executor(service::storage_proxy& proxy, service::migration_manager& mm, smp_service_group ssg)
|
||||
: _proxy(proxy), _mm(mm), _ssg(ssg) {}
|
||||
|
||||
@@ -20,24 +20,16 @@
|
||||
*/
|
||||
|
||||
#include "expressions.hh"
|
||||
#include "serialization.hh"
|
||||
#include "base64.hh"
|
||||
#include "conditions.hh"
|
||||
#include "alternator/expressionsLexer.hpp"
|
||||
#include "alternator/expressionsParser.hpp"
|
||||
#include "utils/overloaded_functor.hh"
|
||||
#include "error.hh"
|
||||
|
||||
#include "seastarx.hh"
|
||||
#include <seastarx.hh>
|
||||
|
||||
#include <seastar/core/print.hh>
|
||||
#include <seastar/util/log.hh>
|
||||
|
||||
#include <boost/algorithm/cxx11/any_of.hpp>
|
||||
#include <boost/algorithm/cxx11/all_of.hpp>
|
||||
|
||||
#include <functional>
|
||||
#include <unordered_map>
|
||||
|
||||
namespace alternator {
|
||||
|
||||
@@ -130,555 +122,6 @@ void condition_expression::append(condition_expression&& a, char op) {
|
||||
}, _expression);
|
||||
}
|
||||
|
||||
|
||||
} // namespace parsed
|
||||
|
||||
// The following resolve_*() functions resolve references in parsed
|
||||
// expressions of different types. Resolving a parsed expression means
|
||||
// replacing:
|
||||
// 1. In parsed::path objects, replace references like "#name" with the
|
||||
// attribute name from ExpressionAttributeNames,
|
||||
// 2. In parsed::constant objects, replace references like ":value" with
|
||||
// the value from ExpressionAttributeValues.
|
||||
// These function also track which name and value references were used, to
|
||||
// allow complaining if some remain unused.
|
||||
// Note that the resolve_*() functions modify the expressions in-place,
|
||||
// so if we ever intend to cache parsed expression, we need to pass a copy
|
||||
// into this function.
|
||||
//
|
||||
// Doing the "resolving" stage before the evaluation stage has two benefits.
|
||||
// First, it allows us to be compatible with DynamoDB in catching unused
|
||||
// names and values (see issue #6572). Second, in the FilterExpression case,
|
||||
// we need to resolve the expression just once but then use it many times
|
||||
// (once for each item to be filtered).
|
||||
|
||||
static void resolve_path(parsed::path& p,
|
||||
const rjson::value* expression_attribute_names,
|
||||
std::unordered_set<std::string>& used_attribute_names) {
|
||||
const std::string& column_name = p.root();
|
||||
if (column_name.size() > 0 && column_name.front() == '#') {
|
||||
if (!expression_attribute_names) {
|
||||
throw api_error("ValidationException",
|
||||
format("ExpressionAttributeNames missing, entry '{}' required by expression", column_name));
|
||||
}
|
||||
const rjson::value* value = rjson::find(*expression_attribute_names, column_name);
|
||||
if (!value || !value->IsString()) {
|
||||
throw api_error("ValidationException",
|
||||
format("ExpressionAttributeNames missing entry '{}' required by expression", column_name));
|
||||
}
|
||||
used_attribute_names.emplace(column_name);
|
||||
p.set_root(std::string(rjson::to_string_view(*value)));
|
||||
}
|
||||
}
|
||||
|
||||
static void resolve_constant(parsed::constant& c,
|
||||
const rjson::value* expression_attribute_values,
|
||||
std::unordered_set<std::string>& used_attribute_values) {
|
||||
std::visit(overloaded_functor {
|
||||
[&] (const std::string& valref) {
|
||||
if (!expression_attribute_values) {
|
||||
throw api_error("ValidationException",
|
||||
format("ExpressionAttributeValues missing, entry '{}' required by expression", valref));
|
||||
}
|
||||
const rjson::value* value = rjson::find(*expression_attribute_values, valref);
|
||||
if (!value) {
|
||||
throw api_error("ValidationException",
|
||||
format("ExpressionAttributeValues missing entry '{}' required by expression", valref));
|
||||
}
|
||||
if (value->IsNull()) {
|
||||
throw api_error("ValidationException",
|
||||
format("ExpressionAttributeValues null value for entry '{}' required by expression", valref));
|
||||
}
|
||||
validate_value(*value, "ExpressionAttributeValues");
|
||||
used_attribute_values.emplace(valref);
|
||||
c.set(*value);
|
||||
},
|
||||
[&] (const parsed::constant::literal& lit) {
|
||||
// Nothing to do, already resolved
|
||||
}
|
||||
}, c._value);
|
||||
|
||||
}
|
||||
|
||||
void resolve_value(parsed::value& rhs,
|
||||
const rjson::value* expression_attribute_names,
|
||||
const rjson::value* expression_attribute_values,
|
||||
std::unordered_set<std::string>& used_attribute_names,
|
||||
std::unordered_set<std::string>& used_attribute_values) {
|
||||
std::visit(overloaded_functor {
|
||||
[&] (parsed::constant& c) {
|
||||
resolve_constant(c, expression_attribute_values, used_attribute_values);
|
||||
},
|
||||
[&] (parsed::value::function_call& f) {
|
||||
for (parsed::value& value : f._parameters) {
|
||||
resolve_value(value, expression_attribute_names, expression_attribute_values,
|
||||
used_attribute_names, used_attribute_values);
|
||||
}
|
||||
},
|
||||
[&] (parsed::path& p) {
|
||||
resolve_path(p, expression_attribute_names, used_attribute_names);
|
||||
}
|
||||
}, rhs._value);
|
||||
}
|
||||
|
||||
void resolve_set_rhs(parsed::set_rhs& rhs,
|
||||
const rjson::value* expression_attribute_names,
|
||||
const rjson::value* expression_attribute_values,
|
||||
std::unordered_set<std::string>& used_attribute_names,
|
||||
std::unordered_set<std::string>& used_attribute_values) {
|
||||
resolve_value(rhs._v1, expression_attribute_names, expression_attribute_values,
|
||||
used_attribute_names, used_attribute_values);
|
||||
if (rhs._op != 'v') {
|
||||
resolve_value(rhs._v2, expression_attribute_names, expression_attribute_values,
|
||||
used_attribute_names, used_attribute_values);
|
||||
}
|
||||
}
|
||||
|
||||
void resolve_update_expression(parsed::update_expression& ue,
|
||||
const rjson::value* expression_attribute_names,
|
||||
const rjson::value* expression_attribute_values,
|
||||
std::unordered_set<std::string>& used_attribute_names,
|
||||
std::unordered_set<std::string>& used_attribute_values) {
|
||||
for (parsed::update_expression::action& action : ue.actions()) {
|
||||
resolve_path(action._path, expression_attribute_names, used_attribute_names);
|
||||
std::visit(overloaded_functor {
|
||||
[&] (parsed::update_expression::action::set& a) {
|
||||
resolve_set_rhs(a._rhs, expression_attribute_names, expression_attribute_values,
|
||||
used_attribute_names, used_attribute_values);
|
||||
},
|
||||
[&] (parsed::update_expression::action::remove& a) {
|
||||
// nothing to do
|
||||
},
|
||||
[&] (parsed::update_expression::action::add& a) {
|
||||
resolve_constant(a._valref, expression_attribute_values, used_attribute_values);
|
||||
},
|
||||
[&] (parsed::update_expression::action::del& a) {
|
||||
resolve_constant(a._valref, expression_attribute_values, used_attribute_values);
|
||||
}
|
||||
}, action._action);
|
||||
}
|
||||
}
|
||||
|
||||
static void resolve_primitive_condition(parsed::primitive_condition& pc,
|
||||
const rjson::value* expression_attribute_names,
|
||||
const rjson::value* expression_attribute_values,
|
||||
std::unordered_set<std::string>& used_attribute_names,
|
||||
std::unordered_set<std::string>& used_attribute_values) {
|
||||
for (parsed::value& value : pc._values) {
|
||||
resolve_value(value,
|
||||
expression_attribute_names, expression_attribute_values,
|
||||
used_attribute_names, used_attribute_values);
|
||||
}
|
||||
}
|
||||
|
||||
void resolve_condition_expression(parsed::condition_expression& ce,
|
||||
const rjson::value* expression_attribute_names,
|
||||
const rjson::value* expression_attribute_values,
|
||||
std::unordered_set<std::string>& used_attribute_names,
|
||||
std::unordered_set<std::string>& used_attribute_values) {
|
||||
std::visit(overloaded_functor {
|
||||
[&] (parsed::primitive_condition& cond) {
|
||||
resolve_primitive_condition(cond,
|
||||
expression_attribute_names, expression_attribute_values,
|
||||
used_attribute_names, used_attribute_values);
|
||||
},
|
||||
[&] (parsed::condition_expression::condition_list& list) {
|
||||
for (parsed::condition_expression& cond : list.conditions) {
|
||||
resolve_condition_expression(cond,
|
||||
expression_attribute_names, expression_attribute_values,
|
||||
used_attribute_names, used_attribute_values);
|
||||
|
||||
}
|
||||
}
|
||||
}, ce._expression);
|
||||
}
|
||||
|
||||
void resolve_projection_expression(std::vector<parsed::path>& pe,
|
||||
const rjson::value* expression_attribute_names,
|
||||
std::unordered_set<std::string>& used_attribute_names) {
|
||||
for (parsed::path& p : pe) {
|
||||
resolve_path(p, expression_attribute_names, used_attribute_names);
|
||||
}
|
||||
}
|
||||
|
||||
// condition_expression_on() checks whether a condition_expression places any
|
||||
// condition on the given attribute. It can be useful, for example, for
|
||||
// checking whether the condition tries to restrict a key column.
|
||||
|
||||
static bool value_on(const parsed::value& v, std::string_view attribute) {
|
||||
return std::visit(overloaded_functor {
|
||||
[&] (const parsed::constant& c) {
|
||||
return false;
|
||||
},
|
||||
[&] (const parsed::value::function_call& f) {
|
||||
for (const parsed::value& value : f._parameters) {
|
||||
if (value_on(value, attribute)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
},
|
||||
[&] (const parsed::path& p) {
|
||||
return p.root() == attribute;
|
||||
}
|
||||
}, v._value);
|
||||
}
|
||||
|
||||
static bool primitive_condition_on(const parsed::primitive_condition& pc, std::string_view attribute) {
|
||||
for (const parsed::value& value : pc._values) {
|
||||
if (value_on(value, attribute)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool condition_expression_on(const parsed::condition_expression& ce, std::string_view attribute) {
|
||||
return std::visit(overloaded_functor {
|
||||
[&] (const parsed::primitive_condition& cond) {
|
||||
return primitive_condition_on(cond, attribute);
|
||||
},
|
||||
[&] (const parsed::condition_expression::condition_list& list) {
|
||||
for (const parsed::condition_expression& cond : list.conditions) {
|
||||
if (condition_expression_on(cond, attribute)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
}, ce._expression);
|
||||
}
|
||||
|
||||
// for_condition_expression_on() runs a given function over all the attributes
|
||||
// mentioned in the expression. If the same attribute is mentioned more than
|
||||
// once, the function will be called more than once for the same attribute.
|
||||
|
||||
static void for_value_on(const parsed::value& v, const noncopyable_function<void(std::string_view)>& func) {
|
||||
std::visit(overloaded_functor {
|
||||
[&] (const parsed::constant& c) { },
|
||||
[&] (const parsed::value::function_call& f) {
|
||||
for (const parsed::value& value : f._parameters) {
|
||||
for_value_on(value, func);
|
||||
}
|
||||
},
|
||||
[&] (const parsed::path& p) {
|
||||
func(p.root());
|
||||
}
|
||||
}, v._value);
|
||||
}
|
||||
|
||||
void for_condition_expression_on(const parsed::condition_expression& ce, const noncopyable_function<void(std::string_view)>& func) {
|
||||
std::visit(overloaded_functor {
|
||||
[&] (const parsed::primitive_condition& cond) {
|
||||
for (const parsed::value& value : cond._values) {
|
||||
for_value_on(value, func);
|
||||
}
|
||||
},
|
||||
[&] (const parsed::condition_expression::condition_list& list) {
|
||||
for (const parsed::condition_expression& cond : list.conditions) {
|
||||
for_condition_expression_on(cond, func);
|
||||
}
|
||||
}
|
||||
}, ce._expression);
|
||||
}
|
||||
|
||||
// The following calculate_value() functions calculate, or evaluate, a parsed
|
||||
// expression. The parsed expression is assumed to have been "resolved", with
|
||||
// the matching resolve_* function.
|
||||
|
||||
// Take two JSON-encoded list values (remember that a list value is
|
||||
// {"L": [...the actual list]}) and return the concatenation, again as
|
||||
// a list value.
|
||||
static rjson::value list_concatenate(const rjson::value& v1, const rjson::value& v2) {
|
||||
const rjson::value* list1 = unwrap_list(v1);
|
||||
const rjson::value* list2 = unwrap_list(v2);
|
||||
if (!list1 || !list2) {
|
||||
throw api_error("ValidationException", "UpdateExpression: list_append() given a non-list");
|
||||
}
|
||||
rjson::value cat = rjson::copy(*list1);
|
||||
for (const auto& a : list2->GetArray()) {
|
||||
rjson::push_back(cat, rjson::copy(a));
|
||||
}
|
||||
rjson::value ret = rjson::empty_object();
|
||||
rjson::set(ret, "L", std::move(cat));
|
||||
return ret;
|
||||
}
|
||||
|
||||
// calculate_size() is ConditionExpression's size() function, i.e., it takes
|
||||
// a JSON-encoded value and returns its "size" as defined differently for the
|
||||
// different types - also as a JSON-encoded number.
|
||||
// It return a JSON-encoded "null" value if this value's type has no size
|
||||
// defined. Comparisons against this non-numeric value will later fail.
|
||||
static rjson::value calculate_size(const rjson::value& v) {
|
||||
// NOTE: If v is improperly formatted for our JSON value encoding, it
|
||||
// must come from the request itself, not from the database, so it makes
|
||||
// sense to throw a ValidationException if we see such a problem.
|
||||
if (!v.IsObject() || v.MemberCount() != 1) {
|
||||
throw api_error("ValidationException", format("invalid object: {}", v));
|
||||
}
|
||||
auto it = v.MemberBegin();
|
||||
int ret;
|
||||
if (it->name == "S") {
|
||||
if (!it->value.IsString()) {
|
||||
throw api_error("ValidationException", format("invalid string: {}", v));
|
||||
}
|
||||
ret = it->value.GetStringLength();
|
||||
} else if (it->name == "NS" || it->name == "SS" || it->name == "BS" || it->name == "L") {
|
||||
if (!it->value.IsArray()) {
|
||||
throw api_error("ValidationException", format("invalid set: {}", v));
|
||||
}
|
||||
ret = it->value.Size();
|
||||
} else if (it->name == "M") {
|
||||
if (!it->value.IsObject()) {
|
||||
throw api_error("ValidationException", format("invalid map: {}", v));
|
||||
}
|
||||
ret = it->value.MemberCount();
|
||||
} else if (it->name == "B") {
|
||||
if (!it->value.IsString()) {
|
||||
throw api_error("ValidationException", format("invalid byte string: {}", v));
|
||||
}
|
||||
ret = base64_decoded_len(rjson::to_string_view(it->value));
|
||||
} else {
|
||||
rjson::value json_ret = rjson::empty_object();
|
||||
rjson::set(json_ret, "null", rjson::value(true));
|
||||
return json_ret;
|
||||
}
|
||||
rjson::value json_ret = rjson::empty_object();
|
||||
rjson::set(json_ret, "N", rjson::from_string(std::to_string(ret)));
|
||||
return json_ret;
|
||||
}
|
||||
|
||||
static const rjson::value& calculate_value(const parsed::constant& c) {
|
||||
return std::visit(overloaded_functor {
|
||||
[&] (const parsed::constant::literal& v) -> const rjson::value& {
|
||||
return *v;
|
||||
},
|
||||
[&] (const std::string& valref) -> const rjson::value& {
|
||||
// Shouldn't happen, we should have called resolve_value() earlier
|
||||
// and replaced the value reference by the literal constant.
|
||||
throw std::logic_error("calculate_value() called before resolve_value()");
|
||||
}
|
||||
}, c._value);
|
||||
}
|
||||
|
||||
static rjson::value to_bool_json(bool b) {
|
||||
rjson::value json_ret = rjson::empty_object();
|
||||
rjson::set(json_ret, "BOOL", rjson::value(b));
|
||||
return json_ret;
|
||||
}
|
||||
|
||||
// Check whether the given string names one of DynamoDB's known attribute
// types ("S" for string, "NS" for number set, etc.).
static bool known_type(std::string_view type) {
    // With only ten short candidates, a linear scan over a constexpr array
    // beats the original thread_local std::unordered_set: no heap
    // allocation, no hashing, and no per-call initialization guard for a
    // dynamically-constructed function-local static.
    static constexpr std::string_view types[] = {
        "N", "S", "B", "NS", "SS", "BS", "L", "M", "NULL", "BOOL"
    };
    for (std::string_view t : types) {
        if (t == type) {
            return true;
        }
    }
    return false;
}
|
||||
|
||||
using function_handler_type = rjson::value(calculate_value_caller, const rjson::value*, const parsed::value::function_call&);
|
||||
static const
|
||||
std::unordered_map<std::string_view, function_handler_type*> function_handlers {
|
||||
{"list_append", [] (calculate_value_caller caller, const rjson::value* previous_item, const parsed::value::function_call& f) {
|
||||
if (caller != calculate_value_caller::UpdateExpression) {
|
||||
throw api_error("ValidationException",
|
||||
format("{}: list_append() not allowed here", caller));
|
||||
}
|
||||
if (f._parameters.size() != 2) {
|
||||
throw api_error("ValidationException",
|
||||
format("{}: list_append() accepts 2 parameters, got {}", caller, f._parameters.size()));
|
||||
}
|
||||
rjson::value v1 = calculate_value(f._parameters[0], caller, previous_item);
|
||||
rjson::value v2 = calculate_value(f._parameters[1], caller, previous_item);
|
||||
return list_concatenate(v1, v2);
|
||||
}
|
||||
},
|
||||
{"if_not_exists", [] (calculate_value_caller caller, const rjson::value* previous_item, const parsed::value::function_call& f) {
|
||||
if (caller != calculate_value_caller::UpdateExpression) {
|
||||
throw api_error("ValidationException",
|
||||
format("{}: if_not_exists() not allowed here", caller));
|
||||
}
|
||||
if (f._parameters.size() != 2) {
|
||||
throw api_error("ValidationException",
|
||||
format("{}: if_not_exists() accepts 2 parameters, got {}", caller, f._parameters.size()));
|
||||
}
|
||||
if (!std::holds_alternative<parsed::path>(f._parameters[0]._value)) {
|
||||
throw api_error("ValidationException",
|
||||
format("{}: if_not_exists() must include path as its first argument", caller));
|
||||
}
|
||||
rjson::value v1 = calculate_value(f._parameters[0], caller, previous_item);
|
||||
rjson::value v2 = calculate_value(f._parameters[1], caller, previous_item);
|
||||
return v1.IsNull() ? std::move(v2) : std::move(v1);
|
||||
}
|
||||
},
|
||||
{"size", [] (calculate_value_caller caller, const rjson::value* previous_item, const parsed::value::function_call& f) {
|
||||
if (caller != calculate_value_caller::ConditionExpression) {
|
||||
throw api_error("ValidationException",
|
||||
format("{}: size() not allowed here", caller));
|
||||
}
|
||||
if (f._parameters.size() != 1) {
|
||||
throw api_error("ValidationException",
|
||||
format("{}: size() accepts 1 parameter, got {}", caller, f._parameters.size()));
|
||||
}
|
||||
rjson::value v = calculate_value(f._parameters[0], caller, previous_item);
|
||||
return calculate_size(v);
|
||||
}
|
||||
},
|
||||
{"attribute_exists", [] (calculate_value_caller caller, const rjson::value* previous_item, const parsed::value::function_call& f) {
|
||||
if (caller != calculate_value_caller::ConditionExpressionAlone) {
|
||||
throw api_error("ValidationException",
|
||||
format("{}: attribute_exists() not allowed here", caller));
|
||||
}
|
||||
if (f._parameters.size() != 1) {
|
||||
throw api_error("ValidationException",
|
||||
format("{}: attribute_exists() accepts 1 parameter, got {}", caller, f._parameters.size()));
|
||||
}
|
||||
if (!std::holds_alternative<parsed::path>(f._parameters[0]._value)) {
|
||||
throw api_error("ValidationException",
|
||||
format("{}: attribute_exists()'s parameter must be a path", caller));
|
||||
}
|
||||
rjson::value v = calculate_value(f._parameters[0], caller, previous_item);
|
||||
return to_bool_json(!v.IsNull());
|
||||
}
|
||||
},
|
||||
{"attribute_not_exists", [] (calculate_value_caller caller, const rjson::value* previous_item, const parsed::value::function_call& f) {
|
||||
if (caller != calculate_value_caller::ConditionExpressionAlone) {
|
||||
throw api_error("ValidationException",
|
||||
format("{}: attribute_not_exists() not allowed here", caller));
|
||||
}
|
||||
if (f._parameters.size() != 1) {
|
||||
throw api_error("ValidationException",
|
||||
format("{}: attribute_not_exists() accepts 1 parameter, got {}", caller, f._parameters.size()));
|
||||
}
|
||||
if (!std::holds_alternative<parsed::path>(f._parameters[0]._value)) {
|
||||
throw api_error("ValidationException",
|
||||
format("{}: attribute_not_exists()'s parameter must be a path", caller));
|
||||
}
|
||||
rjson::value v = calculate_value(f._parameters[0], caller, previous_item);
|
||||
return to_bool_json(v.IsNull());
|
||||
}
|
||||
},
|
||||
{"attribute_type", [] (calculate_value_caller caller, const rjson::value* previous_item, const parsed::value::function_call& f) {
|
||||
if (caller != calculate_value_caller::ConditionExpressionAlone) {
|
||||
throw api_error("ValidationException",
|
||||
format("{}: attribute_type() not allowed here", caller));
|
||||
}
|
||||
if (f._parameters.size() != 2) {
|
||||
throw api_error("ValidationException",
|
||||
format("{}: attribute_type() accepts 2 parameters, got {}", caller, f._parameters.size()));
|
||||
}
|
||||
// There is no real reason for the following check (not
|
||||
// allowing the type to come from a document attribute), but
|
||||
// DynamoDB does this check, so we do too...
|
||||
if (!f._parameters[1].is_constant()) {
|
||||
throw api_error("ValidationException",
|
||||
format("{}: attribute_types()'s first parameter must be an expression attribute", caller));
|
||||
}
|
||||
rjson::value v0 = calculate_value(f._parameters[0], caller, previous_item);
|
||||
rjson::value v1 = calculate_value(f._parameters[1], caller, previous_item);
|
||||
if (v1.IsObject() && v1.MemberCount() == 1 && v1.MemberBegin()->name == "S") {
|
||||
// If the type parameter is not one of the legal types
|
||||
// we should generate an error, not a failed condition:
|
||||
if (!known_type(rjson::to_string_view(v1.MemberBegin()->value))) {
|
||||
throw api_error("ValidationException",
|
||||
format("{}: attribute_types()'s second parameter, {}, is not a known type",
|
||||
caller, v1.MemberBegin()->value));
|
||||
}
|
||||
if (v0.IsObject() && v0.MemberCount() == 1) {
|
||||
return to_bool_json(v1.MemberBegin()->value == v0.MemberBegin()->name);
|
||||
} else {
|
||||
return to_bool_json(false);
|
||||
}
|
||||
} else {
|
||||
throw api_error("ValidationException",
|
||||
format("{}: attribute_type() second parameter must refer to a string, got {}", caller, v1));
|
||||
}
|
||||
}
|
||||
},
|
||||
{"begins_with", [] (calculate_value_caller caller, const rjson::value* previous_item, const parsed::value::function_call& f) {
|
||||
if (caller != calculate_value_caller::ConditionExpressionAlone) {
|
||||
throw api_error("ValidationException",
|
||||
format("{}: begins_with() not allowed here", caller));
|
||||
}
|
||||
if (f._parameters.size() != 2) {
|
||||
throw api_error("ValidationException",
|
||||
format("{}: begins_with() accepts 2 parameters, got {}", caller, f._parameters.size()));
|
||||
}
|
||||
rjson::value v1 = calculate_value(f._parameters[0], caller, previous_item);
|
||||
rjson::value v2 = calculate_value(f._parameters[1], caller, previous_item);
|
||||
return to_bool_json(check_BEGINS_WITH(v1.IsNull() ? nullptr : &v1, v2,
|
||||
f._parameters[0].is_constant(), f._parameters[1].is_constant()));
|
||||
}
|
||||
},
|
||||
{"contains", [] (calculate_value_caller caller, const rjson::value* previous_item, const parsed::value::function_call& f) {
|
||||
if (caller != calculate_value_caller::ConditionExpressionAlone) {
|
||||
throw api_error("ValidationException",
|
||||
format("{}: contains() not allowed here", caller));
|
||||
}
|
||||
if (f._parameters.size() != 2) {
|
||||
throw api_error("ValidationException",
|
||||
format("{}: contains() accepts 2 parameters, got {}", caller, f._parameters.size()));
|
||||
}
|
||||
rjson::value v1 = calculate_value(f._parameters[0], caller, previous_item);
|
||||
rjson::value v2 = calculate_value(f._parameters[1], caller, previous_item);
|
||||
return to_bool_json(check_CONTAINS(v1.IsNull() ? nullptr : &v1, v2));
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
// Given a parsed::value, which can refer either to a constant value from
|
||||
// ExpressionAttributeValues, to the value of some attribute, or to a function
|
||||
// of other values, this function calculates the resulting value.
|
||||
// "caller" determines which expression - ConditionExpression or
|
||||
// UpdateExpression - is asking for this value. We need to know this because
|
||||
// DynamoDB allows a different choice of functions for different expressions.
|
||||
rjson::value calculate_value(const parsed::value& v,
|
||||
calculate_value_caller caller,
|
||||
const rjson::value* previous_item) {
|
||||
return std::visit(overloaded_functor {
|
||||
[&] (const parsed::constant& c) -> rjson::value {
|
||||
return rjson::copy(calculate_value(c));
|
||||
},
|
||||
[&] (const parsed::value::function_call& f) -> rjson::value {
|
||||
auto function_it = function_handlers.find(std::string_view(f._function_name));
|
||||
if (function_it == function_handlers.end()) {
|
||||
throw api_error("ValidationException",
|
||||
format("UpdateExpression: unknown function '{}' called.", f._function_name));
|
||||
}
|
||||
return function_it->second(caller, previous_item, f);
|
||||
},
|
||||
[&] (const parsed::path& p) -> rjson::value {
|
||||
if (!previous_item) {
|
||||
return rjson::null_value();
|
||||
}
|
||||
std::string update_path = p.root();
|
||||
if (p.has_operators()) {
|
||||
// FIXME: support this
|
||||
throw api_error("ValidationException", "Reading attribute paths not yet implemented");
|
||||
}
|
||||
const rjson::value* previous_value = rjson::find(*previous_item, update_path);
|
||||
return previous_value ? rjson::copy(*previous_value) : rjson::null_value();
|
||||
}
|
||||
}, v._value);
|
||||
}
|
||||
|
||||
// Same as calculate_value() above, except takes a set_rhs, which may be
|
||||
// either a single value, or v1+v2 or v1-v2.
|
||||
rjson::value calculate_value(const parsed::set_rhs& rhs,
|
||||
const rjson::value* previous_item) {
|
||||
switch(rhs._op) {
|
||||
case 'v':
|
||||
return calculate_value(rhs._v1, calculate_value_caller::UpdateExpression, previous_item);
|
||||
case '+': {
|
||||
rjson::value v1 = calculate_value(rhs._v1, calculate_value_caller::UpdateExpression, previous_item);
|
||||
rjson::value v2 = calculate_value(rhs._v2, calculate_value_caller::UpdateExpression, previous_item);
|
||||
return number_add(v1, v2);
|
||||
}
|
||||
case '-': {
|
||||
rjson::value v1 = calculate_value(rhs._v1, calculate_value_caller::UpdateExpression, previous_item);
|
||||
rjson::value v2 = calculate_value(rhs._v2, calculate_value_caller::UpdateExpression, previous_item);
|
||||
return number_subtract(v1, v2);
|
||||
}
|
||||
}
|
||||
// Can't happen
|
||||
return rjson::null_value();
|
||||
}
|
||||
|
||||
} // namespace alternator
|
||||
|
||||
@@ -24,13 +24,8 @@
|
||||
#include <string>
|
||||
#include <stdexcept>
|
||||
#include <vector>
|
||||
#include <unordered_set>
|
||||
#include <string_view>
|
||||
|
||||
#include <seastar/util/noncopyable_function.hh>
|
||||
|
||||
#include "expressions_types.hh"
|
||||
#include "rjson.hh"
|
||||
|
||||
namespace alternator {
|
||||
|
||||
@@ -43,60 +38,4 @@ parsed::update_expression parse_update_expression(std::string query);
|
||||
std::vector<parsed::path> parse_projection_expression(std::string query);
|
||||
parsed::condition_expression parse_condition_expression(std::string query);
|
||||
|
||||
void resolve_update_expression(parsed::update_expression& ue,
|
||||
const rjson::value* expression_attribute_names,
|
||||
const rjson::value* expression_attribute_values,
|
||||
std::unordered_set<std::string>& used_attribute_names,
|
||||
std::unordered_set<std::string>& used_attribute_values);
|
||||
void resolve_projection_expression(std::vector<parsed::path>& pe,
|
||||
const rjson::value* expression_attribute_names,
|
||||
std::unordered_set<std::string>& used_attribute_names);
|
||||
void resolve_condition_expression(parsed::condition_expression& ce,
|
||||
const rjson::value* expression_attribute_names,
|
||||
const rjson::value* expression_attribute_values,
|
||||
std::unordered_set<std::string>& used_attribute_names,
|
||||
std::unordered_set<std::string>& used_attribute_values);
|
||||
|
||||
void validate_value(const rjson::value& v, const char* caller);
|
||||
|
||||
bool condition_expression_on(const parsed::condition_expression& ce, std::string_view attribute);
|
||||
|
||||
// for_condition_expression_on() runs the given function on the attributes
|
||||
// that the expression uses. It may run for the same attribute more than once
|
||||
// if the same attribute is used more than once in the expression.
|
||||
void for_condition_expression_on(const parsed::condition_expression& ce, const noncopyable_function<void(std::string_view)>& func);
|
||||
|
||||
// calculate_value() behaves slightly different (especially, different
|
||||
// functions supported) when used in different types of expressions, as
|
||||
// enumerated in this enum:
|
||||
enum class calculate_value_caller {
|
||||
UpdateExpression, ConditionExpression, ConditionExpressionAlone
|
||||
};
|
||||
|
||||
inline std::ostream& operator<<(std::ostream& out, calculate_value_caller caller) {
|
||||
switch (caller) {
|
||||
case calculate_value_caller::UpdateExpression:
|
||||
out << "UpdateExpression";
|
||||
break;
|
||||
case calculate_value_caller::ConditionExpression:
|
||||
out << "ConditionExpression";
|
||||
break;
|
||||
case calculate_value_caller::ConditionExpressionAlone:
|
||||
out << "ConditionExpression";
|
||||
break;
|
||||
default:
|
||||
out << "unknown type of expression";
|
||||
break;
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
rjson::value calculate_value(const parsed::value& v,
|
||||
calculate_value_caller caller,
|
||||
const rjson::value* previous_item);
|
||||
|
||||
rjson::value calculate_value(const parsed::set_rhs& rhs,
|
||||
const rjson::value* previous_item);
|
||||
|
||||
|
||||
} /* namespace alternator */
|
||||
|
||||
78
alternator/expressions_eval.hh
Normal file
78
alternator/expressions_eval.hh
Normal file
@@ -0,0 +1,78 @@
|
||||
/*
|
||||
* Copyright 2020 ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <string>
|
||||
#include <unordered_set>
|
||||
|
||||
#include "rjson.hh"
|
||||
#include "schema_fwd.hh"
|
||||
|
||||
#include "expressions_types.hh"
|
||||
|
||||
namespace alternator {
|
||||
|
||||
// calculate_value() behaves slightly different (especially, different
|
||||
// functions supported) when used in different types of expressions, as
|
||||
// enumerated in this enum:
|
||||
enum class calculate_value_caller {
|
||||
UpdateExpression, ConditionExpression, ConditionExpressionAlone
|
||||
};
|
||||
|
||||
inline std::ostream& operator<<(std::ostream& out, calculate_value_caller caller) {
|
||||
switch (caller) {
|
||||
case calculate_value_caller::UpdateExpression:
|
||||
out << "UpdateExpression";
|
||||
break;
|
||||
case calculate_value_caller::ConditionExpression:
|
||||
out << "ConditionExpression";
|
||||
break;
|
||||
case calculate_value_caller::ConditionExpressionAlone:
|
||||
out << "ConditionExpression";
|
||||
break;
|
||||
default:
|
||||
out << "unknown type of expression";
|
||||
break;
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
bool check_CONTAINS(const rjson::value* v1, const rjson::value& v2);
|
||||
|
||||
rjson::value calculate_value(const parsed::value& v,
|
||||
calculate_value_caller caller,
|
||||
const rjson::value* expression_attribute_values,
|
||||
std::unordered_set<std::string>& used_attribute_names,
|
||||
std::unordered_set<std::string>& used_attribute_values,
|
||||
const rjson::value& update_info,
|
||||
schema_ptr schema,
|
||||
const std::unique_ptr<rjson::value>& previous_item);
|
||||
|
||||
bool verify_condition_expression(
|
||||
const parsed::condition_expression& condition_expression,
|
||||
std::unordered_set<std::string>& used_attribute_values,
|
||||
std::unordered_set<std::string>& used_attribute_names,
|
||||
const rjson::value& req,
|
||||
schema_ptr schema,
|
||||
const std::unique_ptr<rjson::value>& previous_item);
|
||||
|
||||
} /* namespace alternator */
|
||||
@@ -25,10 +25,6 @@
|
||||
#include <string>
|
||||
#include <variant>
|
||||
|
||||
#include <seastar/core/shared_ptr.hh>
|
||||
|
||||
#include "rjson.hh"
|
||||
|
||||
/*
|
||||
* Parsed representation of expressions and their components.
|
||||
*
|
||||
@@ -67,27 +63,10 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
// When an expression is first parsed, all constants are references, like
|
||||
// ":val1", into ExpressionAttributeValues. This uses std::string() variant.
|
||||
// The resolve_value() function replaces these constants by the JSON item
|
||||
// extracted from the ExpressionAttributeValues.
|
||||
struct constant {
|
||||
// We use lw_shared_ptr<rjson::value> just to make rjson::value copyable,
|
||||
// to make this entire object copyable as ANTLR needs.
|
||||
using literal = lw_shared_ptr<rjson::value>;
|
||||
std::variant<std::string, literal> _value;
|
||||
void set(const rjson::value& v) {
|
||||
_value = make_lw_shared<rjson::value>(rjson::copy(v));
|
||||
}
|
||||
void set(std::string& s) {
|
||||
_value = s;
|
||||
}
|
||||
};
|
||||
|
||||
// "value" is is a value used in the right hand side of an assignment
|
||||
// expression, "SET a = ...". It can be a constant (a reference to a value
|
||||
// included in the request, e.g., ":val"), a path to an attribute from the
|
||||
// existing item (e.g., "a.b[3].c"), or a function of other such values.
|
||||
// expression, "SET a = ...". It can be a reference to a value included in
|
||||
// the request (":val"), a path to an attribute from the existing item
|
||||
// (e.g., "a.b[3].c"), or a function of other such values.
|
||||
// Note that the real right-hand-side of an assignment is actually a bit
|
||||
// more general - it allows either a value, or a value+value or value-value -
|
||||
// see class set_rhs below.
|
||||
@@ -96,12 +75,9 @@ struct value {
|
||||
std::string _function_name;
|
||||
std::vector<value> _parameters;
|
||||
};
|
||||
std::variant<constant, path, function_call> _value;
|
||||
void set_constant(constant c) {
|
||||
_value = std::move(c);
|
||||
}
|
||||
std::variant<std::string, path, function_call> _value;
|
||||
void set_valref(std::string s) {
|
||||
_value = constant { std::move(s) };
|
||||
_value = std::move(s);
|
||||
}
|
||||
void set_path(path p) {
|
||||
_value = std::move(p);
|
||||
@@ -112,8 +88,8 @@ struct value {
|
||||
void add_func_parameter(value v) {
|
||||
std::get<function_call>(_value)._parameters.emplace_back(std::move(v));
|
||||
}
|
||||
bool is_constant() const {
|
||||
return std::holds_alternative<constant>(_value);
|
||||
bool is_valref() const {
|
||||
return std::holds_alternative<std::string>(_value);
|
||||
}
|
||||
bool is_path() const {
|
||||
return std::holds_alternative<path>(_value);
|
||||
@@ -154,10 +130,10 @@ public:
|
||||
struct remove {
|
||||
};
|
||||
struct add {
|
||||
constant _valref;
|
||||
std::string _valref;
|
||||
};
|
||||
struct del {
|
||||
constant _valref;
|
||||
std::string _valref;
|
||||
};
|
||||
std::variant<set, remove, add, del> _action;
|
||||
|
||||
@@ -171,11 +147,11 @@ public:
|
||||
}
|
||||
void assign_add(path p, std::string v) {
|
||||
_path = std::move(p);
|
||||
_action = add { constant { std::move(v) } };
|
||||
_action = add { std::move(v) };
|
||||
}
|
||||
void assign_del(path p, std::string v) {
|
||||
_path = std::move(p);
|
||||
_action = del { constant { std::move(v) } };
|
||||
_action = del { std::move(v) };
|
||||
}
|
||||
};
|
||||
private:
|
||||
@@ -193,9 +169,6 @@ public:
|
||||
const std::vector<action>& actions() const {
|
||||
return _actions;
|
||||
}
|
||||
std::vector<action>& actions() {
|
||||
return _actions;
|
||||
}
|
||||
};
|
||||
|
||||
// A primitive_condition is a condition expression involving one condition,
|
||||
|
||||
@@ -123,7 +123,7 @@ protected:
|
||||
|
||||
std::string print(const rjson::value& value) {
|
||||
string_buffer buffer;
|
||||
guarded_yieldable_json_handler<writer, false> writer(buffer, 78);
|
||||
guarded_yieldable_json_handler<writer, false> writer(buffer, 39);
|
||||
value.Accept(writer);
|
||||
return std::string(buffer.GetString());
|
||||
}
|
||||
@@ -133,7 +133,7 @@ rjson::value copy(const rjson::value& value) {
|
||||
}
|
||||
|
||||
rjson::value parse(std::string_view str) {
|
||||
guarded_yieldable_json_handler<document, false> d(78);
|
||||
guarded_yieldable_json_handler<document, false> d(39);
|
||||
d.Parse(str.data(), str.size());
|
||||
if (d.HasParseError()) {
|
||||
throw rjson::error(format("Parsing JSON failed: {}", GetParseError_En(d.GetParseError())));
|
||||
@@ -143,7 +143,7 @@ rjson::value parse(std::string_view str) {
|
||||
}
|
||||
|
||||
rjson::value parse_yieldable(std::string_view str) {
|
||||
guarded_yieldable_json_handler<document, true> d(78);
|
||||
guarded_yieldable_json_handler<document, true> d(39);
|
||||
d.Parse(str.data(), str.size());
|
||||
if (d.HasParseError()) {
|
||||
throw rjson::error(format("Parsing JSON failed: {}", GetParseError_En(d.GetParseError())));
|
||||
|
||||
@@ -21,9 +21,9 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "seastarx.hh"
|
||||
#include "service/storage_proxy.hh"
|
||||
#include "service/storage_proxy.hh"
|
||||
#include <seastarx.hh>
|
||||
#include <service/storage_proxy.hh>
|
||||
#include <service/storage_proxy.hh>
|
||||
#include "rjson.hh"
|
||||
#include "executor.hh"
|
||||
|
||||
@@ -63,10 +63,6 @@ public:
|
||||
|
||||
static write_isolation get_write_isolation_for_schema(schema_ptr schema);
|
||||
|
||||
static write_isolation default_write_isolation;
|
||||
public:
|
||||
static void set_default_write_isolation(std::string_view mode);
|
||||
|
||||
protected:
|
||||
// The full request JSON
|
||||
rjson::value _request;
|
||||
@@ -111,7 +107,7 @@ public:
|
||||
// "mutable" above so that apply() can still write to it.
|
||||
virtual std::optional<mutation> apply(std::unique_ptr<rjson::value> previous_item, api::timestamp_type ts) const = 0;
|
||||
// Convert the above apply() into the signature needed by cas_request:
|
||||
virtual std::optional<mutation> apply(foreign_ptr<lw_shared_ptr<query::result>> qr, const query::partition_slice& slice, api::timestamp_type ts) override;
|
||||
virtual std::optional<mutation> apply(query::result& qr, const query::partition_slice& slice, api::timestamp_type ts) override;
|
||||
virtual ~rmw_operation() = default;
|
||||
schema_ptr schema() const { return _schema; }
|
||||
const rjson::value& request() const { return _request; }
|
||||
|
||||
@@ -31,8 +31,8 @@ static logging::logger slogger("alternator-serialization");
|
||||
|
||||
namespace alternator {
|
||||
|
||||
type_info type_info_from_string(std::string_view type) {
|
||||
static thread_local const std::unordered_map<std::string_view, type_info> type_infos = {
|
||||
type_info type_info_from_string(std::string type) {
|
||||
static thread_local const std::unordered_map<std::string, type_info> type_infos = {
|
||||
{"S", {alternator_type::S, utf8_type}},
|
||||
{"B", {alternator_type::B, bytes_type}},
|
||||
{"BOOL", {alternator_type::BOOL, boolean_type}},
|
||||
@@ -87,7 +87,7 @@ bytes serialize_item(const rjson::value& item) {
|
||||
throw api_error("ValidationException", format("An item can contain only one attribute definition: {}", item));
|
||||
}
|
||||
auto it = item.MemberBegin();
|
||||
type_info type_info = type_info_from_string(rjson::to_string_view(it->name)); // JSON keys are guaranteed to be strings
|
||||
type_info type_info = type_info_from_string(it->name.GetString()); // JSON keys are guaranteed to be strings
|
||||
|
||||
if (type_info.atype == alternator_type::NOT_SUPPORTED_YET) {
|
||||
slogger.trace("Non-optimal serialization of type {}", it->name.GetString());
|
||||
@@ -121,7 +121,7 @@ struct to_json_visitor {
|
||||
}
|
||||
// default
|
||||
void operator()(const abstract_type& t) const {
|
||||
rjson::set_with_string_name(deserialized, type_ident, rjson::parse(to_json_string(t, bytes(bv))));
|
||||
rjson::set_with_string_name(deserialized, type_ident, rjson::parse(t.to_string(bytes(bv))));
|
||||
}
|
||||
};
|
||||
|
||||
@@ -153,9 +153,7 @@ std::string type_to_string(data_type type) {
|
||||
};
|
||||
auto it = types.find(type);
|
||||
if (it == types.end()) {
|
||||
// fall back to string, in order to be able to present
|
||||
// internal Scylla types in a human-readable way
|
||||
return "S";
|
||||
throw std::runtime_error(format("Unknown type {}", type->name()));
|
||||
}
|
||||
return it->second;
|
||||
}
|
||||
@@ -186,11 +184,6 @@ bytes get_key_from_typed_value(const rjson::value& key_typed_value, const column
|
||||
format("Type mismatch: expected type {} for key column {}, got type {}",
|
||||
type_to_string(column.type), column.name_as_text(), it->name.GetString()));
|
||||
}
|
||||
std::string_view value_view = rjson::to_string_view(it->value);
|
||||
if (value_view.empty()) {
|
||||
throw api_error("ValidationException",
|
||||
format("The AttributeValue for a key attribute cannot contain an empty string value. Key: {}", column.name_as_text()));
|
||||
}
|
||||
if (column.type == bytes_type) {
|
||||
return base64_decode(it->value);
|
||||
} else {
|
||||
@@ -212,11 +205,8 @@ rjson::value json_key_column_value(bytes_view cell, const column_definition& col
|
||||
auto s = to_json_string(*decimal_type, bytes(cell));
|
||||
return rjson::from_string(s);
|
||||
} else {
|
||||
// Support for arbitrary key types is useful for parsing values of virtual tables,
|
||||
// which can involve any type supported by Scylla.
|
||||
// In order to guarantee that the returned type is parsable by alternator clients,
|
||||
// they are represented simply as strings.
|
||||
return rjson::from_string(column.type->to_string(bytes(cell)));
|
||||
// We shouldn't get here, we shouldn't see such key columns.
|
||||
throw std::runtime_error(format("Unexpected key type: {}", column.type->name()));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -275,93 +265,4 @@ const std::pair<std::string, const rjson::value*> unwrap_set(const rjson::value&
|
||||
return std::make_pair(it_key, &(it->value));
|
||||
}
|
||||
|
||||
const rjson::value* unwrap_list(const rjson::value& v) {
|
||||
if (!v.IsObject() || v.MemberCount() != 1) {
|
||||
return nullptr;
|
||||
}
|
||||
auto it = v.MemberBegin();
|
||||
if (it->name != std::string("L")) {
|
||||
return nullptr;
|
||||
}
|
||||
return &(it->value);
|
||||
}
|
||||
|
||||
// Take two JSON-encoded numeric values ({"N": "thenumber"}) and return the
|
||||
// sum, again as a JSON-encoded number.
|
||||
rjson::value number_add(const rjson::value& v1, const rjson::value& v2) {
|
||||
auto n1 = unwrap_number(v1, "UpdateExpression");
|
||||
auto n2 = unwrap_number(v2, "UpdateExpression");
|
||||
rjson::value ret = rjson::empty_object();
|
||||
std::string str_ret = std::string((n1 + n2).to_string());
|
||||
rjson::set(ret, "N", rjson::from_string(str_ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
rjson::value number_subtract(const rjson::value& v1, const rjson::value& v2) {
|
||||
auto n1 = unwrap_number(v1, "UpdateExpression");
|
||||
auto n2 = unwrap_number(v2, "UpdateExpression");
|
||||
rjson::value ret = rjson::empty_object();
|
||||
std::string str_ret = std::string((n1 - n2).to_string());
|
||||
rjson::set(ret, "N", rjson::from_string(str_ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
// Take two JSON-encoded set values (e.g. {"SS": [...the actual set]}) and
|
||||
// return the sum of both sets, again as a set value.
|
||||
rjson::value set_sum(const rjson::value& v1, const rjson::value& v2) {
|
||||
auto [set1_type, set1] = unwrap_set(v1);
|
||||
auto [set2_type, set2] = unwrap_set(v2);
|
||||
if (set1_type != set2_type) {
|
||||
throw api_error("ValidationException", format("Mismatched set types: {} and {}", set1_type, set2_type));
|
||||
}
|
||||
if (!set1 || !set2) {
|
||||
throw api_error("ValidationException", "UpdateExpression: ADD operation for sets must be given sets as arguments");
|
||||
}
|
||||
rjson::value sum = rjson::copy(*set1);
|
||||
std::set<rjson::value, rjson::single_value_comp> set1_raw;
|
||||
for (auto it = sum.Begin(); it != sum.End(); ++it) {
|
||||
set1_raw.insert(rjson::copy(*it));
|
||||
}
|
||||
for (const auto& a : set2->GetArray()) {
|
||||
if (set1_raw.count(a) == 0) {
|
||||
rjson::push_back(sum, rjson::copy(a));
|
||||
}
|
||||
}
|
||||
rjson::value ret = rjson::empty_object();
|
||||
rjson::set_with_string_name(ret, set1_type, std::move(sum));
|
||||
return ret;
|
||||
}
|
||||
|
||||
// Take two JSON-encoded set values (e.g. {"SS": [...the actual list]}) and
|
||||
// return the difference of s1 - s2, again as a set value.
|
||||
// DynamoDB does not allow empty sets, so if resulting set is empty, return
|
||||
// an unset optional instead.
|
||||
std::optional<rjson::value> set_diff(const rjson::value& v1, const rjson::value& v2) {
|
||||
auto [set1_type, set1] = unwrap_set(v1);
|
||||
auto [set2_type, set2] = unwrap_set(v2);
|
||||
if (set1_type != set2_type) {
|
||||
throw api_error("ValidationException", format("Mismatched set types: {} and {}", set1_type, set2_type));
|
||||
}
|
||||
if (!set1 || !set2) {
|
||||
throw api_error("ValidationException", "UpdateExpression: DELETE operation can only be performed on a set");
|
||||
}
|
||||
std::set<rjson::value, rjson::single_value_comp> set1_raw;
|
||||
for (auto it = set1->Begin(); it != set1->End(); ++it) {
|
||||
set1_raw.insert(rjson::copy(*it));
|
||||
}
|
||||
for (const auto& a : set2->GetArray()) {
|
||||
set1_raw.erase(a);
|
||||
}
|
||||
if (set1_raw.empty()) {
|
||||
return std::nullopt;
|
||||
}
|
||||
rjson::value ret = rjson::empty_object();
|
||||
rjson::set_with_string_name(ret, set1_type, rjson::empty_array());
|
||||
rjson::value& result_set = ret[set1_type];
|
||||
for (const auto& a : set1_raw) {
|
||||
rjson::push_back(result_set, rjson::copy(a));
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -45,7 +45,7 @@ struct type_representation {
|
||||
data_type dtype;
|
||||
};
|
||||
|
||||
type_info type_info_from_string(std::string_view type);
|
||||
type_info type_info_from_string(std::string type);
|
||||
type_representation represent_type(alternator_type atype);
|
||||
|
||||
bytes serialize_item(const rjson::value& item);
|
||||
@@ -69,21 +69,4 @@ big_decimal unwrap_number(const rjson::value& v, std::string_view diagnostic);
|
||||
// returned value is {"", nullptr}
|
||||
const std::pair<std::string, const rjson::value*> unwrap_set(const rjson::value& v);
|
||||
|
||||
// Check if a given JSON object encodes a list (i.e., it is a {"L": [...]}
|
||||
// and returns a pointer to that list.
|
||||
const rjson::value* unwrap_list(const rjson::value& v);
|
||||
|
||||
// Take two JSON-encoded numeric values ({"N": "thenumber"}) and return the
|
||||
// sum, again as a JSON-encoded number.
|
||||
rjson::value number_add(const rjson::value& v1, const rjson::value& v2);
|
||||
rjson::value number_subtract(const rjson::value& v1, const rjson::value& v2);
|
||||
// Take two JSON-encoded set values (e.g. {"SS": [...the actual set]}) and
|
||||
// return the sum of both sets, again as a set value.
|
||||
rjson::value set_sum(const rjson::value& v1, const rjson::value& v2);
|
||||
// Take two JSON-encoded set values (e.g. {"SS": [...the actual list]}) and
|
||||
// return the difference of s1 - s2, again as a set value.
|
||||
// DynamoDB does not allow empty sets, so if resulting set is empty, return
|
||||
// an unset optional instead.
|
||||
std::optional<rjson::value> set_diff(const rjson::value& v1, const rjson::value& v2);
|
||||
|
||||
}
|
||||
|
||||
@@ -23,7 +23,7 @@
|
||||
#include "log.hh"
|
||||
#include <seastar/http/function_handlers.hh>
|
||||
#include <seastar/json/json_elements.hh>
|
||||
#include "seastarx.hh"
|
||||
#include <seastarx.hh>
|
||||
#include "error.hh"
|
||||
#include "rjson.hh"
|
||||
#include "auth.hh"
|
||||
@@ -69,7 +69,7 @@ class api_handler : public handler_base {
|
||||
public:
|
||||
api_handler(const std::function<future<executor::request_return_type>(std::unique_ptr<request> req)>& _handle) : _f_handle(
|
||||
[this, _handle](std::unique_ptr<request> req, std::unique_ptr<reply> rep) {
|
||||
return seastar::futurize_invoke(_handle, std::move(req)).then_wrapped([this, rep = std::move(rep)](future<executor::request_return_type> resf) mutable {
|
||||
return seastar::futurize_apply(_handle, std::move(req)).then_wrapped([this, rep = std::move(rep)](future<executor::request_return_type> resf) mutable {
|
||||
if (resf.failed()) {
|
||||
// Exceptions of type api_error are wrapped as JSON and
|
||||
// returned to the client as expected. Other types of
|
||||
@@ -409,19 +409,15 @@ future<> server::init(net::inet_address addr, std::optional<uint16_t> port, std:
|
||||
_http_server.set_content_length_limit(server::content_length_limit);
|
||||
_http_server.listen(socket_address{addr, *port}).get();
|
||||
_enabled_servers.push_back(std::ref(_http_server));
|
||||
slogger.info("Alternator HTTP server listening on {} port {}", addr, *port);
|
||||
}
|
||||
if (https_port) {
|
||||
set_routes(_https_server._routes);
|
||||
_https_server.set_content_length_limit(server::content_length_limit);
|
||||
_https_server.set_tls_credentials(creds->build_reloadable_server_credentials([](const std::unordered_set<sstring>& files, std::exception_ptr ep) {
|
||||
if (ep) {
|
||||
slogger.warn("Exception loading {}: {}", files, ep);
|
||||
} else {
|
||||
slogger.info("Reloaded {}", files);
|
||||
}
|
||||
}).get0());
|
||||
_https_server.set_tls_credentials(creds->build_server_credentials());
|
||||
_https_server.listen(socket_address{addr, *https_port}).get();
|
||||
_enabled_servers.push_back(std::ref(_https_server));
|
||||
slogger.info("Alternator HTTPS server listening on {} port {}", addr, *https_port);
|
||||
}
|
||||
} catch (...) {
|
||||
slogger.error("Failed to set up Alternator HTTP server on {} port {}, TLS port {}: {}",
|
||||
|
||||
@@ -26,8 +26,8 @@
|
||||
#include <seastar/http/httpd.hh>
|
||||
#include <seastar/net/tls.hh>
|
||||
#include <optional>
|
||||
#include "alternator/auth.hh"
|
||||
#include "utils/small_vector.hh"
|
||||
#include <alternator/auth.hh>
|
||||
#include <utils/small_vector.hh>
|
||||
#include <seastar/core/units.hh>
|
||||
|
||||
namespace alternator {
|
||||
|
||||
@@ -380,54 +380,16 @@
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"check if the auto_compaction property is enabled for a given table",
|
||||
"summary":"check if the auto compaction disabled",
|
||||
"type":"boolean",
|
||||
"nickname":"get_auto_compaction",
|
||||
"nickname":"is_auto_compaction_disabled",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"name",
|
||||
"description":"The table name in keyspace:name format",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Enable table auto compaction",
|
||||
"type":"void",
|
||||
"nickname":"enable_auto_compaction",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"name",
|
||||
"description":"The table name in keyspace:name format",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"method":"DELETE",
|
||||
"summary":"Disable table auto compaction",
|
||||
"type":"void",
|
||||
"nickname":"disable_auto_compaction",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"name",
|
||||
"description":"The table name in keyspace:name format",
|
||||
"description":"The column family name in keyspace:name format",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
|
||||
@@ -511,21 +511,6 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/storage_service/cdc_streams_check_and_repair",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Checks that CDC streams reflect current cluster topology and regenerates them if not.",
|
||||
"type":"void",
|
||||
"nickname":"cdc_streams_check_and_repair",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/storage_service/snapshots",
|
||||
"operations":[
|
||||
|
||||
16
api/api.cc
16
api/api.cc
@@ -93,22 +93,6 @@ static future<> register_api(http_context& ctx, const sstring& api_name,
|
||||
});
|
||||
}
|
||||
|
||||
future<> set_transport_controller(http_context& ctx, cql_transport::controller& ctl) {
|
||||
return ctx.http_server.set_routes([&ctx, &ctl] (routes& r) { set_transport_controller(ctx, r, ctl); });
|
||||
}
|
||||
|
||||
future<> unset_transport_controller(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_transport_controller(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_rpc_controller(http_context& ctx, thrift_controller& ctl) {
|
||||
return ctx.http_server.set_routes([&ctx, &ctl] (routes& r) { set_rpc_controller(ctx, r, ctl); });
|
||||
}
|
||||
|
||||
future<> unset_rpc_controller(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_rpc_controller(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_storage_service(http_context& ctx) {
|
||||
return register_api(ctx, "storage_service", "The storage service API", set_storage_service);
|
||||
}
|
||||
|
||||
@@ -25,8 +25,6 @@
|
||||
|
||||
namespace service { class load_meter; }
|
||||
namespace locator { class token_metadata; }
|
||||
namespace cql_transport { class controller; }
|
||||
class thrift_controller;
|
||||
|
||||
namespace api {
|
||||
|
||||
@@ -50,10 +48,6 @@ future<> set_server_init(http_context& ctx);
|
||||
future<> set_server_config(http_context& ctx);
|
||||
future<> set_server_snitch(http_context& ctx);
|
||||
future<> set_server_storage_service(http_context& ctx);
|
||||
future<> set_transport_controller(http_context& ctx, cql_transport::controller& ctl);
|
||||
future<> unset_transport_controller(http_context& ctx);
|
||||
future<> set_rpc_controller(http_context& ctx, thrift_controller& ctl);
|
||||
future<> unset_rpc_controller(http_context& ctx);
|
||||
future<> set_server_snapshot(http_context& ctx);
|
||||
future<> set_server_gossip(http_context& ctx);
|
||||
future<> set_server_load_sstable(http_context& ctx);
|
||||
|
||||
@@ -208,11 +208,9 @@ void set_cache_service(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cs::get_row_capacity.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return ctx.db.map_reduce0([](database& db) -> uint64_t {
|
||||
return db.row_cache_tracker().region().occupancy().used_space();
|
||||
}, uint64_t(0), std::plus<uint64_t>()).then([](const int64_t& res) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
return map_reduce_cf(ctx, uint64_t(0), [](const column_family& cf) {
|
||||
return cf.get_row_cache().get_cache_tracker().region().occupancy().used_space();
|
||||
}, std::plus<uint64_t>());
|
||||
});
|
||||
|
||||
cs::get_row_hits.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
@@ -253,19 +251,15 @@ void set_cache_service(http_context& ctx, routes& r) {
|
||||
cs::get_row_size.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
// In origin row size is the weighted size.
|
||||
// We currently do not support weights, so we use num entries instead
|
||||
return ctx.db.map_reduce0([](database& db) -> uint64_t {
|
||||
return db.row_cache_tracker().partitions();
|
||||
}, uint64_t(0), std::plus<uint64_t>()).then([](const int64_t& res) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
return map_reduce_cf(ctx, 0, [](const column_family& cf) {
|
||||
return cf.get_row_cache().partitions();
|
||||
}, std::plus<uint64_t>());
|
||||
});
|
||||
|
||||
cs::get_row_entries.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return ctx.db.map_reduce0([](database& db) -> uint64_t {
|
||||
return db.row_cache_tracker().partitions();
|
||||
}, uint64_t(0), std::plus<uint64_t>()).then([](const int64_t& res) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
return map_reduce_cf(ctx, 0, [](const column_family& cf) {
|
||||
return cf.get_row_cache().partitions();
|
||||
}, std::plus<uint64_t>());
|
||||
});
|
||||
|
||||
cs::get_counter_capacity.set(r, [] (std::unique_ptr<request> req) {
|
||||
|
||||
@@ -650,7 +650,7 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
cf::get_bloom_filter_disk_space_used.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, req->param["name"], uint64_t(0), [] (column_family& cf) {
|
||||
return std::accumulate(cf.get_sstables()->begin(), cf.get_sstables()->end(), uint64_t(0), [](uint64_t s, auto& sst) {
|
||||
return s + sst->filter_size();
|
||||
return sst->filter_size();
|
||||
});
|
||||
}, std::plus<uint64_t>());
|
||||
});
|
||||
@@ -658,7 +658,7 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
cf::get_all_bloom_filter_disk_space_used.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, uint64_t(0), [] (column_family& cf) {
|
||||
return std::accumulate(cf.get_sstables()->begin(), cf.get_sstables()->end(), uint64_t(0), [](uint64_t s, auto& sst) {
|
||||
return s + sst->filter_size();
|
||||
return sst->filter_size();
|
||||
});
|
||||
}, std::plus<uint64_t>());
|
||||
});
|
||||
@@ -666,7 +666,7 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
cf::get_bloom_filter_off_heap_memory_used.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, req->param["name"], uint64_t(0), [] (column_family& cf) {
|
||||
return std::accumulate(cf.get_sstables()->begin(), cf.get_sstables()->end(), uint64_t(0), [](uint64_t s, auto& sst) {
|
||||
return s + sst->filter_memory_size();
|
||||
return sst->filter_memory_size();
|
||||
});
|
||||
}, std::plus<uint64_t>());
|
||||
});
|
||||
@@ -674,7 +674,7 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
cf::get_all_bloom_filter_off_heap_memory_used.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, uint64_t(0), [] (column_family& cf) {
|
||||
return std::accumulate(cf.get_sstables()->begin(), cf.get_sstables()->end(), uint64_t(0), [](uint64_t s, auto& sst) {
|
||||
return s + sst->filter_memory_size();
|
||||
return sst->filter_memory_size();
|
||||
});
|
||||
}, std::plus<uint64_t>());
|
||||
});
|
||||
@@ -682,7 +682,7 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
cf::get_index_summary_off_heap_memory_used.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, req->param["name"], uint64_t(0), [] (column_family& cf) {
|
||||
return std::accumulate(cf.get_sstables()->begin(), cf.get_sstables()->end(), uint64_t(0), [](uint64_t s, auto& sst) {
|
||||
return s + sst->get_summary().memory_footprint();
|
||||
return sst->get_summary().memory_footprint();
|
||||
});
|
||||
}, std::plus<uint64_t>());
|
||||
});
|
||||
@@ -690,7 +690,7 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
cf::get_all_index_summary_off_heap_memory_used.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, uint64_t(0), [] (column_family& cf) {
|
||||
return std::accumulate(cf.get_sstables()->begin(), cf.get_sstables()->end(), uint64_t(0), [](uint64_t s, auto& sst) {
|
||||
return s + sst->get_summary().memory_footprint();
|
||||
return sst->get_summary().memory_footprint();
|
||||
});
|
||||
}, std::plus<uint64_t>());
|
||||
});
|
||||
@@ -804,14 +804,14 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
|
||||
cf::get_cas_propose.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, req->param["name"], utils::estimated_histogram(0), [](column_family& cf) {
|
||||
return cf.get_stats().estimated_cas_accept;
|
||||
return cf.get_stats().estimated_cas_propose;
|
||||
},
|
||||
utils::estimated_histogram_merge, utils_json::estimated_histogram());
|
||||
});
|
||||
|
||||
cf::get_cas_commit.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, req->param["name"], utils::estimated_histogram(0), [](column_family& cf) {
|
||||
return cf.get_stats().estimated_cas_learn;
|
||||
return cf.get_stats().estimated_cas_commit;
|
||||
},
|
||||
utils::estimated_histogram_merge, utils_json::estimated_histogram());
|
||||
});
|
||||
@@ -839,26 +839,11 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
|
||||
cf::get_auto_compaction.set(r, [&ctx] (const_req req) {
|
||||
const utils::UUID& uuid = get_uuid(req.param["name"], ctx.db.local());
|
||||
column_family& cf = ctx.db.local().find_column_family(uuid);
|
||||
return !cf.is_auto_compaction_disabled_by_user();
|
||||
});
|
||||
|
||||
cf::enable_auto_compaction.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return foreach_column_family(ctx, req->param["name"], [](column_family &cf) {
|
||||
cf.enable_auto_compaction();
|
||||
}).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
cf::disable_auto_compaction.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return foreach_column_family(ctx, req->param["name"], [](column_family &cf) {
|
||||
cf.disable_auto_compaction();
|
||||
}).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
cf::is_auto_compaction_disabled.set(r, [] (const_req req) {
|
||||
// FIXME
|
||||
// currently auto compaction is disable
|
||||
// it should be changed when it would have an API
|
||||
return true;
|
||||
});
|
||||
|
||||
cf::get_built_indexes.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
*/
|
||||
|
||||
#include "commitlog.hh"
|
||||
#include "db/commitlog/commitlog.hh"
|
||||
#include <db/commitlog/commitlog.hh>
|
||||
#include "api/api-doc/commitlog.json.hh"
|
||||
#include "database.hh"
|
||||
#include <vector>
|
||||
|
||||
@@ -37,9 +37,8 @@ void set_error_injection(http_context& ctx, routes& r) {
|
||||
sstring injection = req->param["injection"];
|
||||
bool one_shot = req->get_query_param("one_shot") == "True";
|
||||
auto& errinj = utils::get_local_injector();
|
||||
return errinj.enable_on_all(injection, one_shot).then([] {
|
||||
return make_ready_future<json::json_return_type>(json::json_void());
|
||||
});
|
||||
errinj.enable_on_all(injection, one_shot);
|
||||
return make_ready_future<json::json_return_type>(json::json_void());
|
||||
});
|
||||
|
||||
hf::get_enabled_injections_on_all.set(r, [](std::unique_ptr<request> req) {
|
||||
@@ -52,16 +51,14 @@ void set_error_injection(http_context& ctx, routes& r) {
|
||||
sstring injection = req->param["injection"];
|
||||
|
||||
auto& errinj = utils::get_local_injector();
|
||||
return errinj.disable_on_all(injection).then([] {
|
||||
return make_ready_future<json::json_return_type>(json::json_void());
|
||||
});
|
||||
errinj.disable_on_all(injection);
|
||||
return make_ready_future<json::json_return_type>(json::json_void());
|
||||
});
|
||||
|
||||
hf::disable_on_all.set(r, [](std::unique_ptr<request> req) {
|
||||
auto& errinj = utils::get_local_injector();
|
||||
return errinj.disable_on_all().then([] {
|
||||
return make_ready_future<json::json_return_type>(json::json_void());
|
||||
});
|
||||
errinj.disable_on_all();
|
||||
return make_ready_future<json::json_return_type>(json::json_void());
|
||||
});
|
||||
|
||||
}
|
||||
|
||||
@@ -21,7 +21,7 @@
|
||||
|
||||
#include "gossiper.hh"
|
||||
#include "api/api-doc/gossiper.json.hh"
|
||||
#include "gms/gossiper.hh"
|
||||
#include <gms/gossiper.hh>
|
||||
|
||||
namespace api {
|
||||
using namespace json;
|
||||
|
||||
@@ -116,23 +116,6 @@ static future<json::json_return_type> sum_timed_rate_as_long(distributed<proxy>
|
||||
});
|
||||
}
|
||||
|
||||
utils_json::estimated_histogram time_to_json_histogram(const utils::time_estimated_histogram& val) {
|
||||
utils_json::estimated_histogram res;
|
||||
for (size_t i = 0; i < val.size(); i++) {
|
||||
res.buckets.push(val.get(i));
|
||||
res.bucket_offsets.push(val.get_bucket_lower_limit(i));
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
static future<json::json_return_type> sum_estimated_histogram(http_context& ctx, utils::time_estimated_histogram service::storage_proxy_stats::stats::*f) {
|
||||
|
||||
return two_dimensional_map_reduce(ctx.sp, f, utils::time_estimated_histogram_merge,
|
||||
utils::time_estimated_histogram()).then([](const utils::time_estimated_histogram& val) {
|
||||
return make_ready_future<json::json_return_type>(time_to_json_histogram(val));
|
||||
});
|
||||
}
|
||||
|
||||
static future<json::json_return_type> sum_estimated_histogram(http_context& ctx, utils::estimated_histogram service::storage_proxy_stats::stats::*f) {
|
||||
|
||||
return two_dimensional_map_reduce(ctx.sp, f, utils::estimated_histogram_merge,
|
||||
|
||||
@@ -41,8 +41,6 @@
|
||||
#include "sstables/sstables.hh"
|
||||
#include "database.hh"
|
||||
#include "db/extensions.hh"
|
||||
#include "transport/controller.hh"
|
||||
#include "thrift/controller.hh"
|
||||
|
||||
namespace api {
|
||||
|
||||
@@ -87,68 +85,6 @@ static auto wrap_ks_cf(http_context &ctx, ks_cf_func f) {
|
||||
};
|
||||
}
|
||||
|
||||
future<json::json_return_type> set_tables_autocompaction(http_context& ctx, const sstring &keyspace, std::vector<sstring> tables, bool enabled) {
|
||||
if (tables.empty()) {
|
||||
tables = map_keys(ctx.db.local().find_keyspace(keyspace).metadata().get()->cf_meta_data());
|
||||
}
|
||||
|
||||
return service::get_local_storage_service().set_tables_autocompaction(keyspace, tables, enabled).then([]{
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
}
|
||||
|
||||
void set_transport_controller(http_context& ctx, routes& r, cql_transport::controller& ctl) {
|
||||
ss::start_native_transport.set(r, [&ctl](std::unique_ptr<request> req) {
|
||||
return ctl.start_server().then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
ss::stop_native_transport.set(r, [&ctl](std::unique_ptr<request> req) {
|
||||
return ctl.stop_server().then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
ss::is_native_transport_running.set(r, [&ctl] (std::unique_ptr<request> req) {
|
||||
return ctl.is_server_running().then([] (bool running) {
|
||||
return make_ready_future<json::json_return_type>(running);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
void unset_transport_controller(http_context& ctx, routes& r) {
|
||||
ss::start_native_transport.unset(r);
|
||||
ss::stop_native_transport.unset(r);
|
||||
ss::is_native_transport_running.unset(r);
|
||||
}
|
||||
|
||||
void set_rpc_controller(http_context& ctx, routes& r, thrift_controller& ctl) {
|
||||
ss::stop_rpc_server.set(r, [&ctl](std::unique_ptr<request> req) {
|
||||
return ctl.stop_server().then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
ss::start_rpc_server.set(r, [&ctl](std::unique_ptr<request> req) {
|
||||
return ctl.start_server().then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
ss::is_rpc_server_running.set(r, [&ctl] (std::unique_ptr<request> req) {
|
||||
return ctl.is_server_running().then([] (bool running) {
|
||||
return make_ready_future<json::json_return_type>(running);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
void unset_rpc_controller(http_context& ctx, routes& r) {
|
||||
ss::stop_rpc_server.unset(r);
|
||||
ss::start_rpc_server.unset(r);
|
||||
ss::is_rpc_server_running.unset(r);
|
||||
}
|
||||
|
||||
void set_storage_service(http_context& ctx, routes& r) {
|
||||
ss::local_hostid.set(r, [](std::unique_ptr<request> req) {
|
||||
return db::system_keyspace::get_local_host_id().then([](const utils::UUID& id) {
|
||||
@@ -279,12 +215,6 @@ void set_storage_service(http_context& ctx, routes& r) {
|
||||
req.get_query_param("key")));
|
||||
});
|
||||
|
||||
ss::cdc_streams_check_and_repair.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return service::get_local_storage_service().check_and_repair_cdc_streams().then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
ss::force_keyspace_compaction.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto column_families = split_cf(req->get_query_param("cf"));
|
||||
@@ -549,6 +479,42 @@ void set_storage_service(http_context& ctx, routes& r) {
|
||||
});
|
||||
});
|
||||
|
||||
ss::stop_rpc_server.set(r, [](std::unique_ptr<request> req) {
|
||||
return service::get_local_storage_service().stop_rpc_server().then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
ss::start_rpc_server.set(r, [](std::unique_ptr<request> req) {
|
||||
return service::get_local_storage_service().start_rpc_server().then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
ss::is_rpc_server_running.set(r, [] (std::unique_ptr<request> req) {
|
||||
return service::get_local_storage_service().is_rpc_server_running().then([] (bool running) {
|
||||
return make_ready_future<json::json_return_type>(running);
|
||||
});
|
||||
});
|
||||
|
||||
ss::start_native_transport.set(r, [](std::unique_ptr<request> req) {
|
||||
return service::get_local_storage_service().start_native_transport().then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
ss::stop_native_transport.set(r, [](std::unique_ptr<request> req) {
|
||||
return service::get_local_storage_service().stop_native_transport().then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
ss::is_native_transport_running.set(r, [] (std::unique_ptr<request> req) {
|
||||
return service::get_local_storage_service().is_native_transport_running().then([] (bool running) {
|
||||
return make_ready_future<json::json_return_type>(running);
|
||||
});
|
||||
});
|
||||
|
||||
ss::join_ring.set(r, [](std::unique_ptr<request> req) {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
@@ -678,7 +644,7 @@ void set_storage_service(http_context& ctx, routes& r) {
|
||||
|
||||
ss::set_trace_probability.set(r, [](std::unique_ptr<request> req) {
|
||||
auto probability = req->get_query_param("probability");
|
||||
return futurize_invoke([probability] {
|
||||
return futurize<json::json_return_type>::apply([probability] {
|
||||
double real_prob = std::stod(probability.c_str());
|
||||
return tracing::tracing::tracing_instance().invoke_on_all([real_prob] (auto& local_tracing) {
|
||||
local_tracing.set_trace_probability(real_prob);
|
||||
@@ -733,17 +699,19 @@ void set_storage_service(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
ss::enable_auto_compaction.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto tables = split_cf(req->get_query_param("cf"));
|
||||
|
||||
return set_tables_autocompaction(ctx, keyspace, tables, true);
|
||||
auto column_family = req->get_query_param("cf");
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
ss::disable_auto_compaction.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto tables = split_cf(req->get_query_param("cf"));
|
||||
|
||||
return set_tables_autocompaction(ctx, keyspace, tables, false);
|
||||
auto column_family = req->get_query_param("cf");
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
ss::deliver_hints.set(r, [](std::unique_ptr<request> req) {
|
||||
@@ -1020,12 +988,12 @@ void set_snapshot(http_context& ctx, routes& r) {
|
||||
|
||||
ss::take_snapshot.set(r, [](std::unique_ptr<request> req) {
|
||||
auto tag = req->get_query_param("tag");
|
||||
auto column_families = split(req->get_query_param("cf"), ",");
|
||||
auto column_family = req->get_query_param("cf");
|
||||
|
||||
std::vector<sstring> keynames = split(req->get_query_param("kn"), ",");
|
||||
|
||||
auto resp = make_ready_future<>();
|
||||
if (column_families.empty()) {
|
||||
if (column_family.empty()) {
|
||||
resp = service::get_local_storage_service().take_snapshot(tag, keynames);
|
||||
} else {
|
||||
if (keynames.empty()) {
|
||||
@@ -1034,7 +1002,7 @@ void set_snapshot(http_context& ctx, routes& r) {
|
||||
if (keynames.size() > 1) {
|
||||
throw httpd::bad_param_exception("Only one keyspace allowed when specifying a column family");
|
||||
}
|
||||
resp = service::get_local_storage_service().take_column_family_snapshot(keynames[0], column_families, tag);
|
||||
resp = service::get_local_storage_service().take_column_family_snapshot(keynames[0], column_family, tag);
|
||||
}
|
||||
return resp.then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
|
||||
@@ -23,16 +23,9 @@
|
||||
|
||||
#include "api.hh"
|
||||
|
||||
namespace cql_transport { class controller; }
|
||||
class thrift_controller;
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_storage_service(http_context& ctx, routes& r);
|
||||
void set_transport_controller(http_context& ctx, routes& r, cql_transport::controller& ctl);
|
||||
void unset_transport_controller(http_context& ctx, routes& r);
|
||||
void set_rpc_controller(http_context& ctx, routes& r, thrift_controller& ctl);
|
||||
void unset_rpc_controller(http_context& ctx, routes& r);
|
||||
void set_snapshot(http_context& ctx, routes& r);
|
||||
|
||||
}
|
||||
|
||||
@@ -22,7 +22,6 @@
|
||||
#include "api/api-doc/system.json.hh"
|
||||
#include "api/api.hh"
|
||||
|
||||
#include <seastar/core/reactor.hh>
|
||||
#include <seastar/http/exception.hh>
|
||||
#include "log.hh"
|
||||
|
||||
|
||||
@@ -29,6 +29,7 @@
|
||||
#include <seastar/net//byteorder.hh>
|
||||
#include <cstdint>
|
||||
#include <iosfwd>
|
||||
#include <seastar/util/gcc6-concepts.hh>
|
||||
#include "data/cell.hh"
|
||||
#include "data/schema_info.hh"
|
||||
#include "imr/utils.hh"
|
||||
|
||||
@@ -65,16 +65,16 @@ static future<> create_metadata_table_if_missing_impl(
|
||||
std::string_view cql,
|
||||
::service::migration_manager& mm) {
|
||||
static auto ignore_existing = [] (seastar::noncopyable_function<future<>()> func) {
|
||||
return futurize_invoke(std::move(func)).handle_exception_type([] (exceptions::already_exists_exception& ignored) { });
|
||||
return futurize_apply(std::move(func)).handle_exception_type([] (exceptions::already_exists_exception& ignored) { });
|
||||
};
|
||||
auto& db = qp.db();
|
||||
auto parsed_statement = cql3::query_processor::parse_statement(cql);
|
||||
auto& parsed_cf_statement = static_cast<cql3::statements::raw::cf_statement&>(*parsed_statement);
|
||||
auto parsed_statement = static_pointer_cast<cql3::statements::raw::cf_statement>(
|
||||
cql3::query_processor::parse_statement(cql));
|
||||
|
||||
parsed_cf_statement.prepare_keyspace(meta::AUTH_KS);
|
||||
parsed_statement->prepare_keyspace(meta::AUTH_KS);
|
||||
|
||||
auto statement = static_pointer_cast<cql3::statements::create_table_statement>(
|
||||
parsed_cf_statement.prepare(db, qp.get_cql_stats())->statement);
|
||||
parsed_statement->prepare(db, qp.get_cql_stats())->statement);
|
||||
|
||||
const auto schema = statement->get_cf_meta_data(qp.db());
|
||||
const auto uuid = generate_legacy_id(schema->ks_name(), schema->cf_name());
|
||||
@@ -92,7 +92,7 @@ future<> create_metadata_table_if_missing(
|
||||
cql3::query_processor& qp,
|
||||
std::string_view cql,
|
||||
::service::migration_manager& mm) noexcept {
|
||||
return futurize_invoke(create_metadata_table_if_missing_impl, table_name, qp, cql, mm);
|
||||
return futurize_apply(create_metadata_table_if_missing_impl, table_name, qp, cql, mm);
|
||||
}
|
||||
|
||||
future<> wait_for_schema_agreement(::service::migration_manager& mm, const database& db, seastar::abort_source& as) {
|
||||
|
||||
@@ -27,10 +27,9 @@
|
||||
#include <seastar/core/future.hh>
|
||||
#include <seastar/core/abort_source.hh>
|
||||
#include <seastar/util/noncopyable_function.hh>
|
||||
#include <seastar/core/seastar.hh>
|
||||
#include <seastar/core/reactor.hh>
|
||||
#include <seastar/core/resource.hh>
|
||||
#include <seastar/core/sstring.hh>
|
||||
#include <seastar/core/smp.hh>
|
||||
|
||||
#include "log.hh"
|
||||
#include "seastarx.hh"
|
||||
@@ -62,7 +61,7 @@ extern const sstring AUTH_PACKAGE_NAME;
|
||||
|
||||
template <class Task>
|
||||
future<> once_among_shards(Task&& f) {
|
||||
if (this_shard_id() == 0u) {
|
||||
if (engine().cpu_id() == 0u) {
|
||||
return f();
|
||||
}
|
||||
|
||||
|
||||
@@ -51,7 +51,7 @@ extern "C" {
|
||||
|
||||
#include <boost/algorithm/string/join.hpp>
|
||||
#include <boost/range.hpp>
|
||||
#include <seastar/core/seastar.hh>
|
||||
#include <seastar/core/reactor.hh>
|
||||
|
||||
#include "auth/authenticated_user.hh"
|
||||
#include "auth/common.hh"
|
||||
|
||||
@@ -48,7 +48,7 @@
|
||||
#include <optional>
|
||||
|
||||
#include <boost/algorithm/cxx11/all_of.hpp>
|
||||
#include <seastar/core/seastar.hh>
|
||||
#include <seastar/core/reactor.hh>
|
||||
|
||||
#include "auth/authenticated_user.hh"
|
||||
#include "auth/common.hh"
|
||||
@@ -230,7 +230,7 @@ future<authenticated_user> password_authenticator::authenticate(
|
||||
// obsolete prepared statements pretty quickly.
|
||||
// Rely on query processing caching statements instead, and lets assume
|
||||
// that a map lookup string->statement is not gonna kill us much.
|
||||
return futurize_invoke([this, username, password] {
|
||||
return futurize_apply([this, username, password] {
|
||||
static const sstring query = format("SELECT {} FROM {} WHERE {} = ?",
|
||||
SALTED_HASH,
|
||||
meta::roles_table::qualified_name(),
|
||||
|
||||
@@ -178,7 +178,7 @@ future<> service::start(::service::migration_manager& mm) {
|
||||
return create_keyspace_if_missing(mm);
|
||||
}).then([this] {
|
||||
return _role_manager->start().then([this] {
|
||||
return when_all_succeed(_authorizer->start(), _authenticator->start()).discard_result();
|
||||
return when_all_succeed(_authorizer->start(), _authenticator->start());
|
||||
});
|
||||
}).then([this] {
|
||||
_permissions_cache = std::make_unique<permissions_cache>(_permissions_cache_config, *this, log);
|
||||
@@ -199,7 +199,7 @@ future<> service::stop() {
|
||||
}
|
||||
return make_ready_future<>();
|
||||
}).then([this] {
|
||||
return when_all_succeed(_role_manager->stop(), _authorizer->stop(), _authenticator->stop()).discard_result();
|
||||
return when_all_succeed(_role_manager->stop(), _authorizer->stop(), _authenticator->stop());
|
||||
});
|
||||
}
|
||||
|
||||
@@ -419,7 +419,7 @@ future<> create_role(
|
||||
return make_ready_future<>();
|
||||
}
|
||||
|
||||
return futurize_invoke(
|
||||
return futurize_apply(
|
||||
&validate_authentication_options_are_supported,
|
||||
options,
|
||||
ser.underlying_authenticator().supported_options()).then([&ser, name, &options] {
|
||||
@@ -443,7 +443,7 @@ future<> alter_role(
|
||||
return make_ready_future<>();
|
||||
}
|
||||
|
||||
return futurize_invoke(
|
||||
return futurize_apply(
|
||||
&validate_authentication_options_are_supported,
|
||||
options,
|
||||
ser.underlying_authenticator().supported_options()).then([&ser, name, &options] {
|
||||
@@ -458,9 +458,7 @@ future<> drop_role(const service& ser, std::string_view name) {
|
||||
|
||||
return when_all_succeed(
|
||||
a.revoke_all(name),
|
||||
a.revoke_all(r))
|
||||
.discard_result()
|
||||
.handle_exception_type([](const unsupported_authorization_operation&) {
|
||||
a.revoke_all(r)).handle_exception_type([](const unsupported_authorization_operation&) {
|
||||
// Nothing.
|
||||
});
|
||||
}).then([&ser, name] {
|
||||
@@ -473,7 +471,7 @@ future<> drop_role(const service& ser, std::string_view name) {
|
||||
future<bool> has_role(const service& ser, std::string_view grantee, std::string_view name) {
|
||||
return when_all_succeed(
|
||||
validate_role_exists(ser, name),
|
||||
ser.get_roles(grantee)).then_unpack([name](role_set all_roles) {
|
||||
ser.get_roles(grantee)).then([name](role_set all_roles) {
|
||||
return make_ready_future<bool>(all_roles.count(sstring(name)) != 0);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -161,7 +161,7 @@ future<> standard_role_manager::create_metadata_tables_if_missing() const {
|
||||
meta::role_members_table::name,
|
||||
_qp,
|
||||
create_role_members_query,
|
||||
_migration_manager)).discard_result();
|
||||
_migration_manager));
|
||||
}
|
||||
|
||||
future<> standard_role_manager::create_default_role_if_missing() const {
|
||||
@@ -367,7 +367,7 @@ future<> standard_role_manager::drop(std::string_view role_name) const {
|
||||
{sstring(role_name)}).discard_result();
|
||||
};
|
||||
|
||||
return when_all_succeed(revoke_from_members(), revoke_members_of()).then_unpack([delete_role = std::move(delete_role)] {
|
||||
return when_all_succeed(revoke_from_members(), revoke_members_of()).then([delete_role = std::move(delete_role)] {
|
||||
return delete_role();
|
||||
});
|
||||
});
|
||||
@@ -416,7 +416,7 @@ standard_role_manager::modify_membership(
|
||||
return make_ready_future<>();
|
||||
};
|
||||
|
||||
return when_all_succeed(modify_roles(), modify_role_members()).discard_result();
|
||||
return when_all_succeed(modify_roles(), modify_role_members());
|
||||
}
|
||||
|
||||
future<>
|
||||
@@ -445,7 +445,7 @@ standard_role_manager::grant(std::string_view grantee_name, std::string_view rol
|
||||
});
|
||||
};
|
||||
|
||||
return when_all_succeed(check_redundant(), check_cycle()).then_unpack([this, role_name, grantee_name] {
|
||||
return when_all_succeed(check_redundant(), check_cycle()).then([this, role_name, grantee_name] {
|
||||
return this->modify_membership(grantee_name, role_name, membership_change::add);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -158,7 +158,7 @@ public:
|
||||
}
|
||||
|
||||
virtual future<authenticated_user> get_authenticated_user() const {
|
||||
return futurize_invoke([this] {
|
||||
return futurize_apply([this] {
|
||||
return _sasl->get_authenticated_user().handle_exception([](auto ep) {
|
||||
try {
|
||||
std::rethrow_exception(ep);
|
||||
|
||||
@@ -176,7 +176,7 @@ public:
|
||||
return make_ready_future<>();
|
||||
}
|
||||
virtual future<> fast_forward_to(position_range pr, db::timeout_clock::time_point timeout) override {
|
||||
return make_exception_future<>(make_backtraced_exception_ptr<std::bad_function_call>());
|
||||
throw std::bad_function_call();
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -39,10 +39,7 @@ class caching_options {
|
||||
|
||||
sstring _key_cache;
|
||||
sstring _row_cache;
|
||||
bool _enabled = true;
|
||||
caching_options(sstring k, sstring r, bool enabled)
|
||||
: _key_cache(k), _row_cache(r), _enabled(enabled)
|
||||
{
|
||||
caching_options(sstring k, sstring r) : _key_cache(k), _row_cache(r) {
|
||||
if ((k != "ALL") && (k != "NONE")) {
|
||||
throw exceptions::configuration_exception("Invalid key value: " + k);
|
||||
}
|
||||
@@ -62,53 +59,36 @@ class caching_options {
|
||||
caching_options() : _key_cache(default_key), _row_cache(default_row) {}
|
||||
public:
|
||||
|
||||
bool enabled() const {
|
||||
return _enabled;
|
||||
}
|
||||
|
||||
std::map<sstring, sstring> to_map() const {
|
||||
std::map<sstring, sstring> res = {{ "keys", _key_cache },
|
||||
{ "rows_per_partition", _row_cache }};
|
||||
if (!_enabled) {
|
||||
res.insert({"enabled", "false"});
|
||||
}
|
||||
return res;
|
||||
return {{ "keys", _key_cache }, { "rows_per_partition", _row_cache }};
|
||||
}
|
||||
|
||||
sstring to_sstring() const {
|
||||
return json::to_json(to_map());
|
||||
}
|
||||
|
||||
static caching_options get_disabled_caching_options() {
|
||||
return caching_options("NONE", "NONE", false);
|
||||
}
|
||||
|
||||
template<typename Map>
|
||||
static caching_options from_map(const Map & map) {
|
||||
sstring k = default_key;
|
||||
sstring r = default_row;
|
||||
bool e = true;
|
||||
|
||||
for (auto& p : map) {
|
||||
if (p.first == "keys") {
|
||||
k = p.second;
|
||||
} else if (p.first == "rows_per_partition") {
|
||||
r = p.second;
|
||||
} else if (p.first == "enabled") {
|
||||
e = p.second == "true";
|
||||
} else {
|
||||
throw exceptions::configuration_exception("Invalid caching option: " + p.first);
|
||||
}
|
||||
}
|
||||
return caching_options(k, r, e);
|
||||
return caching_options(k, r);
|
||||
}
|
||||
static caching_options from_sstring(const sstring& str) {
|
||||
return from_map(json::to_map(str));
|
||||
}
|
||||
|
||||
bool operator==(const caching_options& other) const {
|
||||
return _key_cache == other._key_cache && _row_cache == other._row_cache
|
||||
&& _enabled == other._enabled;
|
||||
return _key_cache == other._key_cache && _row_cache == other._row_cache;
|
||||
}
|
||||
bool operator!=(const caching_options& other) const {
|
||||
return !(*this == other);
|
||||
|
||||
@@ -1,65 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2020 ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "cdc_partitioner.hh"
|
||||
#include "dht/token.hh"
|
||||
#include "schema.hh"
|
||||
#include "sstables/key.hh"
|
||||
#include "utils/class_registrator.hh"
|
||||
#include "cdc/generation.hh"
|
||||
#include "keys.hh"
|
||||
|
||||
static const sstring cdc_partitioner_name = "com.scylladb.dht.CDCPartitioner";
|
||||
|
||||
namespace cdc {
|
||||
|
||||
const sstring cdc_partitioner::name() const {
|
||||
return cdc_partitioner_name;
|
||||
}
|
||||
|
||||
static dht::token to_token(int64_t value) {
|
||||
return dht::token(dht::token::kind::key, value);
|
||||
}
|
||||
|
||||
static dht::token to_token(bytes_view key) {
|
||||
// Key should be 16 B long, of which first 8 B are used for token calculation
|
||||
if (key.size() != 2*sizeof(int64_t)) {
|
||||
return dht::minimum_token();
|
||||
}
|
||||
return to_token(stream_id::token_from_bytes(key));
|
||||
}
|
||||
|
||||
dht::token
|
||||
cdc_partitioner::get_token(const sstables::key_view& key) const {
|
||||
return to_token(bytes_view(key));
|
||||
}
|
||||
|
||||
dht::token
|
||||
cdc_partitioner::get_token(const schema& s, partition_key_view key) const {
|
||||
auto exploded_key = key.explode(s);
|
||||
return to_token(exploded_key[0]);
|
||||
}
|
||||
|
||||
using registry = class_registrator<dht::i_partitioner, cdc_partitioner>;
|
||||
static registry registrator(cdc_partitioner_name);
|
||||
static registry registrator_short_name("CDCPartitioner");
|
||||
|
||||
}
|
||||
@@ -1,48 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2020 ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <seastar/core/sstring.hh>
|
||||
|
||||
#include "bytes.hh"
|
||||
#include "dht/i_partitioner.hh"
|
||||
|
||||
class schema;
|
||||
class partition_key_view;
|
||||
|
||||
namespace sstables {
|
||||
|
||||
class key_view;
|
||||
|
||||
}
|
||||
|
||||
namespace cdc {
|
||||
|
||||
struct cdc_partitioner final : public dht::i_partitioner {
|
||||
cdc_partitioner() = default;
|
||||
virtual const sstring name() const override;
|
||||
virtual dht::token get_token(const schema& s, partition_key_view key) const override;
|
||||
virtual dht::token get_token(const sstables::key_view& key) const override;
|
||||
};
|
||||
|
||||
|
||||
}
|
||||
@@ -80,7 +80,7 @@ bool stream_id::operator<(const stream_id& o) const {
|
||||
return _value < o._value;
|
||||
}
|
||||
|
||||
static int64_t bytes_to_int64(bytes_view b, size_t offset) {
|
||||
static int64_t bytes_to_int64(const bytes& b, size_t offset) {
|
||||
assert(b.size() >= offset + sizeof(int64_t));
|
||||
int64_t res;
|
||||
std::copy_n(b.begin() + offset, sizeof(int64_t), reinterpret_cast<int8_t *>(&res));
|
||||
@@ -88,17 +88,13 @@ static int64_t bytes_to_int64(bytes_view b, size_t offset) {
|
||||
}
|
||||
|
||||
int64_t stream_id::first() const {
|
||||
return token_from_bytes(_value);
|
||||
return bytes_to_int64(_value, 0);
|
||||
}
|
||||
|
||||
int64_t stream_id::second() const {
|
||||
return bytes_to_int64(_value, sizeof(int64_t));
|
||||
}
|
||||
|
||||
int64_t stream_id::token_from_bytes(bytes_view b) {
|
||||
return bytes_to_int64(b, 0);
|
||||
}
|
||||
|
||||
const bytes& stream_id::to_bytes() const {
|
||||
return _value;
|
||||
}
|
||||
@@ -123,105 +119,176 @@ const std::vector<token_range_description>& topology_description::entries() cons
|
||||
return _entries;
|
||||
}
|
||||
|
||||
static stream_id create_stream_id(dht::token t) {
|
||||
static stream_id make_random_stream_id() {
|
||||
static thread_local std::mt19937_64 rand_gen(std::random_device().operator()());
|
||||
static thread_local std::uniform_int_distribution<int64_t> rand_dist(std::numeric_limits<int64_t>::min());
|
||||
|
||||
return {dht::token::to_int64(t), rand_dist(rand_gen)};
|
||||
return {rand_dist(rand_gen), rand_dist(rand_gen)};
|
||||
}
|
||||
|
||||
class topology_description_generator final {
|
||||
const db::config& _cfg;
|
||||
const std::unordered_set<dht::token>& _bootstrap_tokens;
|
||||
const locator::token_metadata& _token_metadata;
|
||||
const gms::gossiper& _gossiper;
|
||||
|
||||
// Compute a set of tokens that split the token ring into vnodes
|
||||
auto get_tokens() const {
|
||||
auto tokens = _token_metadata.sorted_tokens();
|
||||
auto it = tokens.insert(
|
||||
tokens.end(), _bootstrap_tokens.begin(), _bootstrap_tokens.end());
|
||||
std::sort(it, tokens.end());
|
||||
std::inplace_merge(tokens.begin(), it, tokens.end());
|
||||
tokens.erase(std::unique(tokens.begin(), tokens.end()), tokens.end());
|
||||
return tokens;
|
||||
/* Given:
|
||||
* 1. a set of tokens which split the token ring into token ranges (vnodes),
|
||||
* 2. information on how each token range is distributed among its owning node's shards
|
||||
* this function tries to generate a set of CDC stream identifiers such that for each
|
||||
* shard and vnode pair there exists a stream whose token falls into this
|
||||
* vnode and is owned by this shard.
|
||||
*
|
||||
* It then builds a cdc::topology_description which maps tokens to these
|
||||
* found stream identifiers, such that if token T is owned by shard S in vnode V,
|
||||
* it gets mapped to the stream identifier generated for (S, V).
|
||||
*/
|
||||
// Run in seastar::async context.
|
||||
topology_description generate_topology_description(
|
||||
const db::config& cfg,
|
||||
const std::unordered_set<dht::token>& bootstrap_tokens,
|
||||
const locator::token_metadata& token_metadata,
|
||||
const gms::gossiper& gossiper) {
|
||||
if (bootstrap_tokens.empty()) {
|
||||
throw std::runtime_error(
|
||||
"cdc: bootstrap tokens is empty in generate_topology_description");
|
||||
}
|
||||
|
||||
// Fetch sharding parameters for a node that owns vnode ending with this.end
|
||||
// Returns <shard_count, ignore_msb> pair.
|
||||
std::pair<size_t, uint8_t> get_sharding_info(dht::token end) const {
|
||||
if (_bootstrap_tokens.count(end) > 0) {
|
||||
return {smp::count, _cfg.murmur3_partitioner_ignore_msb_bits()};
|
||||
auto tokens = token_metadata.sorted_tokens();
|
||||
tokens.insert(tokens.end(), bootstrap_tokens.begin(), bootstrap_tokens.end());
|
||||
std::sort(tokens.begin(), tokens.end());
|
||||
tokens.erase(std::unique(tokens.begin(), tokens.end()), tokens.end());
|
||||
|
||||
std::vector<token_range_description> entries(tokens.size());
|
||||
int spots_to_fill = 0;
|
||||
|
||||
for (size_t i = 0; i < tokens.size(); ++i) {
|
||||
auto& entry = entries[i];
|
||||
entry.token_range_end = tokens[i];
|
||||
|
||||
if (bootstrap_tokens.count(entry.token_range_end) > 0) {
|
||||
entry.streams.resize(smp::count);
|
||||
entry.sharding_ignore_msb = cfg.murmur3_partitioner_ignore_msb_bits();
|
||||
} else {
|
||||
auto endpoint = _token_metadata.get_endpoint(end);
|
||||
auto endpoint = token_metadata.get_endpoint(entry.token_range_end);
|
||||
if (!endpoint) {
|
||||
throw std::runtime_error(
|
||||
format("Can't find endpoint for token {}", end));
|
||||
throw std::runtime_error(format("Can't find endpoint for token {}", entry.token_range_end));
|
||||
}
|
||||
auto sc = get_shard_count(*endpoint, gossiper);
|
||||
entry.streams.resize(sc > 0 ? sc : 1);
|
||||
entry.sharding_ignore_msb = get_sharding_ignore_msb(*endpoint, gossiper);
|
||||
}
|
||||
|
||||
spots_to_fill += entry.streams.size();
|
||||
}
|
||||
|
||||
auto schema = schema_builder("fake_ks", "fake_table")
|
||||
.with_column("stream_id", bytes_type, column_kind::partition_key)
|
||||
.build();
|
||||
|
||||
auto quota = std::chrono::seconds(spots_to_fill / 2000 + 1);
|
||||
auto start_time = std::chrono::system_clock::now();
|
||||
|
||||
// For each pair (i, j), 0 <= i < streams.size(), 0 <= j < streams[i].size(),
|
||||
// try to find a stream (stream[i][j]) such that the token of this stream will get mapped to this stream
|
||||
// (refer to the comments above topology_description's definition to understand how it describes the mapping).
|
||||
// We find the streams by randomly generating them and checking into which pairs they get mapped.
|
||||
// NOTE: this algorithm is temporary and will be replaced after per-table-partitioner feature gets merged in.
|
||||
repeat([&] {
|
||||
for (int i = 0; i < 500; ++i) {
|
||||
auto stream_id = make_random_stream_id();
|
||||
auto token = dht::get_token(*schema, stream_id.to_partition_key(*schema));
|
||||
|
||||
// Find the token range into which our stream_id's token landed.
|
||||
auto it = std::lower_bound(tokens.begin(), tokens.end(), token);
|
||||
auto& entry = entries[it != tokens.end() ? std::distance(tokens.begin(), it) : 0];
|
||||
|
||||
auto shard_id = dht::shard_of(entry.streams.size(), entry.sharding_ignore_msb, token);
|
||||
assert(shard_id < entry.streams.size());
|
||||
|
||||
if (!entry.streams[shard_id].is_set()) {
|
||||
--spots_to_fill;
|
||||
entry.streams[shard_id] = stream_id;
|
||||
}
|
||||
}
|
||||
|
||||
if (!spots_to_fill) {
|
||||
return stop_iteration::yes;
|
||||
}
|
||||
|
||||
auto now = std::chrono::system_clock::now();
|
||||
auto passed = std::chrono::duration_cast<std::chrono::seconds>(now - start_time);
|
||||
if (passed > quota) {
|
||||
return stop_iteration::yes;
|
||||
}
|
||||
|
||||
return stop_iteration::no;
|
||||
}).get();
|
||||
|
||||
if (spots_to_fill) {
|
||||
// We were not able to generate stream ids for each (token range, shard) pair.
|
||||
|
||||
// For each range that has a stream, for each shard for this range that doesn't have a stream,
|
||||
// use the stream id of the next shard for this range.
|
||||
|
||||
// For each range that doesn't have any stream,
|
||||
// use streams of the first range to the left which does have a stream.
|
||||
|
||||
cdc_log.warn("Generation of CDC streams failed to create streams for some (vnode, shard) pair."
|
||||
" This can lead to worse performance.");
|
||||
|
||||
stream_id some_stream;
|
||||
size_t idx = 0;
|
||||
for (; idx < entries.size(); ++idx) {
|
||||
for (auto s: entries[idx].streams) {
|
||||
if (s.is_set()) {
|
||||
some_stream = s;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (some_stream.is_set()) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
assert(idx != entries.size() && some_stream.is_set());
|
||||
|
||||
// Iterate over all ranges in the clockwise direction, starting with the one we found a stream for.
|
||||
for (size_t off = 0; off < entries.size(); ++off) {
|
||||
auto& ss = entries[(idx + off) % entries.size()].streams;
|
||||
|
||||
int last_set_stream_idx = ss.size() - 1;
|
||||
while (last_set_stream_idx > -1 && !ss[last_set_stream_idx].is_set()) {
|
||||
--last_set_stream_idx;
|
||||
}
|
||||
|
||||
if (last_set_stream_idx == -1) {
|
||||
cdc_log.warn(
|
||||
"CDC wasn't able to generate any stream for vnode ({}, {}]. We'll use another vnode's streams"
|
||||
" instead. This might lead to inconsistencies.",
|
||||
tokens[(idx + off + entries.size() - 1) % entries.size()], tokens[(idx + off) % entries.size()]);
|
||||
|
||||
ss[0] = some_stream;
|
||||
last_set_stream_idx = 0;
|
||||
}
|
||||
|
||||
some_stream = ss[last_set_stream_idx];
|
||||
|
||||
// Replace 'unset' stream ids with indexes below last_set_stream_idx
|
||||
for (int s_idx = last_set_stream_idx - 1; s_idx > -1; --s_idx) {
|
||||
if (ss[s_idx].is_set()) {
|
||||
some_stream = ss[s_idx];
|
||||
} else {
|
||||
ss[s_idx] = some_stream;
|
||||
}
|
||||
}
|
||||
// Replace 'unset' stream ids with indexes above last_set_stream_idx
|
||||
for (int s_idx = ss.size() - 1; s_idx > last_set_stream_idx; --s_idx) {
|
||||
if (ss[s_idx].is_set()) {
|
||||
some_stream = ss[s_idx];
|
||||
} else {
|
||||
ss[s_idx] = some_stream;
|
||||
}
|
||||
}
|
||||
auto sc = get_shard_count(*endpoint, _gossiper);
|
||||
return {sc > 0 ? sc : 1, get_sharding_ignore_msb(*endpoint, _gossiper)};
|
||||
}
|
||||
}
|
||||
|
||||
token_range_description create_description(dht::token start, dht::token end) const {
|
||||
token_range_description desc;
|
||||
|
||||
desc.token_range_end = end;
|
||||
|
||||
auto [shard_count, ignore_msb] = get_sharding_info(end);
|
||||
desc.streams.reserve(shard_count);
|
||||
desc.sharding_ignore_msb = ignore_msb;
|
||||
|
||||
dht::sharder sharder(shard_count, ignore_msb);
|
||||
for (size_t shard_idx = 0; shard_idx < shard_count; ++shard_idx) {
|
||||
auto t = dht::find_first_token_for_shard(sharder, start, end, shard_idx);
|
||||
desc.streams.push_back(create_stream_id(t));
|
||||
}
|
||||
|
||||
return desc;
|
||||
}
|
||||
public:
|
||||
topology_description_generator(
|
||||
const db::config& cfg,
|
||||
const std::unordered_set<dht::token>& bootstrap_tokens,
|
||||
const locator::token_metadata& token_metadata,
|
||||
const gms::gossiper& gossiper)
|
||||
: _cfg(cfg)
|
||||
, _bootstrap_tokens(bootstrap_tokens)
|
||||
, _token_metadata(token_metadata)
|
||||
, _gossiper(gossiper)
|
||||
{}
|
||||
|
||||
/*
|
||||
* Generate a set of CDC stream identifiers such that for each shard
|
||||
* and vnode pair there exists a stream whose token falls into this vnode
|
||||
* and is owned by this shard. It is sometimes not possible to generate
|
||||
* a CDC stream identifier for some (vnode, shard) pair because not all
|
||||
* shards have to own tokens in a vnode. Small vnode can be totally owned
|
||||
* by a single shard. In such case, a stream identifier that maps to
|
||||
* end of the vnode is generated.
|
||||
*
|
||||
* Then build a cdc::topology_description which maps tokens to generated
|
||||
* stream identifiers, such that if token T is owned by shard S in vnode V,
|
||||
* it gets mapped to the stream identifier generated for (S, V).
|
||||
*/
|
||||
// Run in seastar::async context.
|
||||
topology_description generate() const {
|
||||
const auto tokens = get_tokens();
|
||||
|
||||
std::vector<token_range_description> vnode_descriptions;
|
||||
vnode_descriptions.reserve(tokens.size());
|
||||
|
||||
vnode_descriptions.push_back(
|
||||
create_description(tokens.back(), tokens.front()));
|
||||
for (size_t idx = 1; idx < tokens.size(); ++idx) {
|
||||
vnode_descriptions.push_back(
|
||||
create_description(tokens[idx - 1], tokens[idx]));
|
||||
}
|
||||
|
||||
return {std::move(vnode_descriptions)};
|
||||
}
|
||||
};
|
||||
return {std::move(entries)};
|
||||
}
|
||||
|
||||
bool should_propose_first_generation(const gms::inet_address& me, const gms::gossiper& g) {
|
||||
auto my_host_id = g.get_host_id(me);
|
||||
@@ -252,7 +319,9 @@ db_clock::time_point make_new_cdc_generation(
|
||||
db::system_distributed_keyspace& sys_dist_ks,
|
||||
std::chrono::milliseconds ring_delay,
|
||||
bool for_testing) {
|
||||
auto gen = topology_description_generator(cfg, bootstrap_tokens, tm, g).generate();
|
||||
assert(!bootstrap_tokens.empty());
|
||||
|
||||
auto gen = generate_topology_description(cfg, bootstrap_tokens, tm, g);
|
||||
|
||||
// Begin the race.
|
||||
auto ts = db_clock::now() + (
|
||||
@@ -266,7 +335,12 @@ db_clock::time_point make_new_cdc_generation(
|
||||
std::optional<db_clock::time_point> get_streams_timestamp_for(const gms::inet_address& endpoint, const gms::gossiper& g) {
|
||||
auto streams_ts_string = g.get_application_state_value(endpoint, gms::application_state::CDC_STREAMS_TIMESTAMP);
|
||||
cdc_log.trace("endpoint={}, streams_ts_string={}", endpoint, streams_ts_string);
|
||||
return gms::versioned_value::cdc_streams_timestamp_from_string(streams_ts_string);
|
||||
|
||||
if (streams_ts_string.empty()) {
|
||||
return {};
|
||||
}
|
||||
|
||||
return db_clock::time_point(db_clock::duration(std::stoll(streams_ts_string)));
|
||||
}
|
||||
|
||||
// Run inside seastar::async context.
|
||||
|
||||
@@ -77,7 +77,6 @@ public:
|
||||
const bytes& to_bytes() const;
|
||||
|
||||
partition_key to_partition_key(const schema& log_schema) const;
|
||||
static int64_t token_from_bytes(bytes_view);
|
||||
};
|
||||
|
||||
/* Describes a mapping of tokens to CDC streams in a token range.
|
||||
@@ -130,7 +129,7 @@ bool should_propose_first_generation(const gms::inet_address& me, const gms::gos
|
||||
*/
|
||||
future<db_clock::time_point> get_local_streams_timestamp();
|
||||
|
||||
/* Generate a new set of CDC streams and insert it into the distributed cdc_generations table.
|
||||
/* Generate a new set of CDC streams and insert it into the distributed cdc_topology_description table.
|
||||
* Returns the timestamp of this new generation.
|
||||
*
|
||||
* Should be called when starting the node for the first time (i.e., joining the ring).
|
||||
@@ -159,9 +158,9 @@ db_clock::time_point make_new_cdc_generation(
|
||||
std::optional<db_clock::time_point> get_streams_timestamp_for(const gms::inet_address& endpoint, const gms::gossiper&);
|
||||
|
||||
/* Inform CDC users about a generation of streams (identified by the given timestamp)
|
||||
* by inserting it into the cdc_streams table.
|
||||
* by inserting it into the cdc_description table.
|
||||
*
|
||||
* Assumes that the cdc_generations table contains this generation.
|
||||
* Assumes that the cdc_topology_description table contains this generation.
|
||||
*
|
||||
* Returning from this function does not mean that the table update was successful: the function
|
||||
* might run an asynchronous task in the background.
|
||||
|
||||
263
cdc/log.cc
263
cdc/log.cc
@@ -51,7 +51,6 @@
|
||||
#include "types/listlike_partial_deserializing_iterator.hh"
|
||||
#include "tracing/trace_state.hh"
|
||||
#include "stats.hh"
|
||||
#include "compaction_strategy.hh"
|
||||
|
||||
namespace std {
|
||||
|
||||
@@ -174,7 +173,6 @@ public:
|
||||
auto& db = _ctxt._proxy.get_db().local();
|
||||
auto logname = log_name(schema.cf_name());
|
||||
check_that_cdc_log_table_does_not_exist(db, schema, logname);
|
||||
ensure_that_table_has_no_counter_columns(schema);
|
||||
|
||||
// in seastar thread
|
||||
auto log_schema = create_log_schema(schema);
|
||||
@@ -201,7 +199,6 @@ public:
|
||||
}
|
||||
if (is_cdc) {
|
||||
check_for_attempt_to_create_nested_cdc_log(new_schema);
|
||||
ensure_that_table_has_no_counter_columns(new_schema);
|
||||
}
|
||||
|
||||
auto logname = log_name(old_schema.cf_name());
|
||||
@@ -242,8 +239,7 @@ public:
|
||||
future<std::tuple<std::vector<mutation>, lw_shared_ptr<cdc::operation_result_tracker>>> augment_mutation_call(
|
||||
lowres_clock::time_point timeout,
|
||||
std::vector<mutation>&& mutations,
|
||||
tracing::trace_state_ptr tr_state,
|
||||
db::consistency_level write_cl
|
||||
tracing::trace_state_ptr tr_state
|
||||
);
|
||||
|
||||
template<typename Iter>
|
||||
@@ -266,13 +262,6 @@ private:
|
||||
schema.ks_name(), logname));
|
||||
}
|
||||
}
|
||||
|
||||
static void ensure_that_table_has_no_counter_columns(const schema& schema) {
|
||||
if (schema.is_counter()) {
|
||||
throw exceptions::invalid_request_exception(format("Cannot create CDC log for table {}.{}. Counter support not implemented",
|
||||
schema.ks_name(), schema.cf_name()));
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
cdc::cdc_service::cdc_service(service::storage_proxy& proxy)
|
||||
@@ -286,7 +275,6 @@ cdc::cdc_service::cdc_service(db_context ctxt)
|
||||
}
|
||||
|
||||
future<> cdc::cdc_service::stop() {
|
||||
_impl->_ctxt._proxy.set_cdc_service(nullptr);
|
||||
return _impl->stop();
|
||||
}
|
||||
|
||||
@@ -402,44 +390,18 @@ bytes log_data_column_deleted_elements_name_bytes(const bytes& column_name) {
|
||||
|
||||
static schema_ptr create_log_schema(const schema& s, std::optional<utils::UUID> uuid) {
|
||||
schema_builder b(s.ks_name(), log_name(s.cf_name()));
|
||||
b.with_partitioner("com.scylladb.dht.CDCPartitioner");
|
||||
b.set_compaction_strategy(sstables::compaction_strategy_type::time_window);
|
||||
b.set_comment(sprint("CDC log for %s.%s", s.ks_name(), s.cf_name()));
|
||||
auto ttl_seconds = s.cdc_options().ttl();
|
||||
if (ttl_seconds > 0) {
|
||||
b.set_gc_grace_seconds(0);
|
||||
auto ceil = [] (int dividend, int divisor) {
|
||||
return dividend / divisor + (dividend % divisor == 0 ? 0 : 1);
|
||||
};
|
||||
auto seconds_to_minutes = [] (int seconds_value) {
|
||||
using namespace std::chrono;
|
||||
return std::chrono::ceil<minutes>(seconds(seconds_value)).count();
|
||||
};
|
||||
// What's the minimum window that won't create more than 24 sstables.
|
||||
auto window_seconds = ceil(ttl_seconds, 24);
|
||||
auto window_minutes = seconds_to_minutes(window_seconds);
|
||||
b.set_compaction_strategy_options({
|
||||
{"compaction_window_unit", "MINUTES"},
|
||||
{"compaction_window_size", std::to_string(window_minutes)},
|
||||
// A new SSTable will become fully expired every
|
||||
// `window_seconds` seconds so we shouldn't check for expired
|
||||
// sstables too often.
|
||||
{"expired_sstable_check_frequency_seconds",
|
||||
std::to_string(std::max(1, window_seconds / 2))},
|
||||
});
|
||||
}
|
||||
b.with_column(log_meta_column_name_bytes("stream_id"), bytes_type, column_kind::partition_key);
|
||||
b.with_column(log_meta_column_name_bytes("time"), timeuuid_type, column_kind::clustering_key);
|
||||
b.with_column(log_meta_column_name_bytes("batch_seq_no"), int32_type, column_kind::clustering_key);
|
||||
b.with_column(log_meta_column_name_bytes("operation"), data_type_for<operation_native_type>());
|
||||
b.with_column(log_meta_column_name_bytes("ttl"), long_type);
|
||||
b.set_caching_options(caching_options::get_disabled_caching_options());
|
||||
auto add_columns = [&] (const schema::const_iterator_range_type& columns, bool is_data_col = false) {
|
||||
for (const auto& column : columns) {
|
||||
auto type = column.type;
|
||||
if (is_data_col && type->is_multi_cell()) {
|
||||
if (is_data_col) {
|
||||
type = visit(*type, make_visitor(
|
||||
// non-frozen lists are represented as map<timeuuid, value_type>. Otherwise we cannot express delta
|
||||
// lists are represented as map<timeuuid, value_type>. Otherwise we cannot express delta
|
||||
[] (const list_type_impl& type) -> data_type {
|
||||
return map_type_impl::get_instance(type.name_comparator(), type.value_comparator(), false);
|
||||
},
|
||||
@@ -448,6 +410,7 @@ static schema_ptr create_log_schema(const schema& s, std::optional<utils::UUID>
|
||||
return type.freeze();
|
||||
}
|
||||
));
|
||||
type = type->freeze();
|
||||
}
|
||||
b.with_column(log_data_column_name_bytes(column.name()), type);
|
||||
if (is_data_col) {
|
||||
@@ -479,7 +442,7 @@ static schema_ptr create_log_schema(const schema& s, std::optional<utils::UUID>
|
||||
if (uuid) {
|
||||
b.set_uuid(*uuid);
|
||||
}
|
||||
|
||||
|
||||
return b.build();
|
||||
}
|
||||
|
||||
@@ -557,12 +520,6 @@ api::timestamp_type find_timestamp(const schema& s, const mutation& m) {
|
||||
[&] (collection_mutation_view_description mview) {
|
||||
t = mview.tomb.timestamp;
|
||||
if (t != api::missing_timestamp) {
|
||||
// A collection tombstone with timestamp T can be created with:
|
||||
// UPDATE ks.t USING TIMESTAMP T + 1 SET X = null WHERE ...
|
||||
// where X is a non-atomic column.
|
||||
// This is, among others, the reason why we show it in the CDC log
|
||||
// with cdc$time using timestamp T + 1 instead of T.
|
||||
t += 1;
|
||||
return stop_iteration::yes;
|
||||
}
|
||||
|
||||
@@ -759,81 +716,6 @@ private:
|
||||
const column_definition& _ttl_col;
|
||||
ttl_opt _cdc_ttl_opt;
|
||||
|
||||
/**
|
||||
* #6070, #6084
|
||||
* Non-atomic column assignments which use a TTL are broken into two invocations
|
||||
* of `transform`, such as in the following example:
|
||||
* CREATE TABLE t (a int PRIMARY KEY, b map<int, int>) WITH cdc = {'enabled':true};
|
||||
* UPDATE t USING TTL 5 SET b = {0:0} WHERE a = 0;
|
||||
*
|
||||
* The above UPDATE creates a tombstone and a (0, 0) cell; because tombstones don't have the notion
|
||||
* of a TTL, we split the UPDATE into two separate changes (represented as two separate delta rows in the log,
|
||||
* resulting in two invocations of `transform`): one change for the deletion with no TTL,
|
||||
* and one change for adding cells with TTL = 5.
|
||||
*
|
||||
* In other words, we use the fact that
|
||||
* UPDATE t USING TTL 5 SET b = {0:0} WHERE a = 0;
|
||||
* is equivalent to
|
||||
* BEGIN UNLOGGED BATCH
|
||||
* UPDATE t SET b = null WHERE a = 0;
|
||||
* UPDATE t USING TTL 5 SET b = b + {0:0} WHERE a = 0;
|
||||
* APPLY BATCH;
|
||||
* (the mutations are the same in both cases),
|
||||
* and perform a separate `transform` call for each statement in the batch.
|
||||
*
|
||||
* An assignment also happens when an INSERT statement is used as follows:
|
||||
* INSERT INTO t (a, b) VALUES (0, {0:0}) USING TTL 5;
|
||||
*
|
||||
* This will be split into three separate changes (three invocations of `transform`):
|
||||
* 1. One with TTL = 5 for the row marker (introduces by the INSERT), indicating that a row was inserted.
|
||||
* 2. One without a TTL for the tombstone, indicating that the collection was cleared.
|
||||
* 3. One with TTL = 5 for the addition of cell (0, 0), indicating that the collection
|
||||
* was extended by a new key/value.
|
||||
*
|
||||
* Why do we need three changes and not two, like in the UPDATE case?
|
||||
* The tombstone needs to be a separate change because it doesn't have a TTL,
|
||||
* so only the row marker change could potentially be merged with the cell change (1 and 3 above).
|
||||
* However, we cannot do that: the row marker change is of INSERT type (cdc$operation == cdc::operation::insert),
|
||||
* but there is no way to create a statement that
|
||||
* - has a row marker,
|
||||
* - adds cells to a collection,
|
||||
* - but *doesn't* add a tombstone for this collection.
|
||||
* INSERT statements that modify collections *always* add tombstones.
|
||||
*
|
||||
* Merging the row marker with the cell addition would result in such an impossible statement.
|
||||
*
|
||||
* Instead, we observe that
|
||||
* INSERT INTO t (a, b) VALUES (0, {0:0}) USING TTL 5;
|
||||
* is equivalent to
|
||||
* BEGIN UNLOGGED BATCH
|
||||
* INSERT INTO t (a) VALUES (0) USING TTL 5;
|
||||
* UPDATE t SET b = null WHERE a = 0;
|
||||
* UPDATE t USING TTL 5 SET b = b + {0:0} WHERE a = 0;
|
||||
* APPLY BATCH;
|
||||
* and perform a separate `transform` call for each statement in the batch.
|
||||
*
|
||||
* Unfortunately, due to splitting, the cell addition call (b + b {0:0}) does not know about the tombstone.
|
||||
* If it was performed independently from the tombstone call, it would create a wrong post-image:
|
||||
* the post-image would look as if the previous cells still existed.
|
||||
* For example, suppose that b was equal to {1:1} before the above statement was performed.
|
||||
* Then the final post-image for b for above statement/batch would be {0:0, 1:1}, when instead it should be {0:0}.
|
||||
*
|
||||
* To handle this we use the fact that
|
||||
* 1. changes without a TTL are treated as if TTL = 0,
|
||||
* 2. `transform` is invoked in order of increasing TTLs,
|
||||
* and we maintain state between `transform` invocations (`_non_atomic_column_deletes`).
|
||||
*
|
||||
* Thus, the tombstone call will happen *before* the cell addition call,
|
||||
* so the cell addition call will know that there previously was a tombstone
|
||||
* and create a correct post-image.
|
||||
*
|
||||
* Furthermore, `transform` calls for INSERT changes (i.e. with a row marker)
|
||||
* happen before `transform` calls for UPDATE changes, so in the case of an INSERT
|
||||
* which modifies a collection column as above, the row marker call will happen first;
|
||||
* its post-image will still show {1:1} for the collection column. Good.
|
||||
*/
|
||||
std::unordered_set<const column_definition*> _non_atomic_column_deletes;
|
||||
|
||||
clustering_key set_pk_columns(const partition_key& pk, api::timestamp_type ts, bytes decomposed_tuuid, int batch_no, mutation& m) const {
|
||||
const auto log_ck = clustering_key::from_exploded(
|
||||
*m.schema(), { decomposed_tuuid, int32_type->decompose(batch_no) });
|
||||
@@ -934,18 +816,18 @@ public:
|
||||
|
||||
// TODO: is pre-image data based on query enough. We only have actual column data. Do we need
|
||||
// more details like tombstones/ttl? Probably not but keep in mind.
|
||||
std::tuple<mutation, stats::part_type_set> transform(const mutation& m, const cql3::untyped_result_set* rs, api::timestamp_type ts, bytes tuuid, int& batch_no) {
|
||||
std::tuple<mutation, stats::part_type_set> transform(const mutation& m, const cql3::untyped_result_set* rs, api::timestamp_type ts, bytes tuuid, int& batch_no) const {
|
||||
auto stream_id = _ctx._cdc_metadata.get_stream(ts, m.token());
|
||||
mutation res(_log_schema, stream_id.to_partition_key(*_log_schema));
|
||||
const auto preimage = _schema->cdc_options().preimage();
|
||||
const auto postimage = _schema->cdc_options().postimage();
|
||||
stats::part_type_set touched_parts;
|
||||
auto& p = m.partition();
|
||||
if (p.partition_tombstone()) {
|
||||
// Partition deletion
|
||||
touched_parts.set<stats::part_type::PARTITION_DELETE>();
|
||||
auto log_ck = set_pk_columns(m.key(), ts, tuuid, batch_no++, res);
|
||||
auto log_ck = set_pk_columns(m.key(), ts, tuuid, 0, res);
|
||||
set_operation(log_ck, ts, operation::partition_delete, res);
|
||||
++batch_no;
|
||||
} else if (!p.row_tombstones().empty()) {
|
||||
// range deletion
|
||||
touched_parts.set<stats::part_type::RANGE_TOMBSTONE>();
|
||||
@@ -967,30 +849,37 @@ public:
|
||||
}
|
||||
};
|
||||
{
|
||||
auto log_ck = set_pk_columns(m.key(), ts, tuuid, batch_no++, res);
|
||||
auto log_ck = set_pk_columns(m.key(), ts, tuuid, batch_no, res);
|
||||
set_bound(log_ck, rt.start);
|
||||
const auto start_operation = rt.start_kind == bound_kind::incl_start
|
||||
? operation::range_delete_start_inclusive
|
||||
: operation::range_delete_start_exclusive;
|
||||
set_operation(log_ck, ts, start_operation, res);
|
||||
++batch_no;
|
||||
}
|
||||
{
|
||||
auto log_ck = set_pk_columns(m.key(), ts, tuuid, batch_no++, res);
|
||||
auto log_ck = set_pk_columns(m.key(), ts, tuuid, batch_no, res);
|
||||
set_bound(log_ck, rt.end);
|
||||
const auto end_operation = rt.end_kind == bound_kind::incl_end
|
||||
? operation::range_delete_end_inclusive
|
||||
: operation::range_delete_end_exclusive;
|
||||
set_operation(log_ck, ts, end_operation, res);
|
||||
++batch_no;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// should be insert, update or deletion
|
||||
auto process_cells = [&](const row& r, column_kind ckind, const clustering_key& log_ck, std::optional<clustering_key> pikey, const cql3::untyped_result_set_row* pirow, std::optional<clustering_key> poikey) -> std::optional<gc_clock::duration> {
|
||||
if (postimage && !poikey) {
|
||||
poikey = set_pk_columns(m.key(), ts, tuuid, ++batch_no, res);
|
||||
set_operation(*poikey, ts, operation::post_image, res);
|
||||
}
|
||||
std::optional<gc_clock::duration> ttl;
|
||||
std::unordered_set<column_id> columns_assigned;
|
||||
r.for_each_cell([&](column_id id, const atomic_cell_or_collection& cell) {
|
||||
auto& cdef = _schema->column_at(ckind, id);
|
||||
auto* dst = _log_schema->get_column_definition(log_data_column_name_bytes(cdef.name()));
|
||||
auto has_pirow = pirow && pirow->has(cdef.name_as_text());
|
||||
bool is_column_delete = true;
|
||||
bytes_opt value;
|
||||
bytes_opt deleted_elements = std::nullopt;
|
||||
@@ -1033,9 +922,6 @@ public:
|
||||
: value.value().first_fragment()
|
||||
;
|
||||
value_callback(key, val, live);
|
||||
if (value.is_live_and_has_ttl()) {
|
||||
ttl = value.ttl();
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
@@ -1114,30 +1000,29 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
bytes_opt prev = get_preimage_col_value(cdef, pirow);
|
||||
if (is_column_delete) {
|
||||
res.set_cell(log_ck, log_data_column_deleted_name_bytes(cdef.name()), data_value(true), ts, _cdc_ttl_opt);
|
||||
}
|
||||
if (value) {
|
||||
res.set_cell(log_ck, *dst, atomic_cell::make_live(*dst->type, ts, *value, _cdc_ttl_opt));
|
||||
}
|
||||
|
||||
if (prev && pikey) {
|
||||
bytes_opt prev;
|
||||
|
||||
if (has_pirow) {
|
||||
prev = get_preimage_col_value(cdef, pirow);
|
||||
assert(std::addressof(res.partition().clustered_row(*_log_schema, *pikey)) != std::addressof(res.partition().clustered_row(*_log_schema, log_ck)));
|
||||
assert(pikey->explode() != log_ck.explode());
|
||||
res.set_cell(*pikey, *dst, atomic_cell::make_live(*dst->type, ts, *prev, _cdc_ttl_opt));
|
||||
}
|
||||
|
||||
if (is_column_delete) {
|
||||
res.set_cell(log_ck, log_data_column_deleted_name_bytes(cdef.name()), data_value(true), ts, _cdc_ttl_opt);
|
||||
if (!cdef.is_atomic()) {
|
||||
_non_atomic_column_deletes.insert(&cdef);
|
||||
}
|
||||
// don't merge with pre-image iff column delete
|
||||
prev = std::nullopt;
|
||||
}
|
||||
|
||||
if (value) {
|
||||
res.set_cell(log_ck, *dst, atomic_cell::make_live(*dst->type, ts, *value, _cdc_ttl_opt));
|
||||
}
|
||||
|
||||
if (poikey) {
|
||||
if (postimage) {
|
||||
// keep track of actually assigning this already
|
||||
columns_assigned.emplace(id);
|
||||
// don't merge with pre-image iff column delete
|
||||
if (is_column_delete) {
|
||||
prev = std::nullopt;
|
||||
}
|
||||
if (cdef.is_atomic() && !is_column_delete && value) {
|
||||
res.set_cell(*poikey, *dst, atomic_cell::make_live(*dst->type, ts, *value, _cdc_ttl_opt));
|
||||
} else if (!cdef.is_atomic() && (value || (deleted_elements && prev))) {
|
||||
@@ -1150,10 +1035,10 @@ public:
|
||||
});
|
||||
|
||||
// fill in all columns not already processed. Note that column nulls are also marked.
|
||||
if (poikey && pirow) {
|
||||
if (postimage && pirow) {
|
||||
for (auto& cdef : _schema->columns(ckind)) {
|
||||
if (!columns_assigned.count(cdef.id)) {
|
||||
auto v = get_preimage_col_value(cdef, pirow);
|
||||
auto v = pirow->get_view_opt(cdef.name_as_text());
|
||||
if (v) {
|
||||
auto dst = _log_schema->get_column_definition(log_data_column_name_bytes(cdef.name()));
|
||||
res.set_cell(*poikey, *dst, atomic_cell::make_live(*dst->type, ts, *v, _cdc_ttl_opt));
|
||||
@@ -1172,18 +1057,16 @@ public:
|
||||
|
||||
if (rs && !rs->empty()) {
|
||||
// For static rows, only one row from the result set is needed
|
||||
pirow = &rs->front();
|
||||
}
|
||||
|
||||
if (preimage && pirow) {
|
||||
pikey = set_pk_columns(m.key(), ts, tuuid, batch_no++, res);
|
||||
pikey = set_pk_columns(m.key(), ts, tuuid, batch_no, res);
|
||||
set_operation(*pikey, ts, operation::pre_image, res);
|
||||
pirow = &rs->front();
|
||||
++batch_no;
|
||||
}
|
||||
|
||||
auto log_ck = set_pk_columns(m.key(), ts, tuuid, batch_no++, res);
|
||||
auto log_ck = set_pk_columns(m.key(), ts, tuuid, batch_no, res);
|
||||
|
||||
if (postimage) {
|
||||
poikey = set_pk_columns(m.key(), ts, tuuid, batch_no++, res);
|
||||
poikey = set_pk_columns(m.key(), ts, tuuid, ++batch_no, res);
|
||||
set_operation(*poikey, ts, operation::post_image, res);
|
||||
}
|
||||
|
||||
@@ -1194,6 +1077,7 @@ public:
|
||||
if (ttl) {
|
||||
set_ttl(log_ck, ts, *ttl, res);
|
||||
}
|
||||
++batch_no;
|
||||
} else {
|
||||
touched_parts.set_if<stats::part_type::CLUSTERING_ROW>(!p.clustered_rows().empty());
|
||||
for (const rows_entry& r : p.clustered_rows()) {
|
||||
@@ -1214,21 +1098,19 @@ public:
|
||||
}
|
||||
}
|
||||
if (match) {
|
||||
pikey = set_pk_columns(m.key(), ts, tuuid, batch_no, res);
|
||||
set_operation(*pikey, ts, operation::pre_image, res);
|
||||
pirow = &utr;
|
||||
++batch_no;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (preimage && pirow) {
|
||||
pikey = set_pk_columns(m.key(), ts, tuuid, batch_no++, res);
|
||||
set_operation(*pikey, ts, operation::pre_image, res);
|
||||
}
|
||||
|
||||
auto log_ck = set_pk_columns(m.key(), ts, tuuid, batch_no++, res);
|
||||
auto log_ck = set_pk_columns(m.key(), ts, tuuid, batch_no, res);
|
||||
|
||||
if (postimage) {
|
||||
poikey = set_pk_columns(m.key(), ts, tuuid, batch_no++, res);
|
||||
poikey = set_pk_columns(m.key(), ts, tuuid, ++batch_no, res);
|
||||
set_operation(*poikey, ts, operation::post_image, res);
|
||||
}
|
||||
|
||||
@@ -1238,7 +1120,7 @@ public:
|
||||
auto cdef = _log_schema->get_column_definition(log_data_column_name_bytes(column.name()));
|
||||
res.set_cell(log_ck, *cdef, atomic_cell::make_live(*column.type, ts, bytes_view(ck_value[pos]), _cdc_ttl_opt));
|
||||
|
||||
if (pikey) {
|
||||
if (pirow) {
|
||||
assert(pirow->has(column.name_as_text()));
|
||||
res.set_cell(*pikey, *cdef, atomic_cell::make_live(*column.type, ts, bytes_view(ck_value[pos]), _cdc_ttl_opt));
|
||||
}
|
||||
@@ -1257,8 +1139,8 @@ public:
|
||||
for (const column_definition& column: _schema->regular_columns()) {
|
||||
assert(pirow->has(column.name_as_text()));
|
||||
auto& cdef = *_log_schema->get_column_definition(log_data_column_name_bytes(column.name()));
|
||||
auto value = get_preimage_col_value(column, pirow);
|
||||
res.set_cell(*pikey, cdef, atomic_cell::make_live(*column.type, ts, bytes_view(*value), _cdc_ttl_opt));
|
||||
auto value = get_preimage_col_value(column, pirow);
|
||||
res.set_cell(*pikey, cdef, atomic_cell::make_live(*column.type, ts, bytes_view(value), _cdc_ttl_opt));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -1275,6 +1157,7 @@ public:
|
||||
}
|
||||
}
|
||||
set_operation(log_ck, ts, cdc_op, res);
|
||||
++batch_no;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1282,13 +1165,7 @@ public:
|
||||
return std::make_tuple(std::move(res), touched_parts);
|
||||
}
|
||||
|
||||
bytes_opt get_preimage_col_value(const column_definition& cdef, const cql3::untyped_result_set_row *pirow) {
|
||||
/**
|
||||
* #6070 - see comment for _non_atomic_column_deletes
|
||||
*/
|
||||
if (!pirow || !pirow->has(cdef.name_as_text()) || _non_atomic_column_deletes.count(&cdef)) {
|
||||
return std::nullopt;
|
||||
}
|
||||
static bytes get_preimage_col_value(const column_definition& cdef, const cql3::untyped_result_set_row *pirow) {
|
||||
return cdef.is_atomic()
|
||||
? pirow->get_blob(cdef.name_as_text())
|
||||
: visit(*cdef.type, make_visitor(
|
||||
@@ -1317,7 +1194,7 @@ public:
|
||||
|
||||
future<lw_shared_ptr<cql3::untyped_result_set>> pre_image_select(
|
||||
service::client_state& client_state,
|
||||
db::consistency_level write_cl,
|
||||
db::consistency_level cl,
|
||||
const mutation& m)
|
||||
{
|
||||
auto& p = m.partition();
|
||||
@@ -1398,10 +1275,7 @@ public:
|
||||
auto partition_slice = query::partition_slice(std::move(bounds), std::move(static_columns), std::move(regular_columns), std::move(opts));
|
||||
auto command = ::make_lw_shared<query::read_command>(_schema->id(), _schema->version(), partition_slice, row_limit);
|
||||
|
||||
const auto select_cl = adjust_cl(write_cl);
|
||||
|
||||
try {
|
||||
return _ctx._proxy.query(_schema, std::move(command), std::move(partition_ranges), select_cl, service::storage_proxy::coordinator_query_options(default_timeout(), empty_service_permit(), client_state)).then(
|
||||
return _ctx._proxy.query(_schema, std::move(command), std::move(partition_ranges), cl, service::storage_proxy::coordinator_query_options(default_timeout(), empty_service_permit(), client_state)).then(
|
||||
[s = _schema, partition_slice = std::move(partition_slice), selection = std::move(selection)] (service::storage_proxy::coordinator_query_result qr) -> lw_shared_ptr<cql3::untyped_result_set> {
|
||||
cql3::selection::result_set_builder builder(*selection, gc_clock::now(), cql_serialization_format::latest());
|
||||
query::result_view::consume(*qr.query_result, partition_slice, cql3::selection::result_set_builder::visitor(builder, *s, *selection));
|
||||
@@ -1411,25 +1285,6 @@ public:
|
||||
}
|
||||
return make_lw_shared<cql3::untyped_result_set>(*result_set);
|
||||
});
|
||||
} catch (exceptions::unavailable_exception& e) {
|
||||
// `query` can throw `unavailable_exception`, which is seen by clients as ~ "NoHostAvailable".
|
||||
// So, we'll translate it to a `read_failure_exception` with custom message.
|
||||
cdc_log.debug("Preimage: translating a (read) `unavailable_exception` to `request_execution_exception` - {}", e);
|
||||
throw exceptions::read_failure_exception("CDC preimage query could not achieve the CL.",
|
||||
e.consistency, e.alive, 0, e.required, false);
|
||||
}
|
||||
}
|
||||
|
||||
/** For preimage query use the same CL as for base write, except for CLs ANY and ALL. */
|
||||
static db::consistency_level adjust_cl(db::consistency_level write_cl) {
|
||||
if (write_cl == db::consistency_level::ANY) {
|
||||
return db::consistency_level::ONE;
|
||||
} else if (write_cl == db::consistency_level::ALL || write_cl == db::consistency_level::SERIAL) {
|
||||
return db::consistency_level::QUORUM;
|
||||
} else if (write_cl == db::consistency_level::LOCAL_SERIAL) {
|
||||
return db::consistency_level::LOCAL_QUORUM;
|
||||
}
|
||||
return write_cl;
|
||||
}
|
||||
};
|
||||
|
||||
@@ -1445,7 +1300,7 @@ transform_mutations(std::vector<mutation>& muts, decltype(muts.size()) batch_siz
|
||||
} // namespace cdc
|
||||
|
||||
future<std::tuple<std::vector<mutation>, lw_shared_ptr<cdc::operation_result_tracker>>>
|
||||
cdc::cdc_service::impl::augment_mutation_call(lowres_clock::time_point timeout, std::vector<mutation>&& mutations, tracing::trace_state_ptr tr_state, db::consistency_level write_cl) {
|
||||
cdc::cdc_service::impl::augment_mutation_call(lowres_clock::time_point timeout, std::vector<mutation>&& mutations, tracing::trace_state_ptr tr_state) {
|
||||
// we do all this because in the case of batches, we can have mixed schemas.
|
||||
auto e = mutations.end();
|
||||
auto i = std::find_if(mutations.begin(), e, [](const mutation& m) {
|
||||
@@ -1460,8 +1315,8 @@ cdc::cdc_service::impl::augment_mutation_call(lowres_clock::time_point timeout,
|
||||
mutations.reserve(2 * mutations.size());
|
||||
|
||||
return do_with(std::move(mutations), service::query_state(service::client_state::for_internal_calls(), empty_service_permit()), operation_details{},
|
||||
[this, timeout, i, tr_state = std::move(tr_state), write_cl] (std::vector<mutation>& mutations, service::query_state& qs, operation_details& details) {
|
||||
return transform_mutations(mutations, 1, [this, &mutations, timeout, &qs, tr_state = tr_state, &details, write_cl] (int idx) mutable {
|
||||
[this, timeout, i, tr_state = std::move(tr_state)] (std::vector<mutation>& mutations, service::query_state& qs, operation_details& details) {
|
||||
return transform_mutations(mutations, 1, [this, &mutations, timeout, &qs, tr_state = tr_state, &details] (int idx) mutable {
|
||||
auto& m = mutations[idx];
|
||||
auto s = m.schema();
|
||||
|
||||
@@ -1477,7 +1332,7 @@ cdc::cdc_service::impl::augment_mutation_call(lowres_clock::time_point timeout,
|
||||
// iff a batch contains several modifications to the same table. Otoh, batch is rare(?)
|
||||
// so this is premature.
|
||||
tracing::trace(tr_state, "CDC: Selecting preimage for {}", m.decorated_key());
|
||||
f = trans.pre_image_select(qs.get_client_state(), write_cl, m).then_wrapped([this] (future<lw_shared_ptr<cql3::untyped_result_set>> f) {
|
||||
f = trans.pre_image_select(qs.get_client_state(), db::consistency_level::LOCAL_QUORUM, m).then_wrapped([this] (future<lw_shared_ptr<cql3::untyped_result_set>> f) {
|
||||
auto& cdc_stats = _ctxt._proxy.get_cdc_stats();
|
||||
cdc_stats.counters_total.preimage_selects++;
|
||||
if (f.failed()) {
|
||||
@@ -1489,7 +1344,7 @@ cdc::cdc_service::impl::augment_mutation_call(lowres_clock::time_point timeout,
|
||||
tracing::trace(tr_state, "CDC: Preimage not enabled for the table, not querying current value of {}", m.decorated_key());
|
||||
}
|
||||
|
||||
return f.then([trans = std::move(trans), &mutations, idx, tr_state, &details] (lw_shared_ptr<cql3::untyped_result_set> rs) mutable {
|
||||
return f.then([trans = std::move(trans), &mutations, idx, tr_state = std::move(tr_state), &details] (lw_shared_ptr<cql3::untyped_result_set> rs) {
|
||||
auto& m = mutations[idx];
|
||||
auto& s = m.schema();
|
||||
details.had_preimage |= s->cdc_options().preimage();
|
||||
@@ -1534,6 +1389,6 @@ bool cdc::cdc_service::needs_cdc_augmentation(const std::vector<mutation>& mutat
|
||||
}
|
||||
|
||||
future<std::tuple<std::vector<mutation>, lw_shared_ptr<cdc::operation_result_tracker>>>
|
||||
cdc::cdc_service::augment_mutation_call(lowres_clock::time_point timeout, std::vector<mutation>&& mutations, tracing::trace_state_ptr tr_state, db::consistency_level write_cl) {
|
||||
return _impl->augment_mutation_call(timeout, std::move(mutations), std::move(tr_state), write_cl);
|
||||
cdc::cdc_service::augment_mutation_call(lowres_clock::time_point timeout, std::vector<mutation>&& mutations, tracing::trace_state_ptr tr_state) {
|
||||
return _impl->augment_mutation_call(timeout, std::move(mutations), std::move(tr_state));
|
||||
}
|
||||
|
||||
@@ -75,7 +75,7 @@ class metadata;
|
||||
/// CDC service will listen for schema changes and iff CDC is enabled/changed
|
||||
/// create/modify/delete corresponding log tables etc as part of the schema change.
|
||||
///
|
||||
class cdc_service final : public async_sharded_service<cdc::cdc_service> {
|
||||
class cdc_service {
|
||||
class impl;
|
||||
std::unique_ptr<impl> _impl;
|
||||
public:
|
||||
@@ -91,8 +91,7 @@ public:
|
||||
future<std::tuple<std::vector<mutation>, lw_shared_ptr<operation_result_tracker>>> augment_mutation_call(
|
||||
lowres_clock::time_point timeout,
|
||||
std::vector<mutation>&& mutations,
|
||||
tracing::trace_state_ptr tr_state,
|
||||
db::consistency_level write_cl
|
||||
tracing::trace_state_ptr tr_state
|
||||
);
|
||||
bool needs_cdc_augmentation(const std::vector<mutation>&) const;
|
||||
};
|
||||
|
||||
134
cdc/split.cc
134
cdc/split.cc
@@ -30,16 +30,23 @@ struct atomic_column_update {
|
||||
atomic_cell cell;
|
||||
};
|
||||
|
||||
// see the comment inside `clustered_row_insert` for motivation for separating
|
||||
// nonatomic deletions from nonatomic updates
|
||||
struct nonatomic_column_deletion {
|
||||
column_id id;
|
||||
tombstone t;
|
||||
};
|
||||
|
||||
struct nonatomic_column_update {
|
||||
column_id id;
|
||||
tombstone t; // optional
|
||||
utils::chunked_vector<std::pair<bytes, atomic_cell>> cells;
|
||||
};
|
||||
|
||||
struct static_row_update {
|
||||
gc_clock::duration ttl;
|
||||
std::vector<atomic_column_update> atomic_entries;
|
||||
std::vector<nonatomic_column_update> nonatomic_entries;
|
||||
std::vector<nonatomic_column_deletion> nonatomic_deletions;
|
||||
std::vector<nonatomic_column_update> nonatomic_updates;
|
||||
};
|
||||
|
||||
struct clustered_row_insert {
|
||||
@@ -47,14 +54,19 @@ struct clustered_row_insert {
|
||||
clustering_key key;
|
||||
row_marker marker;
|
||||
std::vector<atomic_column_update> atomic_entries;
|
||||
std::vector<nonatomic_column_update> nonatomic_entries;
|
||||
std::vector<nonatomic_column_deletion> nonatomic_deletions;
|
||||
// INSERTs can't express updates of individual cells inside a non-atomic
|
||||
// (without deleting the entire field first), so no `nonatomic_updates` field
|
||||
// overwriting a nonatomic column inside an INSERT will be split into two changes:
|
||||
// one with a nonatomic deletion, and one with a nonatomic update
|
||||
};
|
||||
|
||||
struct clustered_row_update {
|
||||
gc_clock::duration ttl;
|
||||
clustering_key key;
|
||||
std::vector<atomic_column_update> atomic_entries;
|
||||
std::vector<nonatomic_column_update> nonatomic_entries;
|
||||
std::vector<nonatomic_column_deletion> nonatomic_deletions;
|
||||
std::vector<nonatomic_column_update> nonatomic_updates;
|
||||
};
|
||||
|
||||
struct clustered_row_deletion {
|
||||
@@ -83,7 +95,8 @@ using set_of_changes = std::map<api::timestamp_type, batch>;
|
||||
|
||||
struct row_update {
|
||||
std::vector<atomic_column_update> atomic_entries;
|
||||
std::vector<nonatomic_column_update> nonatomic_entries;
|
||||
std::vector<nonatomic_column_deletion> nonatomic_deletions;
|
||||
std::vector<nonatomic_column_update> nonatomic_updates;
|
||||
};
|
||||
|
||||
static
|
||||
@@ -109,7 +122,7 @@ extract_row_updates(const row& r, column_kind ckind, const schema& schema) {
|
||||
v.timestamp(),
|
||||
v.is_live_and_has_ttl() ? v.ttl() : gc_clock::duration(0)
|
||||
);
|
||||
auto& updates = result[timestamp_and_ttl].nonatomic_entries;
|
||||
auto& updates = result[timestamp_and_ttl].nonatomic_updates;
|
||||
if (updates.empty() || updates.back().id != id) {
|
||||
updates.push_back({id, {}});
|
||||
}
|
||||
@@ -117,12 +130,8 @@ extract_row_updates(const row& r, column_kind ckind, const schema& schema) {
|
||||
}
|
||||
|
||||
if (desc.tomb) {
|
||||
auto timestamp_and_ttl = std::pair(desc.tomb.timestamp + 1, gc_clock::duration(0));
|
||||
auto& updates = result[timestamp_and_ttl].nonatomic_entries;
|
||||
if (updates.empty() || updates.back().id != id) {
|
||||
updates.push_back({id, {}});
|
||||
}
|
||||
updates.back().t = std::move(desc.tomb);
|
||||
auto timestamp_and_ttl = std::pair(desc.tomb.timestamp, gc_clock::duration(0));
|
||||
result[timestamp_and_ttl].nonatomic_deletions.push_back({id, desc.tomb});
|
||||
}
|
||||
});
|
||||
});
|
||||
@@ -139,7 +148,8 @@ set_of_changes extract_changes(const mutation& base_mutation, const schema& base
|
||||
res[timestamp].static_updates.push_back({
|
||||
ttl,
|
||||
std::move(up.atomic_entries),
|
||||
std::move(up.nonatomic_entries)
|
||||
std::move(up.nonatomic_deletions),
|
||||
std::move(up.nonatomic_updates)
|
||||
});
|
||||
}
|
||||
|
||||
@@ -163,9 +173,6 @@ set_of_changes extract_changes(const mutation& base_mutation, const schema& base
|
||||
};
|
||||
|
||||
for (auto& [k, up]: cr_updates) {
|
||||
// It is important that changes in the resulting `set_of_changes` are listed
|
||||
// in increasing TTL order. The reason is explained in a comment in cdc/log.cc,
|
||||
// search for "#6070".
|
||||
auto [timestamp, ttl] = k;
|
||||
|
||||
if (is_insert(timestamp, ttl)) {
|
||||
@@ -174,70 +181,25 @@ set_of_changes extract_changes(const mutation& base_mutation, const schema& base
|
||||
cr.key(),
|
||||
marker,
|
||||
std::move(up.atomic_entries),
|
||||
{}
|
||||
std::move(up.nonatomic_deletions)
|
||||
});
|
||||
|
||||
auto& cr_insert = res[timestamp].clustered_inserts.back();
|
||||
bool clustered_update_exists = false;
|
||||
for (auto& nonatomic_up: up.nonatomic_entries) {
|
||||
// Updating a collection column with an INSERT statement implies inserting a tombstone.
|
||||
//
|
||||
// For example, suppose that we have:
|
||||
// CREATE TABLE t (a int primary key, b map<int, int>);
|
||||
// Then the following statement:
|
||||
// INSERT INTO t (a, b) VALUES (0, {0:0}) USING TIMESTAMP T;
|
||||
// creates a tombstone in column b with timestamp T-1.
|
||||
// It also creates a cell (0, 0) with timestamp T.
|
||||
//
|
||||
// There is no way to create just the cell using an INSERT statement.
|
||||
// This can only be done using an UPDATE, as follows:
|
||||
// UPDATE t USING TIMESTAMP T SET b = b + {0:0} WHERE a = 0;
|
||||
// note that this is different than
|
||||
// UPDATE t USING TIMESTAMP T SET b = {0:0} WHERE a = 0;
|
||||
// which also creates a tombstone with timestamp T-1.
|
||||
//
|
||||
// It follows that:
|
||||
// - if `nonatomic_up` has a tombstone, it can be made merged with our `cr_insert`,
|
||||
// which represents an INSERT change.
|
||||
// - but if `nonatomic_up` only has cells, we must create a separate UPDATE change
|
||||
// for the cells alone.
|
||||
if (nonatomic_up.t) {
|
||||
cr_insert.nonatomic_entries.push_back(std::move(nonatomic_up));
|
||||
} else {
|
||||
if (!clustered_update_exists) {
|
||||
res[timestamp].clustered_updates.push_back({
|
||||
ttl,
|
||||
cr.key(),
|
||||
{},
|
||||
{}
|
||||
});
|
||||
|
||||
// Multiple iterations of this `for` loop (for different collection columns)
|
||||
// might want to put their `nonatomic_up`s into an UPDATE change;
|
||||
// but we don't want to create a separate change for each of them, reusing one instead.
|
||||
//
|
||||
// Example:
|
||||
// CREATE TABLE t (a int primary key, b map<int, int>, c map <int, int>) with cdc = {'enabled':true};
|
||||
// insert into t (a, b, c) values (0, {1:1}, {2:2}) USING TTL 5;
|
||||
//
|
||||
// this should create 3 delta rows:
|
||||
// 1. one for the row marker (indicating an INSERT), with TTL 5
|
||||
// 2. one for the b and c tombstones, without TTL (cdc$ttl = null)
|
||||
// 3. one for the b and c cells, with TTL 5
|
||||
// This logic takes care that b cells and c cells are put into a single change (3. above).
|
||||
clustered_update_exists = true;
|
||||
}
|
||||
|
||||
auto& cr_update = res[timestamp].clustered_updates.back();
|
||||
cr_update.nonatomic_entries.push_back(std::move(nonatomic_up));
|
||||
}
|
||||
if (!up.nonatomic_updates.empty()) {
|
||||
// nonatomic updates cannot be expressed with an INSERT.
|
||||
res[timestamp].clustered_updates.push_back({
|
||||
ttl,
|
||||
cr.key(),
|
||||
{},
|
||||
{},
|
||||
std::move(up.nonatomic_updates)
|
||||
});
|
||||
}
|
||||
} else {
|
||||
res[timestamp].clustered_updates.push_back({
|
||||
ttl,
|
||||
cr.key(),
|
||||
std::move(up.atomic_entries),
|
||||
std::move(up.nonatomic_entries)
|
||||
std::move(up.nonatomic_deletions),
|
||||
std::move(up.nonatomic_updates)
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -309,7 +271,7 @@ bool should_split(const mutation& base_mutation, const schema& base_schema) {
|
||||
}
|
||||
|
||||
if (desc.tomb) {
|
||||
if (check_or_set(desc.tomb.timestamp + 1, gc_clock::duration(0))) {
|
||||
if (check_or_set(desc.tomb.timestamp, gc_clock::duration(0))) {
|
||||
should_split = true;
|
||||
return;
|
||||
}
|
||||
@@ -364,7 +326,7 @@ bool should_split(const mutation& base_mutation, const schema& base_schema) {
|
||||
}
|
||||
|
||||
if (mview.tomb) {
|
||||
if (check_or_set(mview.tomb.timestamp + 1, gc_clock::duration(0))) {
|
||||
if (check_or_set(mview.tomb.timestamp, gc_clock::duration(0))) {
|
||||
should_split = true;
|
||||
return;
|
||||
}
|
||||
@@ -430,9 +392,13 @@ void for_each_change(const mutation& base_mutation, const schema_ptr& base_schem
|
||||
auto& cdef = base_schema->column_at(column_kind::static_column, atomic_update.id);
|
||||
m.set_static_cell(cdef, std::move(atomic_update.cell));
|
||||
}
|
||||
for (auto& nonatomic_update : sr_update.nonatomic_entries) {
|
||||
for (auto& nonatomic_delete : sr_update.nonatomic_deletions) {
|
||||
auto& cdef = base_schema->column_at(column_kind::static_column, nonatomic_delete.id);
|
||||
m.set_static_cell(cdef, collection_mutation_description{nonatomic_delete.t, {}}.serialize(*cdef.type));
|
||||
}
|
||||
for (auto& nonatomic_update : sr_update.nonatomic_updates) {
|
||||
auto& cdef = base_schema->column_at(column_kind::static_column, nonatomic_update.id);
|
||||
m.set_static_cell(cdef, collection_mutation_description{nonatomic_update.t, std::move(nonatomic_update.cells)}.serialize(*cdef.type));
|
||||
m.set_static_cell(cdef, collection_mutation_description{{}, std::move(nonatomic_update.cells)}.serialize(*cdef.type));
|
||||
}
|
||||
f(std::move(m), change_ts, tuuid, batch_no);
|
||||
}
|
||||
@@ -445,9 +411,9 @@ void for_each_change(const mutation& base_mutation, const schema_ptr& base_schem
|
||||
auto& cdef = base_schema->column_at(column_kind::regular_column, atomic_update.id);
|
||||
row.cells().apply(cdef, std::move(atomic_update.cell));
|
||||
}
|
||||
for (auto& nonatomic_update : cr_insert.nonatomic_entries) {
|
||||
auto& cdef = base_schema->column_at(column_kind::regular_column, nonatomic_update.id);
|
||||
row.cells().apply(cdef, collection_mutation_description{nonatomic_update.t, std::move(nonatomic_update.cells)}.serialize(*cdef.type));
|
||||
for (auto& nonatomic_delete : cr_insert.nonatomic_deletions) {
|
||||
auto& cdef = base_schema->column_at(column_kind::regular_column, nonatomic_delete.id);
|
||||
row.cells().apply(cdef, collection_mutation_description{nonatomic_delete.t, {}}.serialize(*cdef.type));
|
||||
}
|
||||
row.apply(cr_insert.marker);
|
||||
|
||||
@@ -462,9 +428,13 @@ void for_each_change(const mutation& base_mutation, const schema_ptr& base_schem
|
||||
auto& cdef = base_schema->column_at(column_kind::regular_column, atomic_update.id);
|
||||
row.apply(cdef, std::move(atomic_update.cell));
|
||||
}
|
||||
for (auto& nonatomic_update : cr_update.nonatomic_entries) {
|
||||
for (auto& nonatomic_delete : cr_update.nonatomic_deletions) {
|
||||
auto& cdef = base_schema->column_at(column_kind::regular_column, nonatomic_delete.id);
|
||||
row.apply(cdef, collection_mutation_description{nonatomic_delete.t, {}}.serialize(*cdef.type));
|
||||
}
|
||||
for (auto& nonatomic_update : cr_update.nonatomic_updates) {
|
||||
auto& cdef = base_schema->column_at(column_kind::regular_column, nonatomic_update.id);
|
||||
row.apply(cdef, collection_mutation_description{nonatomic_update.t, std::move(nonatomic_update.cells)}.serialize(*cdef.type));
|
||||
row.apply(cdef, collection_mutation_description{{}, std::move(nonatomic_update.cells)}.serialize(*cdef.type));
|
||||
}
|
||||
|
||||
f(std::move(m), change_ts, tuuid, batch_no);
|
||||
|
||||
@@ -22,7 +22,7 @@
|
||||
#pragma once
|
||||
|
||||
#include "seastar/core/file.hh"
|
||||
#include "seastar/core/seastar.hh"
|
||||
#include "seastar/core/reactor.hh"
|
||||
#include "utils/disk-error-handler.hh"
|
||||
|
||||
#include "seastarx.hh"
|
||||
@@ -147,7 +147,7 @@ inline open_checked_directory(const io_error_handler& error_handler,
|
||||
sstring name)
|
||||
{
|
||||
return do_io_check(error_handler, [&] {
|
||||
return open_directory(name).then([&] (file f) {
|
||||
return engine().open_directory(name).then([&] (file f) {
|
||||
return make_ready_future<file>(make_checked_file(error_handler, f));
|
||||
});
|
||||
});
|
||||
|
||||
@@ -122,26 +122,26 @@ public:
|
||||
return {_empty_prefix, bound_kind::incl_end};
|
||||
}
|
||||
template<template<typename> typename R>
|
||||
requires Range<R, clustering_key_prefix_view>
|
||||
GCC6_CONCEPT( requires Range<R, clustering_key_prefix_view> )
|
||||
static bound_view from_range_start(const R<clustering_key_prefix>& range) {
|
||||
return range.start()
|
||||
? bound_view(range.start()->value(), range.start()->is_inclusive() ? bound_kind::incl_start : bound_kind::excl_start)
|
||||
: bottom();
|
||||
}
|
||||
template<template<typename> typename R>
|
||||
requires Range<R, clustering_key_prefix>
|
||||
GCC6_CONCEPT( requires Range<R, clustering_key_prefix> )
|
||||
static bound_view from_range_end(const R<clustering_key_prefix>& range) {
|
||||
return range.end()
|
||||
? bound_view(range.end()->value(), range.end()->is_inclusive() ? bound_kind::incl_end : bound_kind::excl_end)
|
||||
: top();
|
||||
}
|
||||
template<template<typename> typename R>
|
||||
requires Range<R, clustering_key_prefix>
|
||||
GCC6_CONCEPT( requires Range<R, clustering_key_prefix> )
|
||||
static std::pair<bound_view, bound_view> from_range(const R<clustering_key_prefix>& range) {
|
||||
return {from_range_start(range), from_range_end(range)};
|
||||
}
|
||||
template<template<typename> typename R>
|
||||
requires Range<R, clustering_key_prefix_view>
|
||||
GCC6_CONCEPT( requires Range<R, clustering_key_prefix_view> )
|
||||
static std::optional<typename R<clustering_key_prefix_view>::bound> to_range_bound(const bound_view& bv) {
|
||||
if (&bv._prefix.get() == &_empty_prefix) {
|
||||
return {};
|
||||
|
||||
@@ -61,7 +61,7 @@ bool collection_mutation_view::is_empty() const {
|
||||
}
|
||||
|
||||
template <typename F>
|
||||
requires std::is_invocable_r_v<const data::type_info&, F, collection_mutation_input_stream&>
|
||||
GCC6_CONCEPT(requires std::is_invocable_r_v<const data::type_info&, F, collection_mutation_input_stream&>)
|
||||
static bool is_any_live(const atomic_cell_value_view& data, tombstone tomb, gc_clock::time_point now, F&& read_cell_type_info) {
|
||||
auto in = collection_mutation_input_stream(data);
|
||||
auto has_tomb = in.read_trivial<bool>();
|
||||
@@ -108,7 +108,7 @@ bool collection_mutation_view::is_any_live(const abstract_type& type, tombstone
|
||||
}
|
||||
|
||||
template <typename F>
|
||||
requires std::is_invocable_r_v<const data::type_info&, F, collection_mutation_input_stream&>
|
||||
GCC6_CONCEPT(requires std::is_invocable_r_v<const data::type_info&, F, collection_mutation_input_stream&>)
|
||||
static api::timestamp_type last_update(const atomic_cell_value_view& data, F&& read_cell_type_info) {
|
||||
auto in = collection_mutation_input_stream(data);
|
||||
api::timestamp_type max = api::missing_timestamp;
|
||||
@@ -313,7 +313,7 @@ collection_mutation collection_mutation_view_description::serialize(const abstra
|
||||
}
|
||||
|
||||
template <typename C>
|
||||
requires std::is_base_of_v<abstract_type, std::remove_reference_t<C>>
|
||||
GCC6_CONCEPT(requires std::is_base_of_v<abstract_type, std::remove_reference_t<C>>)
|
||||
static collection_mutation_view_description
|
||||
merge(collection_mutation_view_description a, collection_mutation_view_description b, C&& key_type) {
|
||||
using element_type = std::pair<bytes_view, atomic_cell_view>;
|
||||
@@ -375,7 +375,7 @@ collection_mutation merge(const abstract_type& type, collection_mutation_view a,
|
||||
}
|
||||
|
||||
template <typename C>
|
||||
requires std::is_base_of_v<abstract_type, std::remove_reference_t<C>>
|
||||
GCC6_CONCEPT(requires std::is_base_of_v<abstract_type, std::remove_reference_t<C>>)
|
||||
static collection_mutation_view_description
|
||||
difference(collection_mutation_view_description a, collection_mutation_view_description b, C&& key_type)
|
||||
{
|
||||
@@ -421,7 +421,7 @@ collection_mutation difference(const abstract_type& type, collection_mutation_vi
|
||||
}
|
||||
|
||||
template <typename F>
|
||||
requires std::is_invocable_r_v<std::pair<bytes_view, atomic_cell_view>, F, collection_mutation_input_stream&>
|
||||
GCC6_CONCEPT(requires std::is_invocable_r_v<std::pair<bytes_view, atomic_cell_view>, F, collection_mutation_input_stream&>)
|
||||
static collection_mutation_view_description
|
||||
deserialize_collection_mutation(collection_mutation_input_stream& in, F&& read_kv) {
|
||||
collection_mutation_view_description ret;
|
||||
|
||||
@@ -23,13 +23,11 @@
|
||||
|
||||
#include <seastar/core/future.hh>
|
||||
#include <seastar/util/noncopyable_function.hh>
|
||||
#include <seastar/core/file.hh>
|
||||
|
||||
#include "schema_fwd.hh"
|
||||
#include "sstables/shared_sstable.hh"
|
||||
#include "exceptions/exceptions.hh"
|
||||
#include "sstables/compaction_backlog_manager.hh"
|
||||
#include "compaction_strategy_type.hh"
|
||||
|
||||
class table;
|
||||
using column_family = table;
|
||||
@@ -39,6 +37,15 @@ struct mutation_source_metadata;
|
||||
|
||||
namespace sstables {
|
||||
|
||||
enum class compaction_strategy_type {
|
||||
null,
|
||||
major,
|
||||
size_tiered,
|
||||
leveled,
|
||||
date_tiered,
|
||||
time_window,
|
||||
};
|
||||
|
||||
class compaction_strategy_impl;
|
||||
class sstable;
|
||||
class sstable_set;
|
||||
@@ -63,6 +70,8 @@ public:
|
||||
|
||||
compaction_descriptor get_major_compaction_job(column_family& cf, std::vector<shared_sstable> candidates);
|
||||
|
||||
std::vector<resharding_descriptor> get_resharding_jobs(column_family& cf, std::vector<shared_sstable> candidates);
|
||||
|
||||
// Some strategies may look at the compacted and resulting sstables to
|
||||
// get some useful information for subsequent compactions.
|
||||
void notify_completion(const std::vector<shared_sstable>& removed, const std::vector<shared_sstable>& added);
|
||||
@@ -131,23 +140,6 @@ public:
|
||||
uint64_t adjust_partition_estimate(const mutation_source_metadata& ms_meta, uint64_t partition_estimate);
|
||||
|
||||
reader_consumer make_interposer_consumer(const mutation_source_metadata& ms_meta, reader_consumer end_consumer);
|
||||
|
||||
// Returns whether or not interposer consumer is used by a given strategy.
|
||||
bool use_interposer_consumer() const;
|
||||
|
||||
// Informs the caller (usually the compaction manager) about what would it take for this set of
|
||||
// SSTables closer to becoming in-strategy. If this returns an empty compaction descriptor, this
|
||||
// means that the sstable set is already in-strategy.
|
||||
//
|
||||
// The caller can specify one of two modes: strict or relaxed. In relaxed mode the tolerance for
|
||||
// what is considered offstrategy is higher. It can be used, for instance, for when the system
|
||||
// is restarting and previous compactions were likely in-flight. In strict mode, we are less
|
||||
// tolerant to invariant breakages.
|
||||
//
|
||||
// The caller should also pass a maximum number of SSTables which is the maximum amount of
|
||||
// SSTables that can be added into a single job.
|
||||
compaction_descriptor get_reshaping_job(std::vector<shared_sstable> input, schema_ptr schema, const ::io_priority_class& iop, reshape_mode mode);
|
||||
|
||||
};
|
||||
|
||||
// Creates a compaction_strategy object from one of the strategies available.
|
||||
|
||||
@@ -1,36 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2020 ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
namespace sstables {
|
||||
|
||||
enum class compaction_strategy_type {
|
||||
null,
|
||||
major,
|
||||
size_tiered,
|
||||
leveled,
|
||||
date_tiered,
|
||||
time_window,
|
||||
};
|
||||
|
||||
enum class reshape_mode { strict, relaxed };
|
||||
}
|
||||
35
compound.hh
35
compound.hh
@@ -29,6 +29,7 @@
|
||||
#include <boost/range/adaptor/transformed.hpp>
|
||||
#include "utils/serialization.hh"
|
||||
#include <seastar/util/backtrace.hh>
|
||||
#include "unimplemented.hh"
|
||||
|
||||
enum class allow_prefixes { no, yes };
|
||||
|
||||
@@ -90,7 +91,7 @@ private:
|
||||
return len;
|
||||
}
|
||||
public:
|
||||
bytes serialize_single(bytes&& v) const {
|
||||
bytes serialize_single(bytes&& v) {
|
||||
return serialize_value({std::move(v)});
|
||||
}
|
||||
template<typename RangeOfSerializedComponents>
|
||||
@@ -108,7 +109,7 @@ public:
|
||||
static bytes serialize_value(std::initializer_list<T> values) {
|
||||
return serialize_value(boost::make_iterator_range(values.begin(), values.end()));
|
||||
}
|
||||
bytes serialize_optionals(const std::vector<bytes_opt>& values) const {
|
||||
bytes serialize_optionals(const std::vector<bytes_opt>& values) {
|
||||
return serialize_value(values | boost::adaptors::transformed([] (const bytes_opt& bo) -> bytes_view {
|
||||
if (!bo) {
|
||||
throw std::logic_error("attempted to create key component from empty optional");
|
||||
@@ -116,7 +117,7 @@ public:
|
||||
return *bo;
|
||||
}));
|
||||
}
|
||||
bytes serialize_value_deep(const std::vector<data_value>& values) const {
|
||||
bytes serialize_value_deep(const std::vector<data_value>& values) {
|
||||
// TODO: Optimize
|
||||
std::vector<bytes> partial;
|
||||
partial.reserve(values.size());
|
||||
@@ -127,7 +128,7 @@ public:
|
||||
}
|
||||
return serialize_value(partial);
|
||||
}
|
||||
bytes decompose_value(const value_type& values) const {
|
||||
bytes decompose_value(const value_type& values) {
|
||||
return serialize_value(values);
|
||||
}
|
||||
class iterator : public std::iterator<std::input_iterator_tag, const bytes_view> {
|
||||
@@ -179,7 +180,7 @@ public:
|
||||
static boost::iterator_range<iterator> components(const bytes_view& v) {
|
||||
return { begin(v), end(v) };
|
||||
}
|
||||
value_type deserialize_value(bytes_view v) const {
|
||||
value_type deserialize_value(bytes_view v) {
|
||||
std::vector<bytes> result;
|
||||
result.reserve(_types.size());
|
||||
std::transform(begin(v), end(v), std::back_inserter(result), [] (auto&& v) {
|
||||
@@ -187,10 +188,10 @@ public:
|
||||
});
|
||||
return result;
|
||||
}
|
||||
bool less(bytes_view b1, bytes_view b2) const {
|
||||
bool less(bytes_view b1, bytes_view b2) {
|
||||
return compare(b1, b2) < 0;
|
||||
}
|
||||
size_t hash(bytes_view v) const {
|
||||
size_t hash(bytes_view v) {
|
||||
if (_byte_order_equal) {
|
||||
return std::hash<bytes_view>()(v);
|
||||
}
|
||||
@@ -202,7 +203,7 @@ public:
|
||||
}
|
||||
return h;
|
||||
}
|
||||
int compare(bytes_view b1, bytes_view b2) const {
|
||||
int compare(bytes_view b1, bytes_view b2) {
|
||||
if (_byte_order_comparable) {
|
||||
if (_is_reversed) {
|
||||
return compare_unsigned(b2, b1);
|
||||
@@ -223,21 +224,11 @@ public:
|
||||
bool is_empty(bytes_view v) const {
|
||||
return begin(v) == end(v);
|
||||
}
|
||||
void validate(bytes_view v) const {
|
||||
std::vector<bytes_view> values(begin(v), end(v));
|
||||
if (AllowPrefixes == allow_prefixes::no && values.size() < _types.size()) {
|
||||
throw marshal_exception(fmt::format("compound::validate(): non-prefixable compound cannot be a prefix"));
|
||||
}
|
||||
if (values.size() > _types.size()) {
|
||||
throw marshal_exception(fmt::format("compound::validate(): cannot have more values than types, have {} values but only {} types",
|
||||
values.size(), _types.size()));
|
||||
}
|
||||
for (size_t i = 0; i != values.size(); ++i) {
|
||||
//FIXME: is it safe to assume internal serialization-format format?
|
||||
_types[i]->validate(values[i], cql_serialization_format::internal());
|
||||
}
|
||||
void validate(bytes_view v) {
|
||||
// FIXME: implement
|
||||
warn(unimplemented::cause::VALIDATION);
|
||||
}
|
||||
bool equal(bytes_view v1, bytes_view v2) const {
|
||||
bool equal(bytes_view v1, bytes_view v2) {
|
||||
if (_byte_order_equal) {
|
||||
return compare_unsigned(v1, v2) == 0;
|
||||
}
|
||||
|
||||
@@ -27,9 +27,6 @@
|
||||
#include "schema.hh"
|
||||
#include "sstables/version.hh"
|
||||
|
||||
//FIXME: de-inline methods and define this as static in a .cc file.
|
||||
extern logging::logger compound_logger;
|
||||
|
||||
//
|
||||
// This header provides adaptors between the representation used by our compound_type<>
|
||||
// and representation used by Origin.
|
||||
@@ -213,8 +210,6 @@ public:
|
||||
, _is_compound(true)
|
||||
{ }
|
||||
|
||||
explicit composite(const composite_view& v);
|
||||
|
||||
composite()
|
||||
: _bytes()
|
||||
, _is_compound(true)
|
||||
@@ -342,9 +337,8 @@ public:
|
||||
class iterator : public std::iterator<std::input_iterator_tag, const component_view> {
|
||||
bytes_view _v;
|
||||
component_view _current;
|
||||
bool _strict_mode = true;
|
||||
private:
|
||||
void do_read_current() {
|
||||
void read_current() {
|
||||
size_type len;
|
||||
{
|
||||
if (_v.empty()) {
|
||||
@@ -360,23 +354,11 @@ public:
|
||||
_v.remove_prefix(len);
|
||||
_current = component_view(std::move(value), to_eoc(read_simple<eoc_type>(_v)));
|
||||
}
|
||||
void read_current() {
|
||||
try {
|
||||
do_read_current();
|
||||
} catch (marshal_exception&) {
|
||||
if (_strict_mode) {
|
||||
on_internal_error(compound_logger, std::current_exception());
|
||||
} else {
|
||||
throw;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
struct end_iterator_tag {};
|
||||
|
||||
// In strict-mode de-serialization errors will invoke `on_internal_error()`.
|
||||
iterator(const bytes_view& v, bool is_compound, bool is_static, bool strict_mode = true)
|
||||
: _v(v), _strict_mode(strict_mode) {
|
||||
iterator(const bytes_view& v, bool is_compound, bool is_static)
|
||||
: _v(v) {
|
||||
if (is_static) {
|
||||
_v.remove_prefix(2);
|
||||
}
|
||||
@@ -390,7 +372,6 @@ public:
|
||||
|
||||
iterator(end_iterator_tag) : _v(nullptr, 0) {}
|
||||
|
||||
public:
|
||||
iterator& operator++() {
|
||||
read_current();
|
||||
return *this;
|
||||
@@ -406,9 +387,6 @@ public:
|
||||
const value_type* operator->() const { return &_current; }
|
||||
bool operator!=(const iterator& i) const { return _v.begin() != i._v.begin(); }
|
||||
bool operator==(const iterator& i) const { return _v.begin() == i._v.begin(); }
|
||||
|
||||
friend class composite;
|
||||
friend class composite_view;
|
||||
};
|
||||
|
||||
iterator begin() const {
|
||||
@@ -505,7 +483,6 @@ public:
|
||||
};
|
||||
|
||||
class composite_view final {
|
||||
friend class composite;
|
||||
bytes_view _bytes;
|
||||
bool _is_compound;
|
||||
public:
|
||||
@@ -578,21 +555,6 @@ public:
|
||||
return composite::is_static(_bytes, _is_compound);
|
||||
}
|
||||
|
||||
bool is_valid() const {
|
||||
try {
|
||||
auto it = composite::iterator(_bytes, _is_compound, is_static(), false);
|
||||
const auto end = composite::iterator(composite::iterator::end_iterator_tag());
|
||||
size_t s = 0;
|
||||
for (; it != end; ++it) {
|
||||
auto& c = *it;
|
||||
s += c.first.size() + sizeof(composite::size_type) + sizeof(composite::eoc_type);
|
||||
}
|
||||
return s == _bytes.size();
|
||||
} catch (marshal_exception&) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
explicit operator bytes_view() const {
|
||||
return _bytes;
|
||||
}
|
||||
@@ -605,11 +567,6 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
inline
|
||||
composite::composite(const composite_view& v)
|
||||
: composite(bytes(v._bytes), v._is_compound)
|
||||
{ }
|
||||
|
||||
inline
|
||||
std::ostream& operator<<(std::ostream& os, const composite& v) {
|
||||
return os << composite_view(v);
|
||||
|
||||
@@ -152,39 +152,41 @@ struct uuid_type_impl final : public concrete_type<utils::UUID> {
|
||||
|
||||
template <typename Func> using visit_ret_type = std::invoke_result_t<Func, const ascii_type_impl&>;
|
||||
|
||||
template <typename Func> concept CanHandleAllTypes = requires(Func f) {
|
||||
{ f(*static_cast<const ascii_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const boolean_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const byte_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const bytes_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const counter_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const date_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const decimal_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const double_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const duration_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const empty_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const float_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const inet_addr_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const int32_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const list_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const long_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const map_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const reversed_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const set_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const short_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const simple_date_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const time_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const timestamp_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const timeuuid_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const tuple_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const user_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const utf8_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const uuid_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
{ f(*static_cast<const varint_type_impl*>(nullptr)) } -> std::same_as<visit_ret_type<Func>>;
|
||||
GCC6_CONCEPT(
|
||||
template <typename Func> concept bool CanHandleAllTypes = requires(Func f) {
|
||||
{ f(*static_cast<const ascii_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const boolean_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const byte_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const bytes_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const counter_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const date_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const decimal_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const double_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const duration_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const empty_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const float_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const inet_addr_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const int32_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const list_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const long_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const map_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const reversed_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const set_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const short_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const simple_date_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const time_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const timestamp_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const timeuuid_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const tuple_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const user_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const utf8_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const uuid_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
{ f(*static_cast<const varint_type_impl*>(nullptr)) } -> visit_ret_type<Func>;
|
||||
};
|
||||
)
|
||||
|
||||
template<typename Func>
|
||||
requires CanHandleAllTypes<Func>
|
||||
GCC6_CONCEPT(requires CanHandleAllTypes<Func>)
|
||||
static inline visit_ret_type<Func> visit(const abstract_type& t, Func&& f) {
|
||||
switch (t.get_kind()) {
|
||||
case abstract_type::kind::ascii:
|
||||
|
||||
380
configure.py
380
configure.py
@@ -32,8 +32,6 @@ import tempfile
|
||||
import textwrap
|
||||
from distutils.spawn import find_executable
|
||||
|
||||
curdir = os.getcwd()
|
||||
|
||||
tempfile.tempdir = "./build/tmp"
|
||||
|
||||
configure_args = str.join(' ', [shlex.quote(x) for x in sys.argv[1:]])
|
||||
@@ -168,27 +166,9 @@ def maybe_static(flag, libs):
|
||||
return libs
|
||||
|
||||
|
||||
class Source(object):
|
||||
def __init__(self, source, hh_prefix, cc_prefix):
|
||||
self.source = source
|
||||
self.hh_prefix = hh_prefix
|
||||
self.cc_prefix = cc_prefix
|
||||
|
||||
def headers(self, gen_dir):
|
||||
return [x for x in self.generated(gen_dir) if x.endswith(self.hh_prefix)]
|
||||
|
||||
def sources(self, gen_dir):
|
||||
return [x for x in self.generated(gen_dir) if x.endswith(self.cc_prefix)]
|
||||
|
||||
def objects(self, gen_dir):
|
||||
return [x.replace(self.cc_prefix, '.o') for x in self.sources(gen_dir)]
|
||||
|
||||
def endswith(self, end):
|
||||
return self.source.endswith(end)
|
||||
|
||||
class Thrift(Source):
|
||||
class Thrift(object):
|
||||
def __init__(self, source, service):
|
||||
Source.__init__(self, source, '.h', '.cpp')
|
||||
self.source = source
|
||||
self.service = service
|
||||
|
||||
def generated(self, gen_dir):
|
||||
@@ -199,6 +179,19 @@ class Thrift(Source):
|
||||
for ext in ['.cpp', '.h']]
|
||||
return [os.path.join(gen_dir, file) for file in files]
|
||||
|
||||
def headers(self, gen_dir):
|
||||
return [x for x in self.generated(gen_dir) if x.endswith('.h')]
|
||||
|
||||
def sources(self, gen_dir):
|
||||
return [x for x in self.generated(gen_dir) if x.endswith('.cpp')]
|
||||
|
||||
def objects(self, gen_dir):
|
||||
return [x.replace('.cpp', '.o') for x in self.sources(gen_dir)]
|
||||
|
||||
def endswith(self, end):
|
||||
return self.source.endswith(end)
|
||||
|
||||
|
||||
def default_target_arch():
|
||||
if platform.machine() in ['i386', 'i686', 'x86_64']:
|
||||
return 'westmere' # support PCLMUL
|
||||
@@ -208,9 +201,9 @@ def default_target_arch():
|
||||
return ''
|
||||
|
||||
|
||||
class Antlr3Grammar(Source):
|
||||
class Antlr3Grammar(object):
|
||||
def __init__(self, source):
|
||||
Source.__init__(self, source, '.hpp', '.cpp')
|
||||
self.source = source
|
||||
|
||||
def generated(self, gen_dir):
|
||||
basename = os.path.splitext(self.source)[0]
|
||||
@@ -218,12 +211,18 @@ class Antlr3Grammar(Source):
|
||||
for ext in ['Lexer.cpp', 'Lexer.hpp', 'Parser.cpp', 'Parser.hpp']]
|
||||
return [os.path.join(gen_dir, file) for file in files]
|
||||
|
||||
class Json2Code(Source):
|
||||
def __init__(self, source):
|
||||
Source.__init__(self, source, '.hh', '.cc')
|
||||
def headers(self, gen_dir):
|
||||
return [x for x in self.generated(gen_dir) if x.endswith('.hpp')]
|
||||
|
||||
def sources(self, gen_dir):
|
||||
return [x for x in self.generated(gen_dir) if x.endswith('.cpp')]
|
||||
|
||||
def objects(self, gen_dir):
|
||||
return [x.replace('.cpp', '.o') for x in self.sources(gen_dir)]
|
||||
|
||||
def endswith(self, end):
|
||||
return self.source.endswith(end)
|
||||
|
||||
def generated(self, gen_dir):
|
||||
return [os.path.join(gen_dir, self.source + '.hh'), os.path.join(gen_dir, self.source + '.cc')]
|
||||
|
||||
def find_headers(repodir, excluded_dirs):
|
||||
walker = os.walk(repodir)
|
||||
@@ -249,16 +248,16 @@ def find_headers(repodir, excluded_dirs):
|
||||
|
||||
modes = {
|
||||
'debug': {
|
||||
'cxxflags': '-DDEBUG -DDEBUG_LSA_SANITIZER -DSCYLLA_ENABLE_ERROR_INJECTION',
|
||||
'cxxflags': '-DDEBUG -DDEBUG_LSA_SANITIZER -DSEASTAR_ENABLE_ALLOC_FAILURE_INJECTION -DSCYLLA_ENABLE_ERROR_INJECTION',
|
||||
'cxx_ld_flags': '-Wstack-usage=%s' % (1024*40),
|
||||
},
|
||||
'release': {
|
||||
'cxxflags': '',
|
||||
'cxx_ld_flags': '-O3 -Wstack-usage=%s' % (1024*13),
|
||||
'cxx_ld_flags': '-O3 -Wstack-usage=%s' % (1024*29),
|
||||
},
|
||||
'dev': {
|
||||
'cxxflags': '-DSEASTAR_ENABLE_ALLOC_FAILURE_INJECTION -DSCYLLA_ENABLE_ERROR_INJECTION',
|
||||
'cxx_ld_flags': '-O1 -Wstack-usage=%s' % (1024*21),
|
||||
'cxx_ld_flags': '-O1 -Wstack-usage=%s' % (1024*29),
|
||||
},
|
||||
'sanitize': {
|
||||
'cxxflags': '-DDEBUG -DDEBUG_LSA_SANITIZER -DSCYLLA_ENABLE_ERROR_INJECTION',
|
||||
@@ -270,7 +269,6 @@ scylla_tests = set([
|
||||
'test/boost/UUID_test',
|
||||
'test/boost/aggregate_fcts_test',
|
||||
'test/boost/allocation_strategy_test',
|
||||
'test/boost/alternator_base64_test',
|
||||
'test/boost/anchorless_list_test',
|
||||
'test/boost/auth_passwords_test',
|
||||
'test/boost/auth_resource_test',
|
||||
@@ -280,7 +278,6 @@ scylla_tests = set([
|
||||
'test/boost/broken_sstable_test',
|
||||
'test/boost/bytes_ostream_test',
|
||||
'test/boost/cache_flat_mutation_reader_test',
|
||||
'test/boost/cached_file_test',
|
||||
'test/boost/caching_options_test',
|
||||
'test/boost/canonical_mutation_test',
|
||||
'test/boost/cartesian_product_test',
|
||||
@@ -329,7 +326,6 @@ scylla_tests = set([
|
||||
'test/boost/linearizing_input_stream_test',
|
||||
'test/boost/loading_cache_test',
|
||||
'test/boost/log_heap_test',
|
||||
'test/boost/estimated_histogram_test',
|
||||
'test/boost/logalloc_test',
|
||||
'test/boost/managed_vector_test',
|
||||
'test/boost/map_difference_test',
|
||||
@@ -369,7 +365,6 @@ scylla_tests = set([
|
||||
'test/boost/schema_changes_test',
|
||||
'test/boost/sstable_conforms_to_mutation_source_test',
|
||||
'test/boost/sstable_resharding_test',
|
||||
'test/boost/sstable_directory_test',
|
||||
'test/boost/sstable_test',
|
||||
'test/boost/storage_proxy_test',
|
||||
'test/boost/top_k_test',
|
||||
@@ -419,13 +414,11 @@ perf_tests = set([
|
||||
'test/perf/perf_mutation_fragment',
|
||||
'test/perf/perf_idl',
|
||||
'test/perf/perf_vint',
|
||||
'test/perf/perf_big_decimal',
|
||||
])
|
||||
|
||||
apps = set([
|
||||
'scylla',
|
||||
'test/tools/cql_repl',
|
||||
'tools/scylla-types',
|
||||
])
|
||||
|
||||
tests = scylla_tests | perf_tests
|
||||
@@ -446,7 +439,6 @@ arg_parser.add_argument('--so', dest='so', action='store_true',
|
||||
help='Build shared object (SO) instead of executable')
|
||||
arg_parser.add_argument('--mode', action='append', choices=list(modes.keys()), dest='selected_modes')
|
||||
arg_parser.add_argument('--with', dest='artifacts', action='append', choices=all_artifacts, default=[])
|
||||
arg_parser.add_argument('--with-seastar', action='store', dest='seastar_path', default='seastar', help='Path to Seastar sources')
|
||||
arg_parser.add_argument('--cflags', action='store', dest='user_cflags', default='',
|
||||
help='Extra flags for the C++ compiler')
|
||||
arg_parser.add_argument('--ldflags', action='store', dest='user_ldflags', default='',
|
||||
@@ -459,8 +451,8 @@ arg_parser.add_argument('--c-compiler', action='store', dest='cc', default='gcc'
|
||||
help='C compiler path')
|
||||
arg_parser.add_argument('--with-osv', action='store', dest='with_osv', default='',
|
||||
help='Shortcut for compile for OSv')
|
||||
add_tristate(arg_parser, name='dpdk', dest='dpdk',
|
||||
help='Use dpdk (from seastar dpdk sources) (default=True for release builds)')
|
||||
arg_parser.add_argument('--enable-dpdk', action='store_true', dest='dpdk', default=False,
|
||||
help='Enable dpdk (from seastar dpdk sources)')
|
||||
arg_parser.add_argument('--dpdk-target', action='store', dest='dpdk_target', default='',
|
||||
help='Path to DPDK SDK target location (e.g. <DPDK SDK dir>/x86_64-native-linuxapp-gcc)')
|
||||
arg_parser.add_argument('--debuginfo', action='store', dest='debuginfo', type=int, default=1,
|
||||
@@ -477,8 +469,12 @@ arg_parser.add_argument('--tests-debuginfo', action='store', dest='tests_debugin
|
||||
help='Enable(1)/disable(0)compiler debug information generation for tests')
|
||||
arg_parser.add_argument('--python', action='store', dest='python', default='python3',
|
||||
help='Python3 path')
|
||||
add_tristate(arg_parser, name='hwloc', dest='hwloc', help='hwloc support')
|
||||
add_tristate(arg_parser, name='xen', dest='xen', help='Xen support')
|
||||
arg_parser.add_argument('--split-dwarf', dest='split_dwarf', action='store_true', default=False,
|
||||
help='use of split dwarf (https://gcc.gnu.org/wiki/DebugFission) to speed up linking')
|
||||
arg_parser.add_argument('--enable-gcc6-concepts', dest='gcc6_concepts', action='store_true', default=False,
|
||||
help='enable experimental support for C++ Concepts as implemented in GCC 6')
|
||||
arg_parser.add_argument('--enable-alloc-failure-injector', dest='alloc_failure_injector', action='store_true', default=False,
|
||||
help='enable allocation failure injection')
|
||||
arg_parser.add_argument('--with-antlr3', dest='antlr3_exec', action='store', default=None,
|
||||
@@ -497,7 +493,6 @@ extra_cxxflags = {}
|
||||
cassandra_interface = Thrift(source='interface/cassandra.thrift', service='Cassandra')
|
||||
|
||||
scylla_core = (['database.cc',
|
||||
'absl-flat_hash_map.cc',
|
||||
'table.cc',
|
||||
'atomic_cell.cc',
|
||||
'collection_mutation.cc',
|
||||
@@ -516,13 +511,13 @@ scylla_core = (['database.cc',
|
||||
'frozen_mutation.cc',
|
||||
'memtable.cc',
|
||||
'schema_mutations.cc',
|
||||
'supervisor.cc',
|
||||
'utils/logalloc.cc',
|
||||
'utils/large_bitset.cc',
|
||||
'utils/buffer_input_stream.cc',
|
||||
'utils/limiting_data_source.cc',
|
||||
'utils/updateable_value.cc',
|
||||
'utils/directories.cc',
|
||||
'utils/generation-number.cc',
|
||||
'mutation_partition.cc',
|
||||
'mutation_partition_view.cc',
|
||||
'mutation_partition_serializer.cc',
|
||||
@@ -546,18 +541,14 @@ scylla_core = (['database.cc',
|
||||
'sstables/compaction_strategy.cc',
|
||||
'sstables/size_tiered_compaction_strategy.cc',
|
||||
'sstables/leveled_compaction_strategy.cc',
|
||||
'sstables/time_window_compaction_strategy.cc',
|
||||
'sstables/compaction_manager.cc',
|
||||
'sstables/integrity_checked_file_impl.cc',
|
||||
'sstables/prepended_input_stream.cc',
|
||||
'sstables/m_format_read_helpers.cc',
|
||||
'sstables/sstable_directory.cc',
|
||||
'transport/event.cc',
|
||||
'transport/event_notifier.cc',
|
||||
'transport/server.cc',
|
||||
'transport/controller.cc',
|
||||
'transport/messages/result_message.cc',
|
||||
'cdc/cdc_partitioner.cc',
|
||||
'cdc/log.cc',
|
||||
'cdc/split.cc',
|
||||
'cdc/generation.cc',
|
||||
@@ -578,7 +569,6 @@ scylla_core = (['database.cc',
|
||||
'cql3/functions/functions.cc',
|
||||
'cql3/functions/aggregate_fcts.cc',
|
||||
'cql3/functions/castas_fcts.cc',
|
||||
'cql3/functions/error_injection_fcts.cc',
|
||||
'cql3/statements/cf_prop_defs.cc',
|
||||
'cql3/statements/cf_statement.cc',
|
||||
'cql3/statements/authentication_statement.cc',
|
||||
@@ -625,7 +615,6 @@ scylla_core = (['database.cc',
|
||||
'cql3/role_name.cc',
|
||||
'thrift/handler.cc',
|
||||
'thrift/server.cc',
|
||||
'thrift/controller.cc',
|
||||
'thrift/thrift_validation.cc',
|
||||
'utils/runtime.cc',
|
||||
'utils/murmur_hash.cc',
|
||||
@@ -683,7 +672,6 @@ scylla_core = (['database.cc',
|
||||
'db/view/view.cc',
|
||||
'db/view/view_update_generator.cc',
|
||||
'db/view/row_locking.cc',
|
||||
'db/sstables-format-selector.cc',
|
||||
'index/secondary_index_manager.cc',
|
||||
'index/secondary_index.cc',
|
||||
'utils/UUID_gen.cc',
|
||||
@@ -799,47 +787,46 @@ scylla_core = (['database.cc',
|
||||
'utils/like_matcher.cc',
|
||||
'utils/error_injection.cc',
|
||||
'mutation_writer/timestamp_based_splitting_writer.cc',
|
||||
'mutation_writer/shard_based_splitting_writer.cc',
|
||||
'lua.cc',
|
||||
] + [Antlr3Grammar('cql3/Cql.g')] + [Thrift('interface/cassandra.thrift', 'Cassandra')]
|
||||
)
|
||||
|
||||
api = ['api/api.cc',
|
||||
Json2Code('api/api-doc/storage_service.json'),
|
||||
Json2Code('api/api-doc/lsa.json'),
|
||||
'api/api-doc/storage_service.json',
|
||||
'api/api-doc/lsa.json',
|
||||
'api/storage_service.cc',
|
||||
Json2Code('api/api-doc/commitlog.json'),
|
||||
'api/api-doc/commitlog.json',
|
||||
'api/commitlog.cc',
|
||||
Json2Code('api/api-doc/gossiper.json'),
|
||||
'api/api-doc/gossiper.json',
|
||||
'api/gossiper.cc',
|
||||
Json2Code('api/api-doc/failure_detector.json'),
|
||||
'api/api-doc/failure_detector.json',
|
||||
'api/failure_detector.cc',
|
||||
Json2Code('api/api-doc/column_family.json'),
|
||||
'api/api-doc/column_family.json',
|
||||
'api/column_family.cc',
|
||||
'api/messaging_service.cc',
|
||||
Json2Code('api/api-doc/messaging_service.json'),
|
||||
Json2Code('api/api-doc/storage_proxy.json'),
|
||||
'api/api-doc/messaging_service.json',
|
||||
'api/api-doc/storage_proxy.json',
|
||||
'api/storage_proxy.cc',
|
||||
Json2Code('api/api-doc/cache_service.json'),
|
||||
'api/api-doc/cache_service.json',
|
||||
'api/cache_service.cc',
|
||||
Json2Code('api/api-doc/collectd.json'),
|
||||
'api/api-doc/collectd.json',
|
||||
'api/collectd.cc',
|
||||
Json2Code('api/api-doc/endpoint_snitch_info.json'),
|
||||
'api/api-doc/endpoint_snitch_info.json',
|
||||
'api/endpoint_snitch.cc',
|
||||
Json2Code('api/api-doc/compaction_manager.json'),
|
||||
'api/api-doc/compaction_manager.json',
|
||||
'api/compaction_manager.cc',
|
||||
Json2Code('api/api-doc/hinted_handoff.json'),
|
||||
'api/api-doc/hinted_handoff.json',
|
||||
'api/hinted_handoff.cc',
|
||||
Json2Code('api/api-doc/utils.json'),
|
||||
'api/api-doc/utils.json',
|
||||
'api/lsa.cc',
|
||||
Json2Code('api/api-doc/stream_manager.json'),
|
||||
'api/api-doc/stream_manager.json',
|
||||
'api/stream_manager.cc',
|
||||
Json2Code('api/api-doc/system.json'),
|
||||
'api/api-doc/system.json',
|
||||
'api/system.cc',
|
||||
'api/config.cc',
|
||||
Json2Code('api/api-doc/config.json'),
|
||||
'api/error_injection.cc',
|
||||
Json2Code('api/api-doc/error_injection.json'),
|
||||
'api/api-doc/config.json',
|
||||
'api/error_injection.cc',
|
||||
'api/api-doc/error_injection.json',
|
||||
]
|
||||
|
||||
alternator = [
|
||||
@@ -905,15 +892,12 @@ scylla_tests_generic_dependencies = [
|
||||
'test/lib/cql_test_env.cc',
|
||||
'test/lib/test_services.cc',
|
||||
'test/lib/log.cc',
|
||||
'test/lib/reader_permit.cc',
|
||||
'test/lib/test_utils.cc',
|
||||
]
|
||||
|
||||
scylla_tests_dependencies = scylla_core + idls + scylla_tests_generic_dependencies + [
|
||||
'test/lib/cql_assertions.cc',
|
||||
'test/lib/result_set_assertions.cc',
|
||||
'test/lib/mutation_source_test.cc',
|
||||
'test/lib/sstable_utils.cc',
|
||||
'test/lib/data_model.cc',
|
||||
'test/lib/exception_utils.cc',
|
||||
'test/lib/random_schema.cc',
|
||||
@@ -922,8 +906,6 @@ scylla_tests_dependencies = scylla_core + idls + scylla_tests_generic_dependenci
|
||||
deps = {
|
||||
'scylla': idls + ['main.cc', 'release.cc', 'build_id.cc'] + scylla_core + api + alternator + redis,
|
||||
'test/tools/cql_repl': idls + ['test/tools/cql_repl.cc'] + scylla_core + scylla_tests_generic_dependencies,
|
||||
#FIXME: we don't need all of scylla_core here, only the types module, need to modularize scylla_core.
|
||||
'tools/scylla-types': idls + ['tools/scylla-types.cc'] + scylla_core,
|
||||
}
|
||||
|
||||
pure_boost_tests = set([
|
||||
@@ -962,7 +944,6 @@ pure_boost_tests = set([
|
||||
])
|
||||
|
||||
tests_not_using_seastar_test_framework = set([
|
||||
'test/boost/alternator_base64_test',
|
||||
'test/boost/small_vector_test',
|
||||
'test/manual/gossip',
|
||||
'test/manual/message',
|
||||
@@ -972,9 +953,11 @@ tests_not_using_seastar_test_framework = set([
|
||||
'test/perf/perf_hash',
|
||||
'test/perf/perf_mutation',
|
||||
'test/perf/perf_row_cache_update',
|
||||
'test/perf/perf_sstable',
|
||||
'test/unit/lsa_async_eviction_test',
|
||||
'test/unit/lsa_sync_eviction_test',
|
||||
'test/unit/row_cache_alloc_stress_test',
|
||||
'test/unit/row_cache_stress_test',
|
||||
'test/manual/sstable_scan_footprint_test',
|
||||
]) | pure_boost_tests
|
||||
|
||||
@@ -996,10 +979,13 @@ perf_tests_seastar_deps = [
|
||||
for t in perf_tests:
|
||||
deps[t] = [t + '.cc'] + scylla_tests_dependencies + perf_tests_seastar_deps
|
||||
|
||||
deps['test/boost/sstable_test'] += ['test/lib/normalizing_reader.cc']
|
||||
deps['test/boost/sstable_datafile_test'] += ['test/lib/normalizing_reader.cc']
|
||||
deps['test/boost/mutation_reader_test'] += ['test/lib/dummy_sharder.cc' ]
|
||||
deps['test/boost/multishard_combining_reader_as_mutation_source_test'] += ['test/lib/dummy_sharder.cc' ]
|
||||
deps['test/boost/sstable_test'] += ['test/lib/sstable_utils.cc', 'test/lib/normalizing_reader.cc']
|
||||
deps['test/boost/sstable_datafile_test'] += ['test/lib/sstable_utils.cc', 'test/lib/normalizing_reader.cc']
|
||||
deps['test/boost/sstable_resharding_test'] += ['test/lib/sstable_utils.cc' ]
|
||||
deps['test/boost/mutation_reader_test'] += ['test/lib/sstable_utils.cc', 'test/lib/dummy_partitioner.cc' ]
|
||||
deps['test/boost/multishard_combining_reader_as_mutation_source_test'] += ['test/lib/sstable_utils.cc', 'test/lib/dummy_partitioner.cc' ]
|
||||
deps['test/boost/sstable_mutation_test'] += ['test/lib/sstable_utils.cc']
|
||||
deps['test/boost/sstable_conforms_to_mutation_source_test'] += ['test/lib/sstable_utils.cc']
|
||||
|
||||
deps['test/boost/bytes_ostream_test'] = [
|
||||
"test/boost/bytes_ostream_test.cc",
|
||||
@@ -1013,7 +999,6 @@ deps['test/boost/UUID_test'] = ['utils/UUID_gen.cc', 'test/boost/UUID_test.cc',
|
||||
deps['test/boost/murmur_hash_test'] = ['bytes.cc', 'utils/murmur_hash.cc', 'test/boost/murmur_hash_test.cc']
|
||||
deps['test/boost/allocation_strategy_test'] = ['test/boost/allocation_strategy_test.cc', 'utils/logalloc.cc', 'utils/dynamic_bitset.cc']
|
||||
deps['test/boost/log_heap_test'] = ['test/boost/log_heap_test.cc']
|
||||
deps['test/boost/estimated_histogram_test'] = ['test/boost/estimated_histogram_test.cc']
|
||||
deps['test/boost/anchorless_list_test'] = ['test/boost/anchorless_list_test.cc']
|
||||
deps['test/perf/perf_fast_forward'] += ['release.cc']
|
||||
deps['test/perf/perf_simple_query'] += ['release.cc']
|
||||
@@ -1033,7 +1018,6 @@ deps['test/boost/linearizing_input_stream_test'] = [
|
||||
]
|
||||
|
||||
deps['test/boost/duration_test'] += ['test/lib/exception_utils.cc']
|
||||
deps['test/boost/alternator_base64_test'] += ['alternator/base64.cc']
|
||||
|
||||
deps['utils/gz/gen_crc_combine_table'] = ['utils/gz/gen_crc_combine_table.cc']
|
||||
|
||||
@@ -1096,14 +1080,34 @@ else:
|
||||
# a list element means a list of alternative packages to consider
|
||||
# the first element becomes the HAVE_pkg define
|
||||
# a string element is a package name with no alternatives
|
||||
optional_packages = [[]]
|
||||
optional_packages = [['libsystemd', 'libsystemd-daemon']]
|
||||
pkgs = []
|
||||
|
||||
# Lua can be provided by lua53 package on Debian-like
|
||||
# systems and by Lua on others.
|
||||
pkgs.append('lua53' if have_pkg('lua53') else 'lua')
|
||||
|
||||
pkgs.append('libsystemd')
|
||||
|
||||
def setup_first_pkg_of_list(pkglist):
|
||||
# The HAVE_pkg symbol is taken from the first alternative
|
||||
upkg = pkglist[0].upper().replace('-', '_')
|
||||
for pkg in pkglist:
|
||||
if have_pkg(pkg):
|
||||
pkgs.append(pkg)
|
||||
defines.append('HAVE_{}=1'.format(upkg))
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
for pkglist in optional_packages:
|
||||
if isinstance(pkglist, str):
|
||||
pkglist = [pkglist]
|
||||
if not setup_first_pkg_of_list(pkglist):
|
||||
if len(pkglist) == 1:
|
||||
print('Missing optional package {pkglist[0]}'.format(**locals()))
|
||||
else:
|
||||
alternatives = ':'.join(pkglist[1:])
|
||||
print('Missing optional package {pkglist[0]} (or alteratives {alternatives})'.format(**locals()))
|
||||
|
||||
|
||||
compiler_test_src = '''
|
||||
@@ -1176,24 +1180,8 @@ extra_cxxflags["release.cc"] = "-DSCYLLA_VERSION=\"\\\"" + scylla_version + "\\\
|
||||
for m in ['debug', 'release', 'sanitize']:
|
||||
modes[m]['cxxflags'] += ' ' + dbgflag
|
||||
|
||||
# The relocatable package includes its own dynamic linker. We don't
|
||||
# know the path it will be installed to, so for now use a very long
|
||||
# path so that patchelf doesn't need to edit the program headers. The
|
||||
# kernel imposes a limit of 4096 bytes including the null. The other
|
||||
# constraint is that the build-id has to be in the first page, so we
|
||||
# can't use all 4096 bytes for the dynamic linker.
|
||||
# In here we just guess that 2000 extra / should be enough to cover
|
||||
# any path we get installed to but not so large that the build-id is
|
||||
# pushed to the second page.
|
||||
# At the end of the build we check that the build-id is indeed in the
|
||||
# first page. At install time we check that patchelf doesn't modify
|
||||
# the program headers.
|
||||
|
||||
gcc_linker_output = subprocess.check_output(['gcc', '-###', '/dev/null', '-o', 't'], stderr=subprocess.STDOUT).decode('utf-8')
|
||||
original_dynamic_linker = re.search('-dynamic-linker ([^ ]*)', gcc_linker_output).groups()[0]
|
||||
# gdb has a SO_NAME_MAX_PATH_SIZE of 512, so limit the path size to
|
||||
# that. The 512 includes the null at the end, hence the 511 bellow.
|
||||
dynamic_linker = '/' * (511 - len(original_dynamic_linker)) + original_dynamic_linker
|
||||
get_dynamic_linker_output = subprocess.check_output(['./reloc/get-dynamic-linker.sh'], shell=True)
|
||||
dynamic_linker = get_dynamic_linker_output.decode('utf-8').strip()
|
||||
|
||||
forced_ldflags = '-Wl,'
|
||||
|
||||
@@ -1209,14 +1197,13 @@ args.user_ldflags = forced_ldflags + ' ' + args.user_ldflags
|
||||
|
||||
args.user_cflags += ' -Wno-error=stack-usage='
|
||||
|
||||
args.user_cflags += f"-ffile-prefix-map={curdir}=."
|
||||
|
||||
seastar_cflags = args.user_cflags
|
||||
if args.target != '':
|
||||
seastar_cflags += ' -march=' + args.target
|
||||
seastar_ldflags = args.user_ldflags
|
||||
|
||||
libdeflate_cflags = seastar_cflags
|
||||
zstd_cflags = seastar_cflags + ' -Wno-implicit-fallthrough'
|
||||
|
||||
MODE_TO_CMAKE_BUILD_TYPE = {'release' : 'RelWithDebInfo', 'debug' : 'Debug', 'dev' : 'Dev', 'sanitize' : 'Sanitize' }
|
||||
|
||||
@@ -1230,8 +1217,8 @@ def configure_seastar(build_dir, mode):
|
||||
'-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY=ON',
|
||||
'-DSeastar_CXX_FLAGS={}'.format((seastar_cflags + ' ' + modes[mode]['cxx_ld_flags']).replace(' ', ';')),
|
||||
'-DSeastar_LD_FLAGS={}'.format(seastar_ldflags),
|
||||
'-DSeastar_CXX_DIALECT=gnu++20',
|
||||
'-DSeastar_API_LEVEL=4',
|
||||
'-DSeastar_CXX_DIALECT=gnu++17',
|
||||
'-DSeastar_STD_OPTIONAL_VARIANT_STRINGVIEW=ON',
|
||||
'-DSeastar_UNUSED_RESULT_ERROR=ON',
|
||||
]
|
||||
|
||||
@@ -1239,21 +1226,20 @@ def configure_seastar(build_dir, mode):
|
||||
stack_guards = 'ON' if args.stack_guards else 'OFF'
|
||||
seastar_cmake_args += ['-DSeastar_STACK_GUARDS={}'.format(stack_guards)]
|
||||
|
||||
dpdk = args.dpdk
|
||||
if dpdk is None:
|
||||
dpdk = mode == 'release'
|
||||
if dpdk:
|
||||
if args.dpdk:
|
||||
seastar_cmake_args += ['-DSeastar_DPDK=ON', '-DSeastar_DPDK_MACHINE=wsm']
|
||||
if args.gcc6_concepts:
|
||||
seastar_cmake_args += ['-DSeastar_GCC6_CONCEPTS=ON']
|
||||
if args.split_dwarf:
|
||||
seastar_cmake_args += ['-DSeastar_SPLIT_DWARF=ON']
|
||||
if args.alloc_failure_injector:
|
||||
seastar_cmake_args += ['-DSeastar_ALLOC_FAILURE_INJECTION=ON']
|
||||
|
||||
seastar_cmd = ['cmake', '-G', 'Ninja', os.path.relpath(args.seastar_path, seastar_build_dir)] + seastar_cmake_args
|
||||
seastar_cmd = ['cmake', '-G', 'Ninja', os.path.relpath('seastar', seastar_build_dir)] + seastar_cmake_args
|
||||
cmake_dir = seastar_build_dir
|
||||
if dpdk:
|
||||
if args.dpdk:
|
||||
# need to cook first
|
||||
cmake_dir = args.seastar_path # required by cooking.sh
|
||||
cmake_dir = 'seastar' # required by cooking.sh
|
||||
relative_seastar_build_dir = os.path.join('..', seastar_build_dir) # relative to seastar/
|
||||
seastar_cmd = ['./cooking.sh', '-i', 'dpdk', '-d', relative_seastar_build_dir, '--'] + seastar_cmd[4:]
|
||||
|
||||
@@ -1284,6 +1270,25 @@ for mode in build_modes:
|
||||
modes[mode]['seastar_cflags'] = seastar_pc_cflags
|
||||
modes[mode]['seastar_libs'] = seastar_pc_libs
|
||||
|
||||
# We need to use experimental features of the zstd library (to use our own allocators for the (de)compression context),
|
||||
# which are available only when the library is linked statically.
|
||||
def configure_zstd(build_dir, mode):
|
||||
zstd_build_dir = os.path.join(build_dir, mode, 'zstd')
|
||||
|
||||
zstd_cmake_args = [
|
||||
'-DCMAKE_BUILD_TYPE={}'.format(MODE_TO_CMAKE_BUILD_TYPE[mode]),
|
||||
'-DCMAKE_C_COMPILER={}'.format(args.cc),
|
||||
'-DCMAKE_CXX_COMPILER={}'.format(args.cxx),
|
||||
'-DCMAKE_C_FLAGS={}'.format(zstd_cflags),
|
||||
'-DZSTD_BUILD_PROGRAMS=OFF'
|
||||
]
|
||||
|
||||
zstd_cmd = ['cmake', '-G', 'Ninja', os.path.relpath('zstd/build/cmake', zstd_build_dir)] + zstd_cmake_args
|
||||
|
||||
print(zstd_cmd)
|
||||
os.makedirs(zstd_build_dir, exist_ok=True)
|
||||
subprocess.check_call(zstd_cmd, shell=False, cwd=zstd_build_dir)
|
||||
|
||||
def configure_abseil(build_dir, mode):
|
||||
abseil_build_dir = os.path.join(build_dir, mode, 'abseil')
|
||||
|
||||
@@ -1328,17 +1333,12 @@ args.user_cflags += " " + pkg_config('jsoncpp', '--cflags')
|
||||
args.user_cflags += ' -march=' + args.target
|
||||
libs = ' '.join([maybe_static(args.staticyamlcpp, '-lyaml-cpp'), '-latomic', '-llz4', '-lz', '-lsnappy', pkg_config('jsoncpp', '--libs'),
|
||||
' -lstdc++fs', ' -lcrypt', ' -lcryptopp', ' -lpthread',
|
||||
# Must link with static version of libzstd, since
|
||||
# experimental APIs that we use are only present there.
|
||||
maybe_static(True, '-lzstd'),
|
||||
maybe_static(args.staticboost, '-lboost_date_time -lboost_regex -licuuc'), ])
|
||||
|
||||
pkgconfig_libs = [
|
||||
'libxxhash',
|
||||
]
|
||||
xxhash_dir = 'xxHash'
|
||||
|
||||
args.user_cflags += ' ' + ' '.join([pkg_config(lib, '--cflags') for lib in pkgconfig_libs])
|
||||
libs += ' ' + ' '.join([pkg_config(lib, '--libs') for lib in pkgconfig_libs])
|
||||
if not os.path.exists(xxhash_dir) or not os.listdir(xxhash_dir):
|
||||
raise Exception(xxhash_dir + ' is empty. Run "git submodule update --init".')
|
||||
|
||||
if not args.staticboost:
|
||||
args.user_cflags += ' -DBOOST_TEST_DYN_LINK'
|
||||
@@ -1361,7 +1361,7 @@ args.user_cflags += '-I abseil'
|
||||
user_cflags = args.user_cflags + ' -fvisibility=hidden'
|
||||
user_ldflags = args.user_ldflags + ' -fvisibility=hidden'
|
||||
if args.staticcxx:
|
||||
user_ldflags += " -static-libstdc++"
|
||||
user_ldflags += " -static-libgcc -static-libstdc++"
|
||||
if args.staticthrift:
|
||||
thrift_libs = "-Wl,-Bstatic -lthrift -Wl,-Bdynamic"
|
||||
else:
|
||||
@@ -1385,6 +1385,9 @@ if args.ragel_exec:
|
||||
else:
|
||||
ragel_exec = "ragel"
|
||||
|
||||
for mode in build_modes:
|
||||
configure_zstd(outdir, mode)
|
||||
|
||||
for mode in build_modes:
|
||||
configure_abseil(outdir, mode)
|
||||
|
||||
@@ -1411,7 +1414,7 @@ with open(buildfile_tmp, 'w') as f:
|
||||
command = echo -e $text > $out
|
||||
description = GEN $out
|
||||
rule swagger
|
||||
command = {args.seastar_path}/scripts/seastar-json2code.py --create-cc -f $in -o $out
|
||||
command = seastar/scripts/seastar-json2code.py -f $in -o $out
|
||||
description = SWAGGER $out
|
||||
rule serializer
|
||||
command = {python} ./idl-compiler.py --ns ser -f $in -o $out
|
||||
@@ -1433,10 +1436,6 @@ with open(buildfile_tmp, 'w') as f:
|
||||
description = COPY $out
|
||||
rule package
|
||||
command = scripts/create-relocatable-package.py --mode $mode $out
|
||||
rule rpmbuild
|
||||
command = reloc/build_rpm.sh --reloc-pkg $in --builddir $out
|
||||
rule debbuild
|
||||
command = reloc/build_deb.sh --reloc-pkg $in --builddir $out
|
||||
''').format(**globals()))
|
||||
for mode in build_modes:
|
||||
modeval = modes[mode]
|
||||
@@ -1444,7 +1443,7 @@ with open(buildfile_tmp, 'w') as f:
|
||||
f.write(textwrap.dedent('''\
|
||||
cxx_ld_flags_{mode} = {cxx_ld_flags}
|
||||
ld_flags_{mode} = $cxx_ld_flags_{mode}
|
||||
cxxflags_{mode} = $cxx_ld_flags_{mode} {cxxflags} -iquote. -iquote $builddir/{mode}/gen
|
||||
cxxflags_{mode} = $cxx_ld_flags_{mode} {cxxflags} -I. -I $builddir/{mode}/gen
|
||||
libs_{mode} = -l{fmt_lib}
|
||||
seastar_libs_{mode} = {seastar_libs}
|
||||
rule cxx.{mode}
|
||||
@@ -1487,12 +1486,9 @@ with open(buildfile_tmp, 'w') as f:
|
||||
build/{mode}/gen/${{stem}}Parser.cpp
|
||||
description = ANTLR3 $in
|
||||
rule checkhh.{mode}
|
||||
command = $cxx -MD -MT $out -MF $out.d {seastar_cflags} $cxxflags $cxxflags_{mode} $obj_cxxflags --include $in -c -o $out build/{mode}/gen/empty.cc
|
||||
command = $cxx -MD -MT $out -MF $out.d {seastar_cflags} $cxxflags $cxxflags_{mode} $obj_cxxflags -x c++ --include=$in -c -o $out /dev/null
|
||||
description = CHECKHH $in
|
||||
depfile = $out.d
|
||||
rule test.{mode}
|
||||
command = ./test.py --mode={mode}
|
||||
description = TEST {mode}
|
||||
''').format(mode=mode, antlr3_exec=antlr3_exec, fmt_lib=fmt_lib, **modeval))
|
||||
f.write(
|
||||
'build {mode}: phony {artifacts}\n'.format(
|
||||
@@ -1501,7 +1497,7 @@ with open(buildfile_tmp, 'w') as f:
|
||||
)
|
||||
)
|
||||
compiles = {}
|
||||
swaggers = set()
|
||||
swaggers = {}
|
||||
serializers = {}
|
||||
thrifts = set()
|
||||
ragels = {}
|
||||
@@ -1523,13 +1519,12 @@ with open(buildfile_tmp, 'w') as f:
|
||||
objs += dep.objects('$builddir/' + mode + '/gen')
|
||||
if isinstance(dep, Antlr3Grammar):
|
||||
objs += dep.objects('$builddir/' + mode + '/gen')
|
||||
if isinstance(dep, Json2Code):
|
||||
objs += dep.objects('$builddir/' + mode + '/gen')
|
||||
if binary.endswith('.a'):
|
||||
f.write('build $builddir/{}/{}: ar.{} {}\n'.format(mode, binary, mode, str.join(' ', objs)))
|
||||
else:
|
||||
objs.extend(['$builddir/' + mode + '/' + artifact for artifact in [
|
||||
'libdeflate/libdeflate.a',
|
||||
'zstd/lib/libzstd.a',
|
||||
] + [
|
||||
'abseil/' + x for x in abseil_libs
|
||||
]])
|
||||
@@ -1564,7 +1559,8 @@ with open(buildfile_tmp, 'w') as f:
|
||||
hh = '$builddir/' + mode + '/gen/' + src.replace('.idl.hh', '.dist.hh')
|
||||
serializers[hh] = src
|
||||
elif src.endswith('.json'):
|
||||
swaggers.add(src)
|
||||
hh = '$builddir/' + mode + '/gen/' + src + '.hh'
|
||||
swaggers[hh] = src
|
||||
elif src.endswith('.rl'):
|
||||
hh = '$builddir/' + mode + '/gen/' + src.replace('.rl', '.hh')
|
||||
ragels[hh] = src
|
||||
@@ -1594,26 +1590,13 @@ with open(buildfile_tmp, 'w') as f:
|
||||
)
|
||||
)
|
||||
|
||||
f.write(
|
||||
'build {mode}-test: test.{mode} {test_executables} $builddir/{mode}/test/tools/cql_repl\n'.format(
|
||||
mode=mode,
|
||||
test_executables=' '.join(['$builddir/{}/{}'.format(mode, binary) for binary in tests]),
|
||||
)
|
||||
)
|
||||
f.write(
|
||||
'build {mode}-check: phony {mode}-headers {mode}-test\n'.format(
|
||||
mode=mode,
|
||||
)
|
||||
)
|
||||
|
||||
gen_dir = '$builddir/{}/gen'.format(mode)
|
||||
gen_headers = []
|
||||
for th in thrifts:
|
||||
gen_headers += th.headers('$builddir/{}/gen'.format(mode))
|
||||
for g in antlr3_grammars:
|
||||
gen_headers += g.headers('$builddir/{}/gen'.format(mode))
|
||||
for g in swaggers:
|
||||
gen_headers += g.headers('$builddir/{}/gen'.format(mode))
|
||||
gen_headers += list(swaggers.keys())
|
||||
gen_headers += list(serializers.keys())
|
||||
gen_headers += list(ragels.keys())
|
||||
gen_headers_dep = ' '.join(gen_headers)
|
||||
@@ -1623,13 +1606,9 @@ with open(buildfile_tmp, 'w') as f:
|
||||
f.write('build {}: cxx.{} {} || {} {}\n'.format(obj, mode, src, seastar_dep, gen_headers_dep))
|
||||
if src in extra_cxxflags:
|
||||
f.write(' cxxflags = {seastar_cflags} $cxxflags $cxxflags_{mode} {extra_cxxflags}\n'.format(mode=mode, extra_cxxflags=extra_cxxflags[src], **modeval))
|
||||
for swagger in swaggers:
|
||||
hh = swagger.headers(gen_dir)[0]
|
||||
cc = swagger.sources(gen_dir)[0]
|
||||
obj = swagger.objects(gen_dir)[0]
|
||||
src = swagger.source
|
||||
f.write('build {} | {} : swagger {} | {}/scripts/seastar-json2code.py\n'.format(hh, cc, src, args.seastar_path))
|
||||
f.write('build {}: cxx.{} {}\n'.format(obj, mode, cc))
|
||||
for hh in swaggers:
|
||||
src = swaggers[hh]
|
||||
f.write('build {}: swagger {} | seastar/scripts/seastar-json2code.py\n'.format(hh, src))
|
||||
for hh in serializers:
|
||||
src = serializers[hh]
|
||||
f.write('build {}: serializer {} | idl-compiler.py\n'.format(hh, src))
|
||||
@@ -1655,9 +1634,8 @@ with open(buildfile_tmp, 'w') as f:
|
||||
if has_sanitize_address_use_after_scope:
|
||||
flags += ' -fno-sanitize-address-use-after-scope'
|
||||
f.write(' obj_cxxflags = %s\n' % flags)
|
||||
f.write(f'build build/{mode}/gen/empty.cc: gen\n')
|
||||
for hh in headers:
|
||||
f.write('build $builddir/{mode}/{hh}.o: checkhh.{mode} {hh} | build/{mode}/gen/empty.cc || {gen_headers_dep}\n'.format(
|
||||
f.write('build $builddir/{mode}/{hh}.o: checkhh.{mode} {hh} || {gen_headers_dep}\n'.format(
|
||||
mode=mode, hh=hh, gen_headers_dep=gen_headers_dep))
|
||||
|
||||
f.write('build build/{mode}/seastar/libseastar.a: ninja | always\n'
|
||||
@@ -1678,20 +1656,17 @@ with open(buildfile_tmp, 'w') as f:
|
||||
f.write(textwrap.dedent('''\
|
||||
build build/{mode}/iotune: copy build/{mode}/seastar/apps/iotune/iotune
|
||||
''').format(**locals()))
|
||||
f.write('build build/{mode}/scylla-package.tar.gz: package build/{mode}/scylla build/{mode}/iotune build/SCYLLA-RELEASE-FILE build/SCYLLA-VERSION-FILE build/debian/debian | always\n'.format(**locals()))
|
||||
f.write('build build/{mode}/scylla-package.tar.gz: package build/{mode}/scylla build/{mode}/iotune build/SCYLLA-RELEASE-FILE build/SCYLLA-VERSION-FILE | always\n'.format(**locals()))
|
||||
f.write(' pool = submodule_pool\n')
|
||||
f.write(' mode = {mode}\n'.format(**locals()))
|
||||
f.write(f'build build/dist/{mode}/redhat: rpmbuild build/{mode}/scylla-package.tar.gz\n')
|
||||
f.write(f' pool = submodule_pool\n')
|
||||
f.write(f' mode = {mode}\n')
|
||||
f.write(f'build build/dist/{mode}/debian: debbuild build/{mode}/scylla-package.tar.gz\n')
|
||||
f.write(f' pool = submodule_pool\n')
|
||||
f.write(f' mode = {mode}\n')
|
||||
f.write(f'build dist-server-{mode}: phony build/dist/{mode}/redhat build/dist/{mode}/debian\n')
|
||||
f.write('rule libdeflate.{mode}\n'.format(**locals()))
|
||||
f.write(' command = make -C libdeflate BUILD_DIR=../build/{mode}/libdeflate/ CFLAGS="{libdeflate_cflags}" CC={args.cc} ../build/{mode}/libdeflate//libdeflate.a\n'.format(**locals()))
|
||||
f.write('build build/{mode}/libdeflate/libdeflate.a: libdeflate.{mode}\n'.format(**locals()))
|
||||
f.write(' pool = submodule_pool\n')
|
||||
f.write('build build/{mode}/zstd/lib/libzstd.a: ninja\n'.format(**locals()))
|
||||
f.write(' pool = submodule_pool\n')
|
||||
f.write(' subdir = build/{mode}/zstd\n'.format(**locals()))
|
||||
f.write(' target = libzstd.a\n'.format(**locals()))
|
||||
|
||||
for lib in abseil_libs:
|
||||
f.write('build build/{mode}/abseil/{lib}: ninja\n'.format(**locals()))
|
||||
@@ -1702,77 +1677,11 @@ with open(buildfile_tmp, 'w') as f:
|
||||
mode = 'dev' if 'dev' in modes else modes[0]
|
||||
f.write('build checkheaders: phony || {}\n'.format(' '.join(['$builddir/{}/{}.o'.format(mode, hh) for hh in headers])))
|
||||
|
||||
f.write(
|
||||
'build test: phony {}\n'.format(' '.join(['{mode}-test'.format(mode=mode) for mode in modes]))
|
||||
)
|
||||
f.write(
|
||||
'build check: phony {}\n'.format(' '.join(['{mode}-check'.format(mode=mode) for mode in modes]))
|
||||
)
|
||||
|
||||
f.write(textwrap.dedent(f'''\
|
||||
build dist-server-deb: phony {' '.join(['build/dist/{mode}/debian'.format(mode=mode) for mode in build_modes])}
|
||||
build dist-server-rpm: phony {' '.join(['build/dist/{mode}/redhat'.format(mode=mode) for mode in build_modes])}
|
||||
build dist-server: phony dist-server-rpm dist-server-deb
|
||||
|
||||
rule build-submodule-reloc
|
||||
command = cd $reloc_dir && ./reloc/build_reloc.sh
|
||||
rule build-submodule-rpm
|
||||
command = cd $dir && ./reloc/build_rpm.sh --reloc-pkg $artifact
|
||||
rule build-submodule-deb
|
||||
command = cd $dir && ./reloc/build_deb.sh --reloc-pkg $artifact
|
||||
|
||||
build scylla-jmx/build/scylla-jmx-package.tar.gz: build-submodule-reloc
|
||||
reloc_dir = scylla-jmx
|
||||
build dist-jmx-rpm: build-submodule-rpm scylla-jmx/build/scylla-jmx-package.tar.gz
|
||||
dir = scylla-jmx
|
||||
artifact = build/scylla-jmx-package.tar.gz
|
||||
build dist-jmx-deb: build-submodule-deb scylla-jmx/build/scylla-jmx-package.tar.gz
|
||||
dir = scylla-jmx
|
||||
artifact = build/scylla-jmx-package.tar.gz
|
||||
build dist-jmx: phony dist-jmx-rpm dist-jmx-deb
|
||||
|
||||
build scylla-tools/build/scylla-tools-package.tar.gz: build-submodule-reloc
|
||||
reloc_dir = scylla-tools
|
||||
build dist-tools-rpm: build-submodule-rpm scylla-tools/build/scylla-tools-package.tar.gz
|
||||
dir = scylla-tools
|
||||
artifact = build/scylla-tools-package.tar.gz
|
||||
build dist-tools-deb: build-submodule-deb scylla-tools/build/scylla-tools-package.tar.gz
|
||||
dir = scylla-tools
|
||||
artifact = build/scylla-tools-package.tar.gz
|
||||
build dist-tools: phony dist-tools-rpm dist-tools-deb
|
||||
|
||||
rule build-python-reloc
|
||||
command = ./reloc/python3/build_reloc.sh
|
||||
rule build-python-rpm
|
||||
command = ./reloc/python3/build_rpm.sh
|
||||
rule build-python-deb
|
||||
command = ./reloc/python3/build_deb.sh
|
||||
|
||||
build build/release/scylla-python3-package.tar.gz: build-python-reloc
|
||||
build dist-python-rpm: build-python-rpm build/release/scylla-python3-package.tar.gz
|
||||
build dist-python-deb: build-python-deb build/release/scylla-python3-package.tar.gz
|
||||
build dist-python: phony dist-python-rpm dist-python-deb
|
||||
build dist-deb: phony dist-server-deb dist-python-deb dist-jmx-deb dist-tools-deb
|
||||
build dist-rpm: phony dist-server-rpm dist-python-rpm dist-jmx-rpm dist-tools-rpm
|
||||
build dist: phony dist-server dist-python dist-jmx dist-tools
|
||||
'''))
|
||||
|
||||
f.write(textwrap.dedent(f'''\
|
||||
build dist-check: phony {' '.join(['dist-check-{mode}'.format(mode=mode) for mode in build_modes])}
|
||||
rule dist-check
|
||||
command = ./tools/testing/dist-check/dist-check.sh --mode $mode
|
||||
'''))
|
||||
for mode in build_modes:
|
||||
f.write(textwrap.dedent(f'''\
|
||||
build dist-check-{mode}: dist-check
|
||||
mode = {mode}
|
||||
'''))
|
||||
|
||||
f.write(textwrap.dedent('''\
|
||||
rule configure
|
||||
command = {python} configure.py $configure_args
|
||||
generator = 1
|
||||
build build.ninja: configure | configure.py SCYLLA-VERSION-GEN {args.seastar_path}/CMakeLists.txt
|
||||
build build.ninja: configure | configure.py SCYLLA-VERSION-GEN seastar/CMakeLists.txt
|
||||
rule cscope
|
||||
command = find -name '*.[chS]' -o -name "*.cc" -o -name "*.hh" | cscope -bq -i-
|
||||
description = CSCOPE
|
||||
@@ -1792,9 +1701,6 @@ with open(buildfile_tmp, 'w') as f:
|
||||
rule scylla_version_gen
|
||||
command = ./SCYLLA-VERSION-GEN
|
||||
build build/SCYLLA-RELEASE-FILE build/SCYLLA-VERSION-FILE: scylla_version_gen
|
||||
rule debian_files_gen
|
||||
command = ./dist/debian/debian_files_gen.py
|
||||
build build/debian/debian: debian_files_gen | always
|
||||
''').format(modes_list=' '.join(build_modes), **globals()))
|
||||
|
||||
os.rename(buildfile_tmp, buildfile)
|
||||
|
||||
12
counters.hh
12
counters.hh
@@ -73,9 +73,7 @@ public:
|
||||
return counter_id(utils::make_random_uuid());
|
||||
}
|
||||
};
|
||||
static_assert(
|
||||
std::is_standard_layout_v<counter_id> && std::is_trivial_v<counter_id>,
|
||||
"counter_id should be a POD type");
|
||||
static_assert(std::is_pod<counter_id>::value, "counter_id should be a POD type");
|
||||
|
||||
std::ostream& operator<<(std::ostream& os, const counter_id& id);
|
||||
|
||||
@@ -156,10 +154,10 @@ private:
|
||||
// Shared logic for applying counter_shards and counter_shard_views.
|
||||
// T is either counter_shard or basic_counter_shard_view<U>.
|
||||
template<typename T>
|
||||
requires requires(T shard) {
|
||||
{ shard.value() } -> std::same_as<int64_t>;
|
||||
{ shard.logical_clock() } -> std::same_as<int64_t>;
|
||||
}
|
||||
GCC6_CONCEPT(requires requires(T shard) {
|
||||
{ shard.value() } -> int64_t;
|
||||
{ shard.logical_clock() } -> int64_t;
|
||||
})
|
||||
counter_shard& do_apply(T&& other) noexcept {
|
||||
auto other_clock = other.logical_clock();
|
||||
if (_logical_clock < other_clock) {
|
||||
|
||||
358
cql3/Cql.g
358
cql3/Cql.g
@@ -105,8 +105,8 @@ options {
|
||||
using namespace cql3::statements;
|
||||
using namespace cql3::selection;
|
||||
using cql3::cql3_type;
|
||||
using conditions_type = std::vector<std::pair<::shared_ptr<cql3::column_identifier::raw>,lw_shared_ptr<cql3::column_condition::raw>>>;
|
||||
using operations_type = std::vector<std::pair<::shared_ptr<cql3::column_identifier::raw>, std::unique_ptr<cql3::operation::raw_update>>>;
|
||||
using conditions_type = std::vector<std::pair<::shared_ptr<cql3::column_identifier::raw>,::shared_ptr<cql3::column_condition::raw>>>;
|
||||
using operations_type = std::vector<std::pair<::shared_ptr<cql3::column_identifier::raw>,::shared_ptr<cql3::operation::raw_update>>>;
|
||||
|
||||
// ANTLR forces us to define a default-initialized return value
|
||||
// for every rule (e.g. [returns ut_name name]), but not every type
|
||||
@@ -255,8 +255,8 @@ struct uninitialized {
|
||||
return to_lower(s) == "true";
|
||||
}
|
||||
|
||||
void add_raw_update(std::vector<std::pair<::shared_ptr<cql3::column_identifier::raw>, std::unique_ptr<cql3::operation::raw_update>>>& operations,
|
||||
::shared_ptr<cql3::column_identifier::raw> key, std::unique_ptr<cql3::operation::raw_update> update)
|
||||
void add_raw_update(std::vector<std::pair<::shared_ptr<cql3::column_identifier::raw>,::shared_ptr<cql3::operation::raw_update>>>& operations,
|
||||
::shared_ptr<cql3::column_identifier::raw> key, ::shared_ptr<cql3::operation::raw_update> update)
|
||||
{
|
||||
for (auto&& p : operations) {
|
||||
if (*p.first == *key && !p.second->is_compatible_with(update)) {
|
||||
@@ -319,63 +319,63 @@ struct uninitialized {
|
||||
|
||||
/** STATEMENTS **/
|
||||
|
||||
query returns [std::unique_ptr<raw::parsed_statement> stmnt]
|
||||
: st=cqlStatement (';')* EOF { $stmnt = std::move(st); }
|
||||
query returns [shared_ptr<raw::parsed_statement> stmnt]
|
||||
: st=cqlStatement (';')* EOF { $stmnt = st; }
|
||||
;
|
||||
|
||||
cqlStatement returns [std::unique_ptr<raw::parsed_statement> stmt]
|
||||
cqlStatement returns [shared_ptr<raw::parsed_statement> stmt]
|
||||
@after{ if (stmt) { stmt->set_bound_variables(_bind_variables); } }
|
||||
: st1= selectStatement { $stmt = std::move(st1); }
|
||||
| st2= insertStatement { $stmt = std::move(st2); }
|
||||
| st3= updateStatement { $stmt = std::move(st3); }
|
||||
| st4= batchStatement { $stmt = std::move(st4); }
|
||||
| st5= deleteStatement { $stmt = std::move(st5); }
|
||||
| st6= useStatement { $stmt = std::move(st6); }
|
||||
| st7= truncateStatement { $stmt = std::move(st7); }
|
||||
| st8= createKeyspaceStatement { $stmt = std::move(st8); }
|
||||
| st9= createTableStatement { $stmt = std::move(st9); }
|
||||
| st10=createIndexStatement { $stmt = std::move(st10); }
|
||||
| st11=dropKeyspaceStatement { $stmt = std::move(st11); }
|
||||
| st12=dropTableStatement { $stmt = std::move(st12); }
|
||||
| st13=dropIndexStatement { $stmt = std::move(st13); }
|
||||
| st14=alterTableStatement { $stmt = std::move(st14); }
|
||||
| st15=alterKeyspaceStatement { $stmt = std::move(st15); }
|
||||
| st16=grantStatement { $stmt = std::move(st16); }
|
||||
| st17=revokeStatement { $stmt = std::move(st17); }
|
||||
| st18=listPermissionsStatement { $stmt = std::move(st18); }
|
||||
| st19=createUserStatement { $stmt = std::move(st19); }
|
||||
| st20=alterUserStatement { $stmt = std::move(st20); }
|
||||
| st21=dropUserStatement { $stmt = std::move(st21); }
|
||||
| st22=listUsersStatement { $stmt = std::move(st22); }
|
||||
: st1= selectStatement { $stmt = st1; }
|
||||
| st2= insertStatement { $stmt = st2; }
|
||||
| st3= updateStatement { $stmt = st3; }
|
||||
| st4= batchStatement { $stmt = st4; }
|
||||
| st5= deleteStatement { $stmt = st5; }
|
||||
| st6= useStatement { $stmt = st6; }
|
||||
| st7= truncateStatement { $stmt = st7; }
|
||||
| st8= createKeyspaceStatement { $stmt = st8; }
|
||||
| st9= createTableStatement { $stmt = st9; }
|
||||
| st10=createIndexStatement { $stmt = st10; }
|
||||
| st11=dropKeyspaceStatement { $stmt = st11; }
|
||||
| st12=dropTableStatement { $stmt = st12; }
|
||||
| st13=dropIndexStatement { $stmt = st13; }
|
||||
| st14=alterTableStatement { $stmt = st14; }
|
||||
| st15=alterKeyspaceStatement { $stmt = st15; }
|
||||
| st16=grantStatement { $stmt = st16; }
|
||||
| st17=revokeStatement { $stmt = st17; }
|
||||
| st18=listPermissionsStatement { $stmt = st18; }
|
||||
| st19=createUserStatement { $stmt = st19; }
|
||||
| st20=alterUserStatement { $stmt = st20; }
|
||||
| st21=dropUserStatement { $stmt = st21; }
|
||||
| st22=listUsersStatement { $stmt = st22; }
|
||||
#if 0
|
||||
| st23=createTriggerStatement { $stmt = st23; }
|
||||
| st24=dropTriggerStatement { $stmt = st24; }
|
||||
#endif
|
||||
| st25=createTypeStatement { $stmt = std::move(st25); }
|
||||
| st26=alterTypeStatement { $stmt = std::move(st26); }
|
||||
| st27=dropTypeStatement { $stmt = std::move(st27); }
|
||||
| st28=createFunctionStatement { $stmt = std::move(st28); }
|
||||
| st29=dropFunctionStatement { $stmt = std::move(st29); }
|
||||
| st25=createTypeStatement { $stmt = st25; }
|
||||
| st26=alterTypeStatement { $stmt = st26; }
|
||||
| st27=dropTypeStatement { $stmt = st27; }
|
||||
| st28=createFunctionStatement { $stmt = st28; }
|
||||
| st29=dropFunctionStatement { $stmt = st29; }
|
||||
#if 0
|
||||
| st30=createAggregateStatement { $stmt = st30; }
|
||||
| st31=dropAggregateStatement { $stmt = st31; }
|
||||
#endif
|
||||
| st32=createViewStatement { $stmt = std::move(st32); }
|
||||
| st33=alterViewStatement { $stmt = std::move(st33); }
|
||||
| st34=dropViewStatement { $stmt = std::move(st34); }
|
||||
| st35=listRolesStatement { $stmt = std::move(st35); }
|
||||
| st36=grantRoleStatement { $stmt = std::move(st36); }
|
||||
| st37=revokeRoleStatement { $stmt = std::move(st37); }
|
||||
| st38=dropRoleStatement { $stmt = std::move(st38); }
|
||||
| st39=createRoleStatement { $stmt = std::move(st39); }
|
||||
| st40=alterRoleStatement { $stmt = std::move(st40); }
|
||||
| st32=createViewStatement { $stmt = st32; }
|
||||
| st33=alterViewStatement { $stmt = st33; }
|
||||
| st34=dropViewStatement { $stmt = st34; }
|
||||
| st35=listRolesStatement { $stmt = st35; }
|
||||
| st36=grantRoleStatement { $stmt = st36; }
|
||||
| st37=revokeRoleStatement { $stmt = st37; }
|
||||
| st38=dropRoleStatement { $stmt = st38; }
|
||||
| st39=createRoleStatement { $stmt = st39; }
|
||||
| st40=alterRoleStatement { $stmt = st40; }
|
||||
;
|
||||
|
||||
/*
|
||||
* USE <KEYSPACE>;
|
||||
*/
|
||||
useStatement returns [std::unique_ptr<raw::use_statement> stmt]
|
||||
: K_USE ks=keyspaceName { $stmt = std::make_unique<raw::use_statement>(ks); }
|
||||
useStatement returns [::shared_ptr<raw::use_statement> stmt]
|
||||
: K_USE ks=keyspaceName { $stmt = ::make_shared<raw::use_statement>(ks); }
|
||||
;
|
||||
|
||||
/**
|
||||
@@ -384,7 +384,7 @@ useStatement returns [std::unique_ptr<raw::use_statement> stmt]
|
||||
* WHERE KEY = "key1" AND COL > 1 AND COL < 100
|
||||
* LIMIT <NUMBER>;
|
||||
*/
|
||||
selectStatement returns [std::unique_ptr<raw::select_statement> expr]
|
||||
selectStatement returns [shared_ptr<raw::select_statement> expr]
|
||||
@init {
|
||||
bool is_distinct = false;
|
||||
::shared_ptr<cql3::term::raw> limit;
|
||||
@@ -409,7 +409,7 @@ selectStatement returns [std::unique_ptr<raw::select_statement> expr]
|
||||
( K_BYPASS K_CACHE { bypass_cache = true; })?
|
||||
{
|
||||
auto params = make_lw_shared<raw::select_statement::parameters>(std::move(orderings), is_distinct, allow_filtering, is_json, bypass_cache);
|
||||
$expr = std::make_unique<raw::select_statement>(std::move(cf), std::move(params),
|
||||
$expr = ::make_shared<raw::select_statement>(std::move(cf), std::move(params),
|
||||
std::move(sclause), std::move(wclause), std::move(limit), std::move(per_partition_limit),
|
||||
std::move(gbcolumns));
|
||||
}
|
||||
@@ -476,7 +476,7 @@ jsonValue returns [::shared_ptr<cql3::term::raw> value]
|
||||
* USING TIMESTAMP <long>;
|
||||
*
|
||||
*/
|
||||
insertStatement returns [std::unique_ptr<raw::modification_statement> expr]
|
||||
insertStatement returns [::shared_ptr<raw::modification_statement> expr]
|
||||
@init {
|
||||
auto attrs = std::make_unique<cql3::attributes::raw>();
|
||||
std::vector<::shared_ptr<cql3::column_identifier::raw>> column_names;
|
||||
@@ -492,7 +492,7 @@ insertStatement returns [std::unique_ptr<raw::modification_statement> expr]
|
||||
( K_IF K_NOT K_EXISTS { if_not_exists = true; } )?
|
||||
( usingClause[attrs] )?
|
||||
{
|
||||
$expr = std::make_unique<raw::insert_statement>(std::move(cf),
|
||||
$expr = ::make_shared<raw::insert_statement>(std::move(cf),
|
||||
std::move(attrs),
|
||||
std::move(column_names),
|
||||
std::move(values),
|
||||
@@ -504,7 +504,7 @@ insertStatement returns [std::unique_ptr<raw::modification_statement> expr]
|
||||
( K_IF K_NOT K_EXISTS { if_not_exists = true; } )?
|
||||
( usingClause[attrs] )?
|
||||
{
|
||||
$expr = std::make_unique<raw::insert_json_statement>(std::move(cf),
|
||||
$expr = ::make_shared<raw::insert_json_statement>(std::move(cf),
|
||||
std::move(attrs),
|
||||
std::move(json_value),
|
||||
if_not_exists,
|
||||
@@ -528,11 +528,11 @@ usingClauseObjective[std::unique_ptr<cql3::attributes::raw>& attrs]
|
||||
* SET name1 = value1, name2 = value2
|
||||
* WHERE key = value;
|
||||
*/
|
||||
updateStatement returns [std::unique_ptr<raw::update_statement> expr]
|
||||
updateStatement returns [::shared_ptr<raw::update_statement> expr]
|
||||
@init {
|
||||
bool if_exists = false;
|
||||
auto attrs = std::make_unique<cql3::attributes::raw>();
|
||||
std::vector<std::pair<::shared_ptr<cql3::column_identifier::raw>, std::unique_ptr<cql3::operation::raw_update>>> operations;
|
||||
std::vector<std::pair<::shared_ptr<cql3::column_identifier::raw>, ::shared_ptr<cql3::operation::raw_update>>> operations;
|
||||
}
|
||||
: K_UPDATE cf=columnFamilyName
|
||||
( usingClause[attrs] )?
|
||||
@@ -540,7 +540,7 @@ updateStatement returns [std::unique_ptr<raw::update_statement> expr]
|
||||
K_WHERE wclause=whereClause
|
||||
( K_IF (K_EXISTS{ if_exists = true; } | conditions=updateConditions) )?
|
||||
{
|
||||
return std::make_unique<raw::update_statement>(std::move(cf),
|
||||
return ::make_shared<raw::update_statement>(std::move(cf),
|
||||
std::move(attrs),
|
||||
std::move(operations),
|
||||
std::move(wclause),
|
||||
@@ -560,10 +560,10 @@ updateConditions returns [conditions_type conditions]
|
||||
* WHERE KEY = keyname
|
||||
[IF (EXISTS | name = value, ...)];
|
||||
*/
|
||||
deleteStatement returns [std::unique_ptr<raw::delete_statement> expr]
|
||||
deleteStatement returns [::shared_ptr<raw::delete_statement> expr]
|
||||
@init {
|
||||
auto attrs = std::make_unique<cql3::attributes::raw>();
|
||||
std::vector<std::unique_ptr<cql3::operation::raw_deletion>> column_deletions;
|
||||
std::vector<::shared_ptr<cql3::operation::raw_deletion>> column_deletions;
|
||||
bool if_exists = false;
|
||||
}
|
||||
: K_DELETE ( dels=deleteSelection { column_deletions = std::move(dels); } )?
|
||||
@@ -572,7 +572,7 @@ deleteStatement returns [std::unique_ptr<raw::delete_statement> expr]
|
||||
K_WHERE wclause=whereClause
|
||||
( K_IF ( K_EXISTS { if_exists = true; } | conditions=updateConditions ))?
|
||||
{
|
||||
return std::make_unique<raw::delete_statement>(cf,
|
||||
return ::make_shared<raw::delete_statement>(cf,
|
||||
std::move(attrs),
|
||||
std::move(column_deletions),
|
||||
std::move(wclause),
|
||||
@@ -581,15 +581,15 @@ deleteStatement returns [std::unique_ptr<raw::delete_statement> expr]
|
||||
}
|
||||
;
|
||||
|
||||
deleteSelection returns [std::vector<std::unique_ptr<cql3::operation::raw_deletion>> operations]
|
||||
deleteSelection returns [std::vector<::shared_ptr<cql3::operation::raw_deletion>> operations]
|
||||
: t1=deleteOp { $operations.emplace_back(std::move(t1)); }
|
||||
(',' tN=deleteOp { $operations.emplace_back(std::move(tN)); })*
|
||||
;
|
||||
|
||||
deleteOp returns [std::unique_ptr<cql3::operation::raw_deletion> op]
|
||||
: c=cident { $op = std::make_unique<cql3::operation::column_deletion>(std::move(c)); }
|
||||
| c=cident '[' t=term ']' { $op = std::make_unique<cql3::operation::element_deletion>(std::move(c), std::move(t)); }
|
||||
| c=cident '.' field=ident { $op = std::make_unique<cql3::operation::field_deletion>(std::move(c), std::move(field)); }
|
||||
deleteOp returns [::shared_ptr<cql3::operation::raw_deletion> op]
|
||||
: c=cident { $op = ::make_shared<cql3::operation::column_deletion>(std::move(c)); }
|
||||
| c=cident '[' t=term ']' { $op = ::make_shared<cql3::operation::element_deletion>(std::move(c), std::move(t)); }
|
||||
| c=cident '.' field=ident { $op = ::make_shared<cql3::operation::field_deletion>(std::move(c), std::move(field)); }
|
||||
;
|
||||
|
||||
usingClauseDelete[std::unique_ptr<cql3::attributes::raw>& attrs]
|
||||
@@ -620,11 +620,11 @@ usingClauseDelete[std::unique_ptr<cql3::attributes::raw>& attrs]
|
||||
* ...
|
||||
* APPLY BATCH
|
||||
*/
|
||||
batchStatement returns [std::unique_ptr<cql3::statements::raw::batch_statement> expr]
|
||||
batchStatement returns [shared_ptr<cql3::statements::raw::batch_statement> expr]
|
||||
@init {
|
||||
using btype = cql3::statements::raw::batch_statement::type;
|
||||
btype type = btype::LOGGED;
|
||||
std::vector<std::unique_ptr<cql3::statements::raw::modification_statement>> statements;
|
||||
std::vector<shared_ptr<cql3::statements::raw::modification_statement>> statements;
|
||||
auto attrs = std::make_unique<cql3::attributes::raw>();
|
||||
}
|
||||
: K_BEGIN
|
||||
@@ -633,14 +633,14 @@ batchStatement returns [std::unique_ptr<cql3::statements::raw::batch_statement>
|
||||
( s=batchStatementObjective ';'? { statements.push_back(std::move(s)); } )*
|
||||
K_APPLY K_BATCH
|
||||
{
|
||||
$expr = std::make_unique<cql3::statements::raw::batch_statement>(type, std::move(attrs), std::move(statements));
|
||||
$expr = ::make_shared<cql3::statements::raw::batch_statement>(type, std::move(attrs), std::move(statements));
|
||||
}
|
||||
;
|
||||
|
||||
batchStatementObjective returns [std::unique_ptr<cql3::statements::raw::modification_statement> statement]
|
||||
: i=insertStatement { $statement = std::move(i); }
|
||||
| u=updateStatement { $statement = std::move(u); }
|
||||
| d=deleteStatement { $statement = std::move(d); }
|
||||
batchStatementObjective returns [shared_ptr<cql3::statements::raw::modification_statement> statement]
|
||||
: i=insertStatement { $statement = i; }
|
||||
| u=updateStatement { $statement = u; }
|
||||
| d=deleteStatement { $statement = d; }
|
||||
;
|
||||
|
||||
#if 0
|
||||
@@ -694,7 +694,7 @@ dropAggregateStatement returns [DropAggregateStatement expr]
|
||||
;
|
||||
#endif
|
||||
|
||||
createFunctionStatement returns [std::unique_ptr<cql3::statements::create_function_statement> expr]
|
||||
createFunctionStatement returns [shared_ptr<cql3::statements::create_function_statement> expr]
|
||||
@init {
|
||||
bool or_replace = false;
|
||||
bool if_not_exists = false;
|
||||
@@ -719,10 +719,10 @@ createFunctionStatement returns [std::unique_ptr<cql3::statements::create_functi
|
||||
K_RETURNS rt = comparatorType
|
||||
K_LANGUAGE language = IDENT
|
||||
K_AS body = STRING_LITERAL
|
||||
{ $expr = std::make_unique<cql3::statements::create_function_statement>(std::move(fn), to_lower($language.text), $body.text, std::move(arg_names), std::move(arg_types), std::move(rt), called_on_null_input, or_replace, if_not_exists); }
|
||||
{ $expr = ::make_shared<cql3::statements::create_function_statement>(std::move(fn), to_lower($language.text), $body.text, std::move(arg_names), std::move(arg_types), std::move(rt), called_on_null_input, or_replace, if_not_exists); }
|
||||
;
|
||||
|
||||
dropFunctionStatement returns [std::unique_ptr<cql3::statements::drop_function_statement> expr]
|
||||
dropFunctionStatement returns [shared_ptr<cql3::statements::drop_function_statement> expr]
|
||||
@init {
|
||||
bool if_exists = false;
|
||||
std::vector<shared_ptr<cql3_type::raw>> arg_types;
|
||||
@@ -740,19 +740,19 @@ dropFunctionStatement returns [std::unique_ptr<cql3::statements::drop_function_s
|
||||
')'
|
||||
{ args_present = true; }
|
||||
)?
|
||||
{ $expr = std::make_unique<cql3::statements::drop_function_statement>(std::move(fn), std::move(arg_types), args_present, if_exists); }
|
||||
{ $expr = ::make_shared<cql3::statements::drop_function_statement>(std::move(fn), std::move(arg_types), args_present, if_exists); }
|
||||
;
|
||||
|
||||
/**
|
||||
* CREATE KEYSPACE [IF NOT EXISTS] <KEYSPACE> WITH attr1 = value1 AND attr2 = value2;
|
||||
*/
|
||||
createKeyspaceStatement returns [std::unique_ptr<cql3::statements::create_keyspace_statement> expr]
|
||||
createKeyspaceStatement returns [shared_ptr<cql3::statements::create_keyspace_statement> expr]
|
||||
@init {
|
||||
auto attrs = make_shared<cql3::statements::ks_prop_defs>();
|
||||
bool if_not_exists = false;
|
||||
}
|
||||
: K_CREATE K_KEYSPACE (K_IF K_NOT K_EXISTS { if_not_exists = true; } )? ks=keyspaceName
|
||||
K_WITH properties[*attrs] { $expr = std::make_unique<cql3::statements::create_keyspace_statement>(ks, attrs, if_not_exists); }
|
||||
K_WITH properties[attrs] { $expr = ::make_shared<cql3::statements::create_keyspace_statement>(ks, attrs, if_not_exists); }
|
||||
;
|
||||
|
||||
/**
|
||||
@@ -762,33 +762,33 @@ createKeyspaceStatement returns [std::unique_ptr<cql3::statements::create_keyspa
|
||||
* <name3> <type>
|
||||
* ) WITH <property> = <value> AND ...;
|
||||
*/
|
||||
createTableStatement returns [std::unique_ptr<cql3::statements::create_table_statement::raw_statement> expr]
|
||||
createTableStatement returns [shared_ptr<cql3::statements::create_table_statement::raw_statement> expr]
|
||||
@init { bool if_not_exists = false; }
|
||||
: K_CREATE K_COLUMNFAMILY (K_IF K_NOT K_EXISTS { if_not_exists = true; } )?
|
||||
cf=columnFamilyName { $expr = std::make_unique<cql3::statements::create_table_statement::raw_statement>(cf, if_not_exists); }
|
||||
cfamDefinition[*expr]
|
||||
cf=columnFamilyName { $expr = make_shared<cql3::statements::create_table_statement::raw_statement>(cf, if_not_exists); }
|
||||
cfamDefinition[expr]
|
||||
;
|
||||
|
||||
cfamDefinition[cql3::statements::create_table_statement::raw_statement& expr]
|
||||
cfamDefinition[shared_ptr<cql3::statements::create_table_statement::raw_statement> expr]
|
||||
: '(' cfamColumns[expr] ( ',' cfamColumns[expr]? )* ')'
|
||||
( K_WITH cfamProperty[$expr.properties()] ( K_AND cfamProperty[$expr.properties()] )*)?
|
||||
( K_WITH cfamProperty[$expr->properties()] ( K_AND cfamProperty[$expr->properties()] )*)?
|
||||
;
|
||||
|
||||
cfamColumns[cql3::statements::create_table_statement::raw_statement& expr]
|
||||
cfamColumns[shared_ptr<cql3::statements::create_table_statement::raw_statement> expr]
|
||||
@init { bool is_static=false; }
|
||||
: k=ident v=comparatorType (K_STATIC {is_static = true;})? { $expr.add_definition(k, v, is_static); }
|
||||
(K_PRIMARY K_KEY { $expr.add_key_aliases(std::vector<shared_ptr<cql3::column_identifier>>{k}); })?
|
||||
| K_PRIMARY K_KEY '(' pkDef[expr] (',' c=ident { $expr.add_column_alias(c); } )* ')'
|
||||
: k=ident v=comparatorType (K_STATIC {is_static = true;})? { $expr->add_definition(k, v, is_static); }
|
||||
(K_PRIMARY K_KEY { $expr->add_key_aliases(std::vector<shared_ptr<cql3::column_identifier>>{k}); })?
|
||||
| K_PRIMARY K_KEY '(' pkDef[expr] (',' c=ident { $expr->add_column_alias(c); } )* ')'
|
||||
;
|
||||
|
||||
pkDef[cql3::statements::create_table_statement::raw_statement& expr]
|
||||
pkDef[shared_ptr<cql3::statements::create_table_statement::raw_statement> expr]
|
||||
@init { std::vector<shared_ptr<cql3::column_identifier>> l; }
|
||||
: k=ident { $expr.add_key_aliases(std::vector<shared_ptr<cql3::column_identifier>>{k}); }
|
||||
| '(' k1=ident { l.push_back(k1); } ( ',' kn=ident { l.push_back(kn); } )* ')' { $expr.add_key_aliases(l); }
|
||||
: k=ident { $expr->add_key_aliases(std::vector<shared_ptr<cql3::column_identifier>>{k}); }
|
||||
| '(' k1=ident { l.push_back(k1); } ( ',' kn=ident { l.push_back(kn); } )* ')' { $expr->add_key_aliases(l); }
|
||||
;
|
||||
|
||||
cfamProperty[cql3::statements::cf_properties& expr]
|
||||
: property[*$expr.properties()]
|
||||
: property[$expr.properties()]
|
||||
| K_COMPACT K_STORAGE { $expr.set_compact_storage(); }
|
||||
| K_CLUSTERING K_ORDER K_BY '(' cfamOrdering[expr] (',' cfamOrdering[expr])* ')'
|
||||
;
|
||||
@@ -806,15 +806,15 @@ cfamOrdering[cql3::statements::cf_properties& expr]
|
||||
* ....
|
||||
* )
|
||||
*/
|
||||
createTypeStatement returns [std::unique_ptr<create_type_statement> expr]
|
||||
createTypeStatement returns [::shared_ptr<create_type_statement> expr]
|
||||
@init { bool if_not_exists = false; }
|
||||
: K_CREATE K_TYPE (K_IF K_NOT K_EXISTS { if_not_exists = true; } )?
|
||||
tn=userTypeName { $expr = std::make_unique<create_type_statement>(tn, if_not_exists); }
|
||||
'(' typeColumns[*expr] ( ',' typeColumns[*expr]? )* ')'
|
||||
tn=userTypeName { $expr = ::make_shared<create_type_statement>(tn, if_not_exists); }
|
||||
'(' typeColumns[expr] ( ',' typeColumns[expr]? )* ')'
|
||||
;
|
||||
|
||||
typeColumns[create_type_statement& expr]
|
||||
: k=ident v=comparatorType { $expr.add_definition(k, v); }
|
||||
typeColumns[::shared_ptr<create_type_statement> expr]
|
||||
: k=ident v=comparatorType { $expr->add_definition(k, v); }
|
||||
;
|
||||
|
||||
|
||||
@@ -822,7 +822,7 @@ typeColumns[create_type_statement& expr]
|
||||
* CREATE INDEX [IF NOT EXISTS] [indexName] ON <columnFamily> (<columnName>);
|
||||
* CREATE CUSTOM INDEX [IF NOT EXISTS] [indexName] ON <columnFamily> (<columnName>) USING <indexClass>;
|
||||
*/
|
||||
createIndexStatement returns [std::unique_ptr<create_index_statement> expr]
|
||||
createIndexStatement returns [::shared_ptr<create_index_statement> expr]
|
||||
@init {
|
||||
auto props = make_shared<index_prop_defs>();
|
||||
bool if_not_exists = false;
|
||||
@@ -830,10 +830,10 @@ createIndexStatement returns [std::unique_ptr<create_index_statement> expr]
|
||||
std::vector<::shared_ptr<index_target::raw>> targets;
|
||||
}
|
||||
: K_CREATE (K_CUSTOM { props->is_custom = true; })? K_INDEX (K_IF K_NOT K_EXISTS { if_not_exists = true; } )?
|
||||
(idxName[*name])? K_ON cf=columnFamilyName '(' (target1=indexIdent { targets.emplace_back(target1); } (',' target2=indexIdent { targets.emplace_back(target2); } )*)? ')'
|
||||
(idxName[name])? K_ON cf=columnFamilyName '(' (target1=indexIdent { targets.emplace_back(target1); } (',' target2=indexIdent { targets.emplace_back(target2); } )*)? ')'
|
||||
(K_USING cls=STRING_LITERAL { props->custom_class = sstring{$cls.text}; })?
|
||||
(K_WITH properties[*props])?
|
||||
{ $expr = std::make_unique<create_index_statement>(cf, name, targets, props, if_not_exists); }
|
||||
(K_WITH properties[props])?
|
||||
{ $expr = ::make_shared<create_index_statement>(cf, name, targets, props, if_not_exists); }
|
||||
;
|
||||
|
||||
indexIdent returns [::shared_ptr<index_target::raw> id]
|
||||
@@ -856,7 +856,7 @@ indexIdent returns [::shared_ptr<index_target::raw> id]
|
||||
* PRIMARY KEY (<pkColumns>)
|
||||
* WITH <property> = <value> AND ...;
|
||||
*/
|
||||
createViewStatement returns [std::unique_ptr<create_view_statement> expr]
|
||||
createViewStatement returns [::shared_ptr<create_view_statement> expr]
|
||||
@init {
|
||||
bool if_not_exists = false;
|
||||
std::vector<::shared_ptr<cql3::column_identifier::raw>> partition_keys;
|
||||
@@ -870,7 +870,7 @@ createViewStatement returns [std::unique_ptr<create_view_statement> expr]
|
||||
| '(' k1=cident { partition_keys.push_back(k1); } ( ',' cn=cident { composite_keys.push_back(cn); } )* ')'
|
||||
)
|
||||
{
|
||||
$expr = std::make_unique<create_view_statement>(
|
||||
$expr = ::make_shared<create_view_statement>(
|
||||
std::move(cf),
|
||||
std::move(basecf),
|
||||
std::move(sclause),
|
||||
@@ -909,12 +909,12 @@ dropTriggerStatement returns [DropTriggerStatement expr]
|
||||
/**
|
||||
* ALTER KEYSPACE <KS> WITH <property> = <value>;
|
||||
*/
|
||||
alterKeyspaceStatement returns [std::unique_ptr<cql3::statements::alter_keyspace_statement> expr]
|
||||
alterKeyspaceStatement returns [shared_ptr<cql3::statements::alter_keyspace_statement> expr]
|
||||
@init {
|
||||
auto attrs = make_shared<cql3::statements::ks_prop_defs>();
|
||||
}
|
||||
: K_ALTER K_KEYSPACE ks=keyspaceName
|
||||
K_WITH properties[*attrs] { $expr = std::make_unique<cql3::statements::alter_keyspace_statement>(ks, attrs); }
|
||||
K_WITH properties[attrs] { $expr = ::make_shared<cql3::statements::alter_keyspace_statement>(ks, attrs); }
|
||||
;
|
||||
|
||||
/**
|
||||
@@ -924,7 +924,7 @@ alterKeyspaceStatement returns [std::unique_ptr<cql3::statements::alter_keyspace
|
||||
* ALTER COLUMN FAMILY <CF> WITH <property> = <value>;
|
||||
* ALTER COLUMN FAMILY <CF> RENAME <column> TO <column>;
|
||||
*/
|
||||
alterTableStatement returns [std::unique_ptr<alter_table_statement> expr]
|
||||
alterTableStatement returns [shared_ptr<alter_table_statement> expr]
|
||||
@init {
|
||||
alter_table_statement::type type;
|
||||
auto props = make_shared<cql3::statements::cf_prop_defs>();
|
||||
@@ -943,13 +943,13 @@ alterTableStatement returns [std::unique_ptr<alter_table_statement> expr]
|
||||
| '(' id1=cident { column_changes.emplace_back(alter_table_statement::column_change{id1}); }
|
||||
(',' idn=cident { column_changes.emplace_back(alter_table_statement::column_change{idn}); } )* ')'
|
||||
)
|
||||
| K_WITH properties[*props] { type = alter_table_statement::type::opts; }
|
||||
| K_WITH properties[props] { type = alter_table_statement::type::opts; }
|
||||
| K_RENAME { type = alter_table_statement::type::rename; }
|
||||
id1=cident K_TO toId1=cident { renames.emplace_back(id1, toId1); }
|
||||
( K_AND idn=cident K_TO toIdn=cident { renames.emplace_back(idn, toIdn); } )*
|
||||
)
|
||||
{
|
||||
$expr = std::make_unique<alter_table_statement>(std::move(cf), type, std::move(column_changes), std::move(props), std::move(renames));
|
||||
$expr = ::make_shared<alter_table_statement>(std::move(cf), type, std::move(column_changes), std::move(props), std::move(renames));
|
||||
}
|
||||
;
|
||||
|
||||
@@ -968,126 +968,126 @@ cfisStatic returns [bool isStaticColumn]
|
||||
* ALTER TYPE <name> ADD <field> <newtype>;
|
||||
* ALTER TYPE <name> RENAME <field> TO <newtype> AND ...;
|
||||
*/
|
||||
alterTypeStatement returns [std::unique_ptr<alter_type_statement> expr]
|
||||
alterTypeStatement returns [::shared_ptr<alter_type_statement> expr]
|
||||
: K_ALTER K_TYPE name=userTypeName
|
||||
( K_ALTER f=ident K_TYPE v=comparatorType { $expr = std::make_unique<alter_type_statement::add_or_alter>(name, false, f, v); }
|
||||
| K_ADD f=ident v=comparatorType { $expr = std::make_unique<alter_type_statement::add_or_alter>(name, true, f, v); }
|
||||
( K_ALTER f=ident K_TYPE v=comparatorType { $expr = ::make_shared<alter_type_statement::add_or_alter>(name, false, f, v); }
|
||||
| K_ADD f=ident v=comparatorType { $expr = ::make_shared<alter_type_statement::add_or_alter>(name, true, f, v); }
|
||||
| K_RENAME
|
||||
{ $expr = std::make_unique<alter_type_statement::renames>(name); }
|
||||
renames[{ static_cast<alter_type_statement::renames&>(*$expr) }]
|
||||
{ $expr = ::make_shared<alter_type_statement::renames>(name); }
|
||||
renames[{ static_pointer_cast<alter_type_statement::renames>($expr) }]
|
||||
)
|
||||
;
|
||||
|
||||
/**
|
||||
* ALTER MATERIALIZED VIEW <CF> WITH <property> = <value>;
|
||||
*/
|
||||
alterViewStatement returns [std::unique_ptr<alter_view_statement> expr]
|
||||
alterViewStatement returns [::shared_ptr<alter_view_statement> expr]
|
||||
@init {
|
||||
auto props = make_shared<cql3::statements::cf_prop_defs>();
|
||||
}
|
||||
: K_ALTER K_MATERIALIZED K_VIEW cf=columnFamilyName K_WITH properties[*props]
|
||||
: K_ALTER K_MATERIALIZED K_VIEW cf=columnFamilyName K_WITH properties[props]
|
||||
{
|
||||
$expr = std::make_unique<alter_view_statement>(std::move(cf), std::move(props));
|
||||
$expr = ::make_shared<alter_view_statement>(std::move(cf), std::move(props));
|
||||
}
|
||||
;
|
||||
|
||||
renames[alter_type_statement::renames& expr]
|
||||
: fromId=ident K_TO toId=ident { $expr.add_rename(fromId, toId); }
|
||||
renames[::shared_ptr<alter_type_statement::renames> expr]
|
||||
: fromId=ident K_TO toId=ident { $expr->add_rename(fromId, toId); }
|
||||
( K_AND renames[$expr] )?
|
||||
;
|
||||
|
||||
/**
|
||||
* DROP KEYSPACE [IF EXISTS] <KSP>;
|
||||
*/
|
||||
dropKeyspaceStatement returns [std::unique_ptr<drop_keyspace_statement> ksp]
|
||||
dropKeyspaceStatement returns [::shared_ptr<drop_keyspace_statement> ksp]
|
||||
@init { bool if_exists = false; }
|
||||
: K_DROP K_KEYSPACE (K_IF K_EXISTS { if_exists = true; } )? ks=keyspaceName { $ksp = std::make_unique<drop_keyspace_statement>(ks, if_exists); }
|
||||
: K_DROP K_KEYSPACE (K_IF K_EXISTS { if_exists = true; } )? ks=keyspaceName { $ksp = ::make_shared<drop_keyspace_statement>(ks, if_exists); }
|
||||
;
|
||||
|
||||
/**
|
||||
* DROP COLUMNFAMILY [IF EXISTS] <CF>;
|
||||
*/
|
||||
dropTableStatement returns [std::unique_ptr<drop_table_statement> stmt]
|
||||
dropTableStatement returns [::shared_ptr<drop_table_statement> stmt]
|
||||
@init { bool if_exists = false; }
|
||||
: K_DROP K_COLUMNFAMILY (K_IF K_EXISTS { if_exists = true; } )? cf=columnFamilyName { $stmt = std::make_unique<drop_table_statement>(cf, if_exists); }
|
||||
: K_DROP K_COLUMNFAMILY (K_IF K_EXISTS { if_exists = true; } )? cf=columnFamilyName { $stmt = ::make_shared<drop_table_statement>(cf, if_exists); }
|
||||
;
|
||||
|
||||
/**
|
||||
* DROP TYPE <name>;
|
||||
*/
|
||||
dropTypeStatement returns [std::unique_ptr<drop_type_statement> stmt]
|
||||
dropTypeStatement returns [::shared_ptr<drop_type_statement> stmt]
|
||||
@init { bool if_exists = false; }
|
||||
: K_DROP K_TYPE (K_IF K_EXISTS { if_exists = true; } )? name=userTypeName { $stmt = std::make_unique<drop_type_statement>(name, if_exists); }
|
||||
: K_DROP K_TYPE (K_IF K_EXISTS { if_exists = true; } )? name=userTypeName { $stmt = ::make_shared<drop_type_statement>(name, if_exists); }
|
||||
;
|
||||
|
||||
/**
|
||||
* DROP MATERIALIZED VIEW [IF EXISTS] <view_name>
|
||||
*/
|
||||
dropViewStatement returns [std::unique_ptr<drop_view_statement> stmt]
|
||||
dropViewStatement returns [::shared_ptr<drop_view_statement> stmt]
|
||||
@init { bool if_exists = false; }
|
||||
: K_DROP K_MATERIALIZED K_VIEW (K_IF K_EXISTS { if_exists = true; } )? cf=columnFamilyName
|
||||
{ $stmt = std::make_unique<drop_view_statement>(cf, if_exists); }
|
||||
{ $stmt = ::make_shared<drop_view_statement>(cf, if_exists); }
|
||||
;
|
||||
|
||||
/**
|
||||
* DROP INDEX [IF EXISTS] <INDEX_NAME>
|
||||
*/
|
||||
dropIndexStatement returns [std::unique_ptr<drop_index_statement> expr]
|
||||
dropIndexStatement returns [::shared_ptr<drop_index_statement> expr]
|
||||
@init { bool if_exists = false; }
|
||||
: K_DROP K_INDEX (K_IF K_EXISTS { if_exists = true; } )? index=indexName
|
||||
{ $expr = std::make_unique<drop_index_statement>(index, if_exists); }
|
||||
{ $expr = ::make_shared<drop_index_statement>(index, if_exists); }
|
||||
;
|
||||
|
||||
/**
|
||||
* TRUNCATE <CF>;
|
||||
*/
|
||||
truncateStatement returns [std::unique_ptr<truncate_statement> stmt]
|
||||
: K_TRUNCATE (K_COLUMNFAMILY)? cf=columnFamilyName { $stmt = std::make_unique<truncate_statement>(cf); }
|
||||
truncateStatement returns [::shared_ptr<truncate_statement> stmt]
|
||||
: K_TRUNCATE (K_COLUMNFAMILY)? cf=columnFamilyName { $stmt = ::make_shared<truncate_statement>(cf); }
|
||||
;
|
||||
|
||||
/**
|
||||
* GRANT <permission> ON <resource> TO <grantee>
|
||||
*/
|
||||
grantStatement returns [std::unique_ptr<grant_statement> stmt]
|
||||
grantStatement returns [::shared_ptr<grant_statement> stmt]
|
||||
: K_GRANT
|
||||
permissionOrAll
|
||||
K_ON
|
||||
resource
|
||||
K_TO
|
||||
grantee=userOrRoleName
|
||||
{ $stmt = std::make_unique<grant_statement>($permissionOrAll.perms, $resource.res, std::move(grantee)); }
|
||||
{ $stmt = ::make_shared<grant_statement>($permissionOrAll.perms, $resource.res, std::move(grantee)); }
|
||||
;
|
||||
|
||||
/**
|
||||
* REVOKE <permission> ON <resource> FROM <revokee>
|
||||
*/
|
||||
revokeStatement returns [std::unique_ptr<revoke_statement> stmt]
|
||||
revokeStatement returns [::shared_ptr<revoke_statement> stmt]
|
||||
: K_REVOKE
|
||||
permissionOrAll
|
||||
K_ON
|
||||
resource
|
||||
K_FROM
|
||||
revokee=userOrRoleName
|
||||
{ $stmt = std::make_unique<revoke_statement>($permissionOrAll.perms, $resource.res, std::move(revokee)); }
|
||||
{ $stmt = ::make_shared<revoke_statement>($permissionOrAll.perms, $resource.res, std::move(revokee)); }
|
||||
;
|
||||
|
||||
/**
|
||||
* GRANT <rolename> to <grantee>
|
||||
*/
|
||||
grantRoleStatement returns [std::unique_ptr<grant_role_statement> stmt]
|
||||
grantRoleStatement returns [::shared_ptr<grant_role_statement> stmt]
|
||||
: K_GRANT role=userOrRoleName K_TO grantee=userOrRoleName
|
||||
{ $stmt = std::make_unique<grant_role_statement>(std::move(role), std::move(grantee)); }
|
||||
{ $stmt = ::make_shared<grant_role_statement>(std::move(role), std::move(grantee)); }
|
||||
;
|
||||
|
||||
/**
|
||||
* REVOKE <rolename> FROM <revokee>
|
||||
*/
|
||||
revokeRoleStatement returns [std::unique_ptr<revoke_role_statement> stmt]
|
||||
revokeRoleStatement returns [::shared_ptr<revoke_role_statement> stmt]
|
||||
: K_REVOKE role=userOrRoleName K_FROM revokee=userOrRoleName
|
||||
{ $stmt = std::make_unique<revoke_role_statement>(std::move(role), std::move(revokee)); }
|
||||
{ $stmt = ::make_shared<revoke_role_statement>(std::move(role), std::move(revokee)); }
|
||||
;
|
||||
|
||||
listPermissionsStatement returns [std::unique_ptr<list_permissions_statement> stmt]
|
||||
listPermissionsStatement returns [::shared_ptr<list_permissions_statement> stmt]
|
||||
@init {
|
||||
std::optional<auth::resource> r;
|
||||
std::optional<sstring> role;
|
||||
@@ -1098,7 +1098,7 @@ listPermissionsStatement returns [std::unique_ptr<list_permissions_statement> st
|
||||
( K_ON resource { r = $resource.res; } )?
|
||||
( K_OF rn=userOrRoleName { role = sstring(static_cast<cql3::role_name>(rn).to_string()); } )?
|
||||
( K_NORECURSIVE { recursive = false; } )?
|
||||
{ $stmt = std::make_unique<list_permissions_statement>($permissionOrAll.perms, std::move(r), std::move(role), recursive); }
|
||||
{ $stmt = ::make_shared<list_permissions_statement>($permissionOrAll.perms, std::move(r), std::move(role), recursive); }
|
||||
;
|
||||
|
||||
permission returns [auth::permission perm]
|
||||
@@ -1131,7 +1131,7 @@ roleResource returns [uninitialized<auth::resource> res]
|
||||
/**
|
||||
* CREATE USER [IF NOT EXISTS] <username> [WITH PASSWORD <password>] [SUPERUSER|NOSUPERUSER]
|
||||
*/
|
||||
createUserStatement returns [std::unique_ptr<create_role_statement> stmt]
|
||||
createUserStatement returns [::shared_ptr<create_role_statement> stmt]
|
||||
@init {
|
||||
cql3::role_options opts;
|
||||
opts.is_superuser = false;
|
||||
@@ -1142,42 +1142,42 @@ createUserStatement returns [std::unique_ptr<create_role_statement> stmt]
|
||||
: K_CREATE K_USER (K_IF K_NOT K_EXISTS { ifNotExists = true; })? u=username
|
||||
( K_WITH K_PASSWORD v=STRING_LITERAL { opts.password = $v.text; })?
|
||||
( K_SUPERUSER { opts.is_superuser = true; } | K_NOSUPERUSER { opts.is_superuser = false; } )?
|
||||
{ $stmt = std::make_unique<create_role_statement>(cql3::role_name(u, cql3::preserve_role_case::yes), std::move(opts), ifNotExists); }
|
||||
{ $stmt = ::make_shared<create_role_statement>(cql3::role_name(u, cql3::preserve_role_case::yes), std::move(opts), ifNotExists); }
|
||||
;
|
||||
|
||||
/**
|
||||
* ALTER USER <username> [WITH PASSWORD <password>] [SUPERUSER|NOSUPERUSER]
|
||||
*/
|
||||
alterUserStatement returns [std::unique_ptr<alter_role_statement> stmt]
|
||||
alterUserStatement returns [::shared_ptr<alter_role_statement> stmt]
|
||||
@init {
|
||||
cql3::role_options opts;
|
||||
}
|
||||
: K_ALTER K_USER u=username
|
||||
( K_WITH K_PASSWORD v=STRING_LITERAL { opts.password = $v.text; })?
|
||||
( K_SUPERUSER { opts.is_superuser = true; } | K_NOSUPERUSER { opts.is_superuser = false; } )?
|
||||
{ $stmt = std::make_unique<alter_role_statement>(cql3::role_name(u, cql3::preserve_role_case::yes), std::move(opts)); }
|
||||
{ $stmt = ::make_shared<alter_role_statement>(cql3::role_name(u, cql3::preserve_role_case::yes), std::move(opts)); }
|
||||
;
|
||||
|
||||
/**
|
||||
* DROP USER [IF EXISTS] <username>
|
||||
*/
|
||||
dropUserStatement returns [std::unique_ptr<drop_role_statement> stmt]
|
||||
dropUserStatement returns [::shared_ptr<drop_role_statement> stmt]
|
||||
@init { bool ifExists = false; }
|
||||
: K_DROP K_USER (K_IF K_EXISTS { ifExists = true; })? u=username
|
||||
{ $stmt = std::make_unique<drop_role_statement>(cql3::role_name(u, cql3::preserve_role_case::yes), ifExists); }
|
||||
{ $stmt = ::make_shared<drop_role_statement>(cql3::role_name(u, cql3::preserve_role_case::yes), ifExists); }
|
||||
;
|
||||
|
||||
/**
|
||||
* LIST USERS
|
||||
*/
|
||||
listUsersStatement returns [std::unique_ptr<list_users_statement> stmt]
|
||||
: K_LIST K_USERS { $stmt = std::make_unique<list_users_statement>(); }
|
||||
listUsersStatement returns [::shared_ptr<list_users_statement> stmt]
|
||||
: K_LIST K_USERS { $stmt = ::make_shared<list_users_statement>(); }
|
||||
;
|
||||
|
||||
/**
|
||||
* CREATE ROLE [IF NOT EXISTS] <role_name> [WITH <roleOption> [AND <roleOption>]*]
|
||||
*/
|
||||
createRoleStatement returns [std::unique_ptr<create_role_statement> stmt]
|
||||
createRoleStatement returns [::shared_ptr<create_role_statement> stmt]
|
||||
@init {
|
||||
cql3::role_options opts;
|
||||
opts.is_superuser = false;
|
||||
@@ -1186,36 +1186,36 @@ createRoleStatement returns [std::unique_ptr<create_role_statement> stmt]
|
||||
}
|
||||
: K_CREATE K_ROLE (K_IF K_NOT K_EXISTS { if_not_exists = true; })? name=userOrRoleName
|
||||
(K_WITH roleOptions[opts])?
|
||||
{ $stmt = std::make_unique<create_role_statement>(name, std::move(opts), if_not_exists); }
|
||||
{ $stmt = ::make_shared<create_role_statement>(name, std::move(opts), if_not_exists); }
|
||||
;
|
||||
|
||||
/**
|
||||
* ALTER ROLE <rolename> [WITH <roleOption> [AND <roleOption>]*]
|
||||
*/
|
||||
alterRoleStatement returns [std::unique_ptr<alter_role_statement> stmt]
|
||||
alterRoleStatement returns [::shared_ptr<alter_role_statement> stmt]
|
||||
@init {
|
||||
cql3::role_options opts;
|
||||
}
|
||||
: K_ALTER K_ROLE name=userOrRoleName
|
||||
(K_WITH roleOptions[opts])?
|
||||
{ $stmt = std::make_unique<alter_role_statement>(name, std::move(opts)); }
|
||||
{ $stmt = ::make_shared<alter_role_statement>(name, std::move(opts)); }
|
||||
;
|
||||
|
||||
/**
|
||||
* DROP ROLE [IF EXISTS] <rolename>
|
||||
*/
|
||||
dropRoleStatement returns [std::unique_ptr<drop_role_statement> stmt]
|
||||
dropRoleStatement returns [::shared_ptr<drop_role_statement> stmt]
|
||||
@init {
|
||||
bool if_exists = false;
|
||||
}
|
||||
: K_DROP K_ROLE (K_IF K_EXISTS { if_exists = true; })? name=userOrRoleName
|
||||
{ $stmt = std::make_unique<drop_role_statement>(name, if_exists); }
|
||||
{ $stmt = ::make_shared<drop_role_statement>(name, if_exists); }
|
||||
;
|
||||
|
||||
/**
|
||||
* LIST ROLES [OF <rolename>] [NORECURSIVE]
|
||||
*/
|
||||
listRolesStatement returns [std::unique_ptr<list_roles_statement> stmt]
|
||||
listRolesStatement returns [::shared_ptr<list_roles_statement> stmt]
|
||||
@init {
|
||||
bool recursive = true;
|
||||
std::optional<cql3::role_name> grantee;
|
||||
@@ -1223,7 +1223,7 @@ listRolesStatement returns [std::unique_ptr<list_roles_statement> stmt]
|
||||
: K_LIST K_ROLES
|
||||
(K_OF g=userOrRoleName { grantee = std::move(g); })?
|
||||
(K_NORECURSIVE { recursive = false; })?
|
||||
{ $stmt = std::make_unique<list_roles_statement>(grantee, recursive); }
|
||||
{ $stmt = ::make_shared<list_roles_statement>(grantee, recursive); }
|
||||
;
|
||||
|
||||
roleOptions[cql3::role_options& opts]
|
||||
@@ -1258,17 +1258,17 @@ ident returns [shared_ptr<cql3::column_identifier> id]
|
||||
// Keyspace & Column family names
|
||||
keyspaceName returns [sstring id]
|
||||
@init { auto name = make_shared<cql3::cf_name>(); }
|
||||
: ksName[*name] { $id = name->get_keyspace(); }
|
||||
: ksName[name] { $id = name->get_keyspace(); }
|
||||
;
|
||||
|
||||
indexName returns [::shared_ptr<cql3::index_name> name]
|
||||
@init { $name = ::make_shared<cql3::index_name>(); }
|
||||
: (ksName[*name] '.')? idxName[*name]
|
||||
: (ksName[name] '.')? idxName[name]
|
||||
;
|
||||
|
||||
columnFamilyName returns [::shared_ptr<cql3::cf_name> name]
|
||||
@init { $name = ::make_shared<cql3::cf_name>(); }
|
||||
: (ksName[*name] '.')? cfName[*name]
|
||||
: (ksName[name] '.')? cfName[name]
|
||||
;
|
||||
|
||||
userTypeName returns [uninitialized<cql3::ut_name> name]
|
||||
@@ -1283,24 +1283,24 @@ userOrRoleName returns [uninitialized<cql3::role_name> name]
|
||||
| QMARK {add_recognition_error("Bind variables cannot be used for role names");}
|
||||
;
|
||||
|
||||
ksName[cql3::keyspace_element_name& name]
|
||||
: t=IDENT { $name.set_keyspace($t.text, false);}
|
||||
| t=QUOTED_NAME { $name.set_keyspace($t.text, true);}
|
||||
| k=unreserved_keyword { $name.set_keyspace(k, false);}
|
||||
ksName[::shared_ptr<cql3::keyspace_element_name> name]
|
||||
: t=IDENT { $name->set_keyspace($t.text, false);}
|
||||
| t=QUOTED_NAME { $name->set_keyspace($t.text, true);}
|
||||
| k=unreserved_keyword { $name->set_keyspace(k, false);}
|
||||
| QMARK {add_recognition_error("Bind variables cannot be used for keyspace names");}
|
||||
;
|
||||
|
||||
cfName[cql3::cf_name& name]
|
||||
: t=IDENT { $name.set_column_family($t.text, false); }
|
||||
| t=QUOTED_NAME { $name.set_column_family($t.text, true); }
|
||||
| k=unreserved_keyword { $name.set_column_family(k, false); }
|
||||
cfName[::shared_ptr<cql3::cf_name> name]
|
||||
: t=IDENT { $name->set_column_family($t.text, false); }
|
||||
| t=QUOTED_NAME { $name->set_column_family($t.text, true); }
|
||||
| k=unreserved_keyword { $name->set_column_family(k, false); }
|
||||
| QMARK {add_recognition_error("Bind variables cannot be used for table names");}
|
||||
;
|
||||
|
||||
idxName[cql3::index_name& name]
|
||||
: t=IDENT { $name.set_index($t.text, false); }
|
||||
| t=QUOTED_NAME { $name.set_index($t.text, true);}
|
||||
| k=unreserved_keyword { $name.set_index(k, false); }
|
||||
idxName[::shared_ptr<cql3::index_name> name]
|
||||
: t=IDENT { $name->set_index($t.text, false); }
|
||||
| t=QUOTED_NAME { $name->set_index($t.text, true);}
|
||||
| k=unreserved_keyword { $name->set_index(k, false); }
|
||||
| QMARK {add_recognition_error("Bind variables cannot be used for index names");}
|
||||
;
|
||||
|
||||
@@ -1416,12 +1416,12 @@ normalColumnOperation[operations_type& operations, ::shared_ptr<cql3::column_ide
|
||||
: t=term ('+' c=cident )?
|
||||
{
|
||||
if (!c) {
|
||||
add_raw_update(operations, key, std::make_unique<cql3::operation::set_value>(t));
|
||||
add_raw_update(operations, key, ::make_shared<cql3::operation::set_value>(t));
|
||||
} else {
|
||||
if (*key != *c) {
|
||||
add_recognition_error("Only expressions of the form X = <value> + X are supported.");
|
||||
}
|
||||
add_raw_update(operations, key, std::make_unique<cql3::operation::prepend>(t));
|
||||
add_raw_update(operations, key, ::make_shared<cql3::operation::prepend>(t));
|
||||
}
|
||||
}
|
||||
| c=cident sig=('+' | '-') t=term
|
||||
@@ -1429,11 +1429,11 @@ normalColumnOperation[operations_type& operations, ::shared_ptr<cql3::column_ide
|
||||
if (*key != *c) {
|
||||
add_recognition_error("Only expressions of the form X = X " + $sig.text + "<value> are supported.");
|
||||
}
|
||||
std::unique_ptr<cql3::operation::raw_update> op;
|
||||
shared_ptr<cql3::operation::raw_update> op;
|
||||
if ($sig.text == "+") {
|
||||
op = std::make_unique<cql3::operation::addition>(t);
|
||||
op = make_shared<cql3::operation::addition>(t);
|
||||
} else {
|
||||
op = std::make_unique<cql3::operation::subtraction>(t);
|
||||
op = make_shared<cql3::operation::subtraction>(t);
|
||||
}
|
||||
add_raw_update(operations, key, std::move(op));
|
||||
}
|
||||
@@ -1444,11 +1444,11 @@ normalColumnOperation[operations_type& operations, ::shared_ptr<cql3::column_ide
|
||||
// We don't yet allow a '+' in front of an integer, but we could in the future really, so let's be future-proof in our error message
|
||||
add_recognition_error("Only expressions of the form X = X " + sstring($i.text[0] == '-' ? "-" : "+") + " <value> are supported.");
|
||||
}
|
||||
add_raw_update(operations, key, std::make_unique<cql3::operation::addition>(cql3::constants::literal::integer($i.text)));
|
||||
add_raw_update(operations, key, make_shared<cql3::operation::addition>(cql3::constants::literal::integer($i.text)));
|
||||
}
|
||||
| K_SCYLLA_COUNTER_SHARD_LIST '(' t=term ')'
|
||||
{
|
||||
add_raw_update(operations, key, std::make_unique<cql3::operation::set_counter_value_from_tuple_list>(t));
|
||||
add_raw_update(operations, key, ::make_shared<cql3::operation::set_counter_value_from_tuple_list>(t));
|
||||
}
|
||||
;
|
||||
|
||||
@@ -1458,7 +1458,7 @@ collectionColumnOperation[operations_type& operations,
|
||||
bool by_uuid]
|
||||
: '=' t=term
|
||||
{
|
||||
add_raw_update(operations, key, std::make_unique<cql3::operation::set_element>(k, t, by_uuid));
|
||||
add_raw_update(operations, key, make_shared<cql3::operation::set_element>(k, t, by_uuid));
|
||||
}
|
||||
;
|
||||
|
||||
@@ -1467,7 +1467,7 @@ udtColumnOperation[operations_type& operations,
|
||||
shared_ptr<cql3::column_identifier> field]
|
||||
: '=' t=term
|
||||
{
|
||||
add_raw_update(operations, std::move(key), std::make_unique<cql3::operation::set_field>(std::move(field), std::move(t)));
|
||||
add_raw_update(operations, std::move(key), make_shared<cql3::operation::set_field>(std::move(field), std::move(t)));
|
||||
}
|
||||
;
|
||||
|
||||
@@ -1489,13 +1489,13 @@ columnCondition[conditions_type& conditions]
|
||||
)
|
||||
;
|
||||
|
||||
properties[cql3::statements::property_definitions& props]
|
||||
properties[::shared_ptr<cql3::statements::property_definitions> props]
|
||||
: property[props] (K_AND property[props])*
|
||||
;
|
||||
|
||||
property[cql3::statements::property_definitions& props]
|
||||
: k=ident '=' simple=propertyValue { try { $props.add_property(k->to_string(), simple); } catch (exceptions::syntax_exception e) { add_recognition_error(e.what()); } }
|
||||
| k=ident '=' map=mapLiteral { try { $props.add_property(k->to_string(), convert_property_map(map)); } catch (exceptions::syntax_exception e) { add_recognition_error(e.what()); } }
|
||||
property[::shared_ptr<cql3::statements::property_definitions> props]
|
||||
: k=ident '=' simple=propertyValue { try { $props->add_property(k->to_string(), simple); } catch (exceptions::syntax_exception e) { add_recognition_error(e.what()); } }
|
||||
| k=ident '=' map=mapLiteral { try { $props->add_property(k->to_string(), convert_property_map(map)); } catch (exceptions::syntax_exception e) { add_recognition_error(e.what()); } }
|
||||
;
|
||||
|
||||
propertyValue returns [sstring str]
|
||||
|
||||
@@ -50,7 +50,7 @@
|
||||
|
||||
namespace cql3 {
|
||||
|
||||
abstract_marker::abstract_marker(int32_t bind_index, lw_shared_ptr<column_specification>&& receiver)
|
||||
abstract_marker::abstract_marker(int32_t bind_index, ::shared_ptr<column_specification>&& receiver)
|
||||
: _bind_index{bind_index}
|
||||
, _receiver{std::move(receiver)}
|
||||
{ }
|
||||
@@ -67,7 +67,7 @@ abstract_marker::raw::raw(int32_t bind_index)
|
||||
: _bind_index{bind_index}
|
||||
{ }
|
||||
|
||||
::shared_ptr<term> abstract_marker::raw::prepare(database& db, const sstring& keyspace, lw_shared_ptr<column_specification> receiver) const
|
||||
::shared_ptr<term> abstract_marker::raw::prepare(database& db, const sstring& keyspace, ::shared_ptr<column_specification> receiver) const
|
||||
{
|
||||
if (receiver->type->is_collection()) {
|
||||
if (receiver->type->get_kind() == abstract_type::kind::list) {
|
||||
@@ -87,7 +87,7 @@ abstract_marker::raw::raw(int32_t bind_index)
|
||||
return ::make_shared<constants::marker>(_bind_index, receiver);
|
||||
}
|
||||
|
||||
assignment_testable::test_result abstract_marker::raw::test_assignment(database& db, const sstring& keyspace, const column_specification& receiver) const {
|
||||
assignment_testable::test_result abstract_marker::raw::test_assignment(database& db, const sstring& keyspace, ::shared_ptr<column_specification> receiver) const {
|
||||
return assignment_testable::test_result::WEAKLY_ASSIGNABLE;
|
||||
}
|
||||
|
||||
@@ -99,13 +99,13 @@ abstract_marker::in_raw::in_raw(int32_t bind_index)
|
||||
: raw{bind_index}
|
||||
{ }
|
||||
|
||||
lw_shared_ptr<column_specification> abstract_marker::in_raw::make_in_receiver(const column_specification& receiver) {
|
||||
auto in_name = ::make_shared<column_identifier>(sstring("in(") + receiver.name->to_string() + sstring(")"), true);
|
||||
return make_lw_shared<column_specification>(receiver.ks_name, receiver.cf_name, in_name, list_type_impl::get_instance(receiver.type, false));
|
||||
::shared_ptr<column_specification> abstract_marker::in_raw::make_in_receiver(::shared_ptr<column_specification> receiver) {
|
||||
auto in_name = ::make_shared<column_identifier>(sstring("in(") + receiver->name->to_string() + sstring(")"), true);
|
||||
return ::make_shared<column_specification>(receiver->ks_name, receiver->cf_name, in_name, list_type_impl::get_instance(receiver->type, false));
|
||||
}
|
||||
|
||||
::shared_ptr<term> abstract_marker::in_raw::prepare(database& db, const sstring& keyspace, lw_shared_ptr<column_specification> receiver) const {
|
||||
return ::make_shared<lists::marker>(_bind_index, make_in_receiver(*receiver));
|
||||
::shared_ptr<term> abstract_marker::in_raw::prepare(database& db, const sstring& keyspace, ::shared_ptr<column_specification> receiver) const {
|
||||
return ::make_shared<lists::marker>(_bind_index, make_in_receiver(receiver));
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -53,9 +53,9 @@ namespace cql3 {
|
||||
class abstract_marker : public non_terminal {
|
||||
protected:
|
||||
const int32_t _bind_index;
|
||||
const lw_shared_ptr<column_specification> _receiver;
|
||||
const ::shared_ptr<column_specification> _receiver;
|
||||
public:
|
||||
abstract_marker(int32_t bind_index, lw_shared_ptr<column_specification>&& receiver);
|
||||
abstract_marker(int32_t bind_index, ::shared_ptr<column_specification>&& receiver);
|
||||
|
||||
virtual void collect_marker_specification(variable_specifications& bound_names) const override;
|
||||
|
||||
@@ -70,9 +70,9 @@ public:
|
||||
public:
|
||||
raw(int32_t bind_index);
|
||||
|
||||
virtual ::shared_ptr<term> prepare(database& db, const sstring& keyspace, lw_shared_ptr<column_specification> receiver) const override;
|
||||
virtual ::shared_ptr<term> prepare(database& db, const sstring& keyspace, ::shared_ptr<column_specification> receiver) const override;
|
||||
|
||||
virtual assignment_testable::test_result test_assignment(database& db, const sstring& keyspace, const column_specification& receiver) const override;
|
||||
virtual assignment_testable::test_result test_assignment(database& db, const sstring& keyspace, ::shared_ptr<column_specification> receiver) const override;
|
||||
|
||||
virtual sstring to_string() const override;
|
||||
};
|
||||
@@ -87,9 +87,9 @@ public:
|
||||
public:
|
||||
in_raw(int32_t bind_index);
|
||||
private:
|
||||
static lw_shared_ptr<column_specification> make_in_receiver(const column_specification& receiver);
|
||||
static ::shared_ptr<column_specification> make_in_receiver(::shared_ptr<column_specification> receiver);
|
||||
public:
|
||||
virtual ::shared_ptr<term> prepare(database& db, const sstring& keyspace, lw_shared_ptr<column_specification> receiver) const override;
|
||||
virtual ::shared_ptr<term> prepare(database& db, const sstring& keyspace, ::shared_ptr<column_specification> receiver) const override;
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
@@ -70,7 +70,7 @@ public:
|
||||
// Test all elements of toTest for assignment. If all are exact match, return exact match. If any is not assignable,
|
||||
// return not assignable. Otherwise, return weakly assignable.
|
||||
template <typename AssignmentTestablePtrRange>
|
||||
static test_result test_all(database& db, const sstring& keyspace, const column_specification& receiver,
|
||||
static test_result test_all(database& db, const sstring& keyspace, ::shared_ptr<column_specification> receiver,
|
||||
AssignmentTestablePtrRange&& to_test) {
|
||||
test_result res = test_result::EXACT_MATCH;
|
||||
for (auto&& rt : to_test) {
|
||||
@@ -99,7 +99,7 @@ public:
|
||||
* Most caller should just call the isAssignable() method on the result, though functions have a use for
|
||||
* testing "strong" equality to decide the most precise overload to pick when multiple could match.
|
||||
*/
|
||||
virtual test_result test_assignment(database& db, const sstring& keyspace, const column_specification& receiver) const = 0;
|
||||
virtual test_result test_assignment(database& db, const sstring& keyspace, ::shared_ptr<column_specification> receiver) const = 0;
|
||||
|
||||
// for error reporting
|
||||
virtual sstring assignment_testable_source_context() const = 0;
|
||||
|
||||
@@ -135,12 +135,12 @@ std::unique_ptr<attributes> attributes::raw::prepare(database& db, const sstring
|
||||
return std::unique_ptr<attributes>{new attributes{std::move(ts), std::move(ttl)}};
|
||||
}
|
||||
|
||||
lw_shared_ptr<column_specification> attributes::raw::timestamp_receiver(const sstring& ks_name, const sstring& cf_name) const {
|
||||
return make_lw_shared<column_specification>(ks_name, cf_name, ::make_shared<column_identifier>("[timestamp]", true), data_type_for<int64_t>());
|
||||
::shared_ptr<column_specification> attributes::raw::timestamp_receiver(const sstring& ks_name, const sstring& cf_name) const {
|
||||
return ::make_shared<column_specification>(ks_name, cf_name, ::make_shared<column_identifier>("[timestamp]", true), data_type_for<int64_t>());
|
||||
}
|
||||
|
||||
lw_shared_ptr<column_specification> attributes::raw::time_to_live_receiver(const sstring& ks_name, const sstring& cf_name) const {
|
||||
return make_lw_shared<column_specification>(ks_name, cf_name, ::make_shared<column_identifier>("[ttl]", true), data_type_for<int32_t>());
|
||||
::shared_ptr<column_specification> attributes::raw::time_to_live_receiver(const sstring& ks_name, const sstring& cf_name) const {
|
||||
return ::make_shared<column_specification>(ks_name, cf_name, ::make_shared<column_identifier>("[ttl]", true), data_type_for<int32_t>());
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -78,9 +78,9 @@ public:
|
||||
|
||||
std::unique_ptr<attributes> prepare(database& db, const sstring& ks_name, const sstring& cf_name) const;
|
||||
private:
|
||||
lw_shared_ptr<column_specification> timestamp_receiver(const sstring& ks_name, const sstring& cf_name) const;
|
||||
::shared_ptr<column_specification> timestamp_receiver(const sstring& ks_name, const sstring& cf_name) const;
|
||||
|
||||
lw_shared_ptr<column_specification> time_to_live_receiver(const sstring& ks_name, const sstring& cf_name) const;
|
||||
::shared_ptr<column_specification> time_to_live_receiver(const sstring& ks_name, const sstring& cf_name) const;
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
@@ -291,13 +291,13 @@ bool column_condition::applies_to(const data_value* cell_value, const query_opti
|
||||
}
|
||||
}
|
||||
|
||||
lw_shared_ptr<column_condition>
|
||||
::shared_ptr<column_condition>
|
||||
column_condition::raw::prepare(database& db, const sstring& keyspace, const column_definition& receiver) const {
|
||||
if (receiver.type->is_counter()) {
|
||||
throw exceptions::invalid_request_exception("Conditions on counters are not supported");
|
||||
}
|
||||
shared_ptr<term> collection_element_term;
|
||||
lw_shared_ptr<column_specification> value_spec = receiver.column_specification;
|
||||
shared_ptr<column_specification> value_spec = receiver.column_specification;
|
||||
|
||||
if (_collection_element) {
|
||||
if (!receiver.type->is_collection()) {
|
||||
@@ -306,7 +306,7 @@ column_condition::raw::prepare(database& db, const sstring& keyspace, const colu
|
||||
}
|
||||
// Pass a correct type specification to the collection_element->prepare(), so that it can
|
||||
// later be used to validate the parameter type is compatible with receiver type.
|
||||
lw_shared_ptr<column_specification> element_spec;
|
||||
shared_ptr<column_specification> element_spec;
|
||||
auto ctype = static_cast<const collection_type_impl*>(receiver.type.get());
|
||||
const column_specification& recv_column_spec = *receiver.column_specification;
|
||||
if (ctype->get_kind() == abstract_type::kind::list) {
|
||||
|
||||
@@ -104,16 +104,16 @@ public:
|
||||
* "IF col = 'foo'"
|
||||
* "IF col LIKE <pattern>"
|
||||
*/
|
||||
static lw_shared_ptr<column_condition> condition(const column_definition& def, ::shared_ptr<term> collection_element,
|
||||
static ::shared_ptr<column_condition> condition(const column_definition& def, ::shared_ptr<term> collection_element,
|
||||
::shared_ptr<term> value, std::unique_ptr<like_matcher> matcher, const operator_type& op) {
|
||||
return make_lw_shared<column_condition>(def, std::move(collection_element), std::move(value),
|
||||
return ::make_shared<column_condition>(def, std::move(collection_element), std::move(value),
|
||||
std::vector<::shared_ptr<term>>{}, std::move(matcher), op);
|
||||
}
|
||||
|
||||
// Helper constructor wrapper for "IF col IN ... and IF col['key'] IN ... */
|
||||
static lw_shared_ptr<column_condition> in_condition(const column_definition& def, ::shared_ptr<term> collection_element,
|
||||
static ::shared_ptr<column_condition> in_condition(const column_definition& def, ::shared_ptr<term> collection_element,
|
||||
::shared_ptr<term> in_marker, std::vector<::shared_ptr<term>> in_values) {
|
||||
return make_lw_shared<column_condition>(def, std::move(collection_element), std::move(in_marker),
|
||||
return ::make_shared<column_condition>(def, std::move(collection_element), std::move(in_marker),
|
||||
std::move(in_values), nullptr, operator_type::IN);
|
||||
}
|
||||
|
||||
@@ -146,9 +146,9 @@ public:
|
||||
* "IF col = 'foo'"
|
||||
* "IF col LIKE 'foo%'"
|
||||
*/
|
||||
static lw_shared_ptr<raw> simple_condition(::shared_ptr<term::raw> value, ::shared_ptr<term::raw> collection_element,
|
||||
static ::shared_ptr<raw> simple_condition(::shared_ptr<term::raw> value, ::shared_ptr<term::raw> collection_element,
|
||||
const operator_type& op) {
|
||||
return make_lw_shared<raw>(std::move(value), std::vector<::shared_ptr<term::raw>>{},
|
||||
return ::make_shared<raw>(std::move(value), std::vector<::shared_ptr<term::raw>>{},
|
||||
::shared_ptr<abstract_marker::in_raw>{}, std::move(collection_element), op);
|
||||
}
|
||||
|
||||
@@ -160,13 +160,13 @@ public:
|
||||
* "IF col['key'] IN * ('foo', 'bar', ...)"
|
||||
* "IF col['key'] IN ?"
|
||||
*/
|
||||
static lw_shared_ptr<raw> in_condition(::shared_ptr<term::raw> collection_element,
|
||||
static ::shared_ptr<raw> in_condition(::shared_ptr<term::raw> collection_element,
|
||||
::shared_ptr<abstract_marker::in_raw> in_marker, std::vector<::shared_ptr<term::raw>> in_values) {
|
||||
return make_lw_shared<raw>(::shared_ptr<term::raw>{}, std::move(in_values), std::move(in_marker),
|
||||
return ::make_shared<raw>(::shared_ptr<term::raw>{}, std::move(in_values), std::move(in_marker),
|
||||
std::move(collection_element), operator_type::IN);
|
||||
}
|
||||
|
||||
lw_shared_ptr<column_condition> prepare(database& db, const sstring& keyspace, const column_definition& receiver) const;
|
||||
::shared_ptr<column_condition> prepare(database& db, const sstring& keyspace, const column_definition& receiver) const;
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
@@ -139,6 +139,16 @@ static inline
|
||||
return def.column_specification->name;
|
||||
}
|
||||
|
||||
static inline
|
||||
std::vector<::shared_ptr<column_identifier>> to_identifiers(const std::vector<const column_definition*>& defs) {
|
||||
std::vector<::shared_ptr<column_identifier>> r;
|
||||
r.reserve(defs.size());
|
||||
for (auto&& def : defs) {
|
||||
r.push_back(to_identifier(*def));
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
namespace std {
|
||||
|
||||
@@ -51,7 +51,7 @@ column_specification::column_specification(std::string_view ks_name_, std::strin
|
||||
{ }
|
||||
|
||||
|
||||
bool column_specification::all_in_same_table(const std::vector<lw_shared_ptr<column_specification>>& names)
|
||||
bool column_specification::all_in_same_table(const std::vector<::shared_ptr<column_specification>>& names)
|
||||
{
|
||||
assert(!names.empty());
|
||||
|
||||
|
||||
@@ -45,6 +45,7 @@
|
||||
|
||||
namespace cql3 {
|
||||
|
||||
class column_specification;
|
||||
class column_identifier;
|
||||
|
||||
class column_specification final {
|
||||
@@ -62,15 +63,15 @@ public:
|
||||
* @param alias the column alias
|
||||
* @return a new <code>ColumnSpecification</code> for the same column but with the specified alias.
|
||||
*/
|
||||
lw_shared_ptr<column_specification> with_alias(::shared_ptr<column_identifier> alias) {
|
||||
return make_lw_shared<column_specification>(ks_name, cf_name, alias, type);
|
||||
::shared_ptr<column_specification> with_alias(::shared_ptr<column_identifier> alias) {
|
||||
return ::make_shared<column_specification>(ks_name, cf_name, alias, type);
|
||||
}
|
||||
|
||||
bool is_reversed_type() const {
|
||||
return ::dynamic_pointer_cast<const reversed_type_impl>(type) != nullptr;
|
||||
}
|
||||
|
||||
static bool all_in_same_table(const std::vector<lw_shared_ptr<column_specification>>& names);
|
||||
static bool all_in_same_table(const std::vector<::shared_ptr<column_specification>>& names);
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
@@ -82,9 +82,9 @@ constants::literal::parsed_value(data_type validator) const
|
||||
}
|
||||
|
||||
assignment_testable::test_result
|
||||
constants::literal::test_assignment(database& db, const sstring& keyspace, const column_specification& receiver) const
|
||||
constants::literal::test_assignment(database& db, const sstring& keyspace, ::shared_ptr<column_specification> receiver) const
|
||||
{
|
||||
auto receiver_type = receiver.type->as_cql3_type();
|
||||
auto receiver_type = receiver->type->as_cql3_type();
|
||||
if (receiver_type.is_collection() || receiver_type.is_user_type()) {
|
||||
return test_result::NOT_ASSIGNABLE;
|
||||
}
|
||||
@@ -155,9 +155,9 @@ constants::literal::test_assignment(database& db, const sstring& keyspace, const
|
||||
}
|
||||
|
||||
::shared_ptr<term>
|
||||
constants::literal::prepare(database& db, const sstring& keyspace, lw_shared_ptr<column_specification> receiver) const
|
||||
constants::literal::prepare(database& db, const sstring& keyspace, ::shared_ptr<column_specification> receiver) const
|
||||
{
|
||||
if (!is_assignable(test_assignment(db, keyspace, *receiver))) {
|
||||
if (!is_assignable(test_assignment(db, keyspace, receiver))) {
|
||||
throw exceptions::invalid_request_exception(format("Invalid {} constant ({}) for \"{}\" of type {}",
|
||||
_type, _text, *receiver->name, receiver->type->as_cql3_type().to_string()));
|
||||
}
|
||||
|
||||
@@ -87,8 +87,8 @@ public:
|
||||
};
|
||||
public:
|
||||
static thread_local const ::shared_ptr<terminal> NULL_VALUE;
|
||||
virtual ::shared_ptr<term> prepare(database& db, const sstring& keyspace, lw_shared_ptr<column_specification> receiver) const override {
|
||||
if (!is_assignable(test_assignment(db, keyspace, *receiver))) {
|
||||
virtual ::shared_ptr<term> prepare(database& db, const sstring& keyspace, ::shared_ptr<column_specification> receiver) const override {
|
||||
if (!is_assignable(test_assignment(db, keyspace, receiver))) {
|
||||
throw exceptions::invalid_request_exception("Invalid null value for counter increment/decrement");
|
||||
}
|
||||
return NULL_VALUE;
|
||||
@@ -96,8 +96,8 @@ public:
|
||||
|
||||
virtual assignment_testable::test_result test_assignment(database& db,
|
||||
const sstring& keyspace,
|
||||
const column_specification& receiver) const override {
|
||||
return receiver.type->is_counter()
|
||||
::shared_ptr<column_specification> receiver) const override {
|
||||
return receiver->type->is_counter()
|
||||
? assignment_testable::test_result::NOT_ASSIGNABLE
|
||||
: assignment_testable::test_result::WEAKLY_ASSIGNABLE;
|
||||
}
|
||||
@@ -153,7 +153,7 @@ public:
|
||||
return ::make_shared<literal>(type::DURATION, text);
|
||||
}
|
||||
|
||||
virtual ::shared_ptr<term> prepare(database& db, const sstring& keyspace, lw_shared_ptr<column_specification> receiver) const override;
|
||||
virtual ::shared_ptr<term> prepare(database& db, const sstring& keyspace, ::shared_ptr<column_specification> receiver) const override;
|
||||
private:
|
||||
bytes parsed_value(data_type validator) const;
|
||||
public:
|
||||
@@ -161,7 +161,7 @@ public:
|
||||
return _text;
|
||||
}
|
||||
|
||||
virtual assignment_testable::test_result test_assignment(database& db, const sstring& keyspace, const column_specification& receiver) const;
|
||||
virtual assignment_testable::test_result test_assignment(database& db, const sstring& keyspace, ::shared_ptr<column_specification> receiver) const;
|
||||
|
||||
virtual sstring to_string() const override {
|
||||
return _type == type::STRING ? sstring(format("'{}'", _text)) : _text;
|
||||
@@ -170,7 +170,7 @@ public:
|
||||
|
||||
class marker : public abstract_marker {
|
||||
public:
|
||||
marker(int32_t bind_index, lw_shared_ptr<column_specification> receiver)
|
||||
marker(int32_t bind_index, ::shared_ptr<column_specification> receiver)
|
||||
: abstract_marker{bind_index, std::move(receiver)}
|
||||
{
|
||||
assert(!_receiver->type->is_collection() && !_receiver->type->is_user_type());
|
||||
|
||||
@@ -95,6 +95,10 @@ public:
|
||||
return _name.keyspace == ks_name && _name.name == function_name;
|
||||
}
|
||||
|
||||
virtual bool has_reference_to(function& f) const override {
|
||||
return false;
|
||||
}
|
||||
|
||||
virtual sstring column_name(const std::vector<sstring>& column_names) const override {
|
||||
return format("{}({})", _name, join(", ", column_names));
|
||||
}
|
||||
|
||||
@@ -144,6 +144,10 @@ public:
|
||||
return false;
|
||||
}
|
||||
|
||||
virtual bool has_reference_to(function& f) const override {
|
||||
return false;
|
||||
}
|
||||
|
||||
virtual sstring column_name(const std::vector<sstring>& column_names) const override {
|
||||
return "[json]";
|
||||
}
|
||||
|
||||
@@ -63,7 +63,7 @@ public:
|
||||
};
|
||||
|
||||
shared_ptr<function> make_castas_function(data_type to_type, data_type from_type, castas_fctn func) {
|
||||
return ::make_shared<castas_function_for>(std::move(to_type), std::move(from_type), func);
|
||||
return ::make_shared<castas_function_for>(std::move(to_type), std::move(from_type), std::move(func));
|
||||
}
|
||||
|
||||
} /* Anonymous Namespace */
|
||||
@@ -73,22 +73,22 @@ shared_ptr<function> make_castas_function(data_type to_type, data_type from_type
|
||||
*/
|
||||
namespace {
|
||||
|
||||
static data_value identity_castas_fctn(data_value val) {
|
||||
return val;
|
||||
}
|
||||
|
||||
using bytes_opt = std::optional<bytes>;
|
||||
|
||||
template<typename ToType, typename FromType>
|
||||
static data_value castas_fctn_simple(data_value from) {
|
||||
auto val_from = value_cast<FromType>(from);
|
||||
return static_cast<ToType>(val_from);
|
||||
std::function<data_value(data_value)> make_castas_fctn_simple() {
|
||||
return [](data_value from) -> data_value {
|
||||
auto val_from = value_cast<FromType>(from);
|
||||
return static_cast<ToType>(val_from);
|
||||
};
|
||||
}
|
||||
|
||||
template<typename ToType>
|
||||
static data_value castas_fctn_from_decimal_to_float(data_value from) {
|
||||
auto val_from = value_cast<big_decimal>(from);
|
||||
return static_cast<ToType>(val_from.as_rational());
|
||||
std::function<data_value(data_value)> make_castas_fctn_from_decimal_to_float() {
|
||||
return [](data_value from) -> data_value {
|
||||
auto val_from = value_cast<big_decimal>(from);
|
||||
return static_cast<ToType>(val_from.as_rational());
|
||||
};
|
||||
}
|
||||
|
||||
static utils::multiprecision_int from_decimal_to_cppint(const data_value& from) {
|
||||
@@ -98,44 +98,60 @@ static utils::multiprecision_int from_decimal_to_cppint(const data_value& from)
|
||||
}
|
||||
|
||||
template<typename ToType>
|
||||
static data_value castas_fctn_from_varint_to_integer(data_value from) {
|
||||
const auto& varint = value_cast<utils::multiprecision_int>(from);
|
||||
return static_cast<ToType>(from_varint_to_integer(varint));
|
||||
std::function<data_value(data_value)> make_castas_fctn_from_varint_to_integer() {
|
||||
return [](data_value from) -> data_value {
|
||||
const auto& varint = value_cast<utils::multiprecision_int>(from);
|
||||
return static_cast<ToType>(from_varint_to_integer(varint));
|
||||
};
|
||||
}
|
||||
|
||||
template<typename ToType>
|
||||
static data_value castas_fctn_from_decimal_to_integer(data_value from) {
|
||||
auto varint = from_decimal_to_cppint(from);
|
||||
return static_cast<ToType>(from_varint_to_integer(varint));
|
||||
std::function<data_value(data_value)> make_castas_fctn_from_decimal_to_integer() {
|
||||
return [](data_value from) -> data_value {
|
||||
auto varint = from_decimal_to_cppint(from);
|
||||
return static_cast<ToType>(from_varint_to_integer(varint));
|
||||
};
|
||||
}
|
||||
|
||||
static data_value castas_fctn_from_decimal_to_varint(data_value from) {
|
||||
return from_decimal_to_cppint(from);
|
||||
std::function<data_value(data_value)> make_castas_fctn_from_decimal_to_varint() {
|
||||
return [](data_value from) -> data_value {
|
||||
return from_decimal_to_cppint(from);
|
||||
};
|
||||
}
|
||||
|
||||
template<typename FromType>
|
||||
static data_value castas_fctn_from_integer_to_decimal(data_value from) {
|
||||
auto val_from = value_cast<FromType>(from);
|
||||
return big_decimal(1, 10*static_cast<boost::multiprecision::cpp_int>(val_from));
|
||||
std::function<data_value(data_value)> make_castas_fctn_from_integer_to_decimal() {
|
||||
return [](data_value from) -> data_value {
|
||||
auto val_from = value_cast<FromType>(from);
|
||||
return big_decimal(1, 10*static_cast<boost::multiprecision::cpp_int>(val_from));
|
||||
};
|
||||
}
|
||||
|
||||
template<typename FromType>
|
||||
static data_value castas_fctn_from_float_to_decimal(data_value from) {
|
||||
auto val_from = value_cast<FromType>(from);
|
||||
return big_decimal(boost::lexical_cast<std::string>(val_from));
|
||||
std::function<data_value(data_value)> make_castas_fctn_from_float_to_decimal() {
|
||||
return [](data_value from) -> data_value {
|
||||
auto val_from = value_cast<FromType>(from);
|
||||
return big_decimal(boost::lexical_cast<std::string>(val_from));
|
||||
};
|
||||
}
|
||||
|
||||
template<typename FromType>
|
||||
static data_value castas_fctn_to_string(data_value from) {
|
||||
return to_sstring(value_cast<FromType>(from));
|
||||
std::function<data_value(data_value)> make_castas_fctn_to_string() {
|
||||
return [](data_value from) -> data_value {
|
||||
return to_sstring(value_cast<FromType>(from));
|
||||
};
|
||||
}
|
||||
|
||||
static data_value castas_fctn_from_varint_to_string(data_value from) {
|
||||
return to_sstring(value_cast<utils::multiprecision_int>(from).str());
|
||||
std::function<data_value(data_value)> make_castas_fctn_from_varint_to_string() {
|
||||
return [](data_value from) -> data_value {
|
||||
return to_sstring(value_cast<utils::multiprecision_int>(from).str());
|
||||
};
|
||||
}
|
||||
|
||||
static data_value castas_fctn_from_decimal_to_string(data_value from) {
|
||||
return value_cast<big_decimal>(from).to_string();
|
||||
std::function<data_value(data_value)> make_castas_fctn_from_decimal_to_string() {
|
||||
return [](data_value from) -> data_value {
|
||||
return value_cast<big_decimal>(from).to_string();
|
||||
};
|
||||
}
|
||||
|
||||
db_clock::time_point millis_to_time_point(const int64_t millis) {
|
||||
@@ -158,237 +174,178 @@ db_clock::time_point date_to_time_point(const uint32_t date) {
|
||||
return db_clock::time_point(std::chrono::duration_cast<db_clock::duration>(millis));
|
||||
}
|
||||
|
||||
static data_value castas_fctn_from_timestamp_to_date(data_value from) {
|
||||
const auto val_from = value_cast<db_clock::time_point>(from);
|
||||
return time_point_to_date(val_from);
|
||||
std::function<data_value(data_value)> make_castas_fctn_from_timestamp_to_date() {
|
||||
return [](data_value from) -> data_value {
|
||||
const auto val_from = value_cast<db_clock::time_point>(from);
|
||||
return time_point_to_date(val_from);
|
||||
};
|
||||
}
|
||||
|
||||
static data_value castas_fctn_from_date_to_timestamp(data_value from) {
|
||||
const auto val_from = value_cast<uint32_t>(from);
|
||||
return date_to_time_point(val_from);
|
||||
std::function<data_value(data_value)> make_castas_fctn_from_date_to_timestamp() {
|
||||
return [](data_value from) -> data_value {
|
||||
const auto val_from = value_cast<uint32_t>(from);
|
||||
return date_to_time_point(val_from);
|
||||
};
|
||||
}
|
||||
|
||||
static data_value castas_fctn_from_timeuuid_to_timestamp(data_value from) {
|
||||
const auto val_from = value_cast<utils::UUID>(from);
|
||||
return db_clock::time_point{db_clock::duration{utils::UUID_gen::unix_timestamp(val_from)}};
|
||||
std::function<data_value(data_value)> make_castas_fctn_from_timeuuid_to_timestamp() {
|
||||
return [](data_value from) -> data_value {
|
||||
const auto val_from = value_cast<utils::UUID>(from);
|
||||
return db_clock::time_point{db_clock::duration{utils::UUID_gen::unix_timestamp(val_from)}};
|
||||
};
|
||||
}
|
||||
|
||||
static data_value castas_fctn_from_timeuuid_to_date(data_value from) {
|
||||
const auto val_from = value_cast<utils::UUID>(from);
|
||||
return time_point_to_date(millis_to_time_point(utils::UUID_gen::unix_timestamp(val_from)));
|
||||
std::function<data_value(data_value)> make_castas_fctn_from_timeuuid_to_date() {
|
||||
return [](data_value from) -> data_value {
|
||||
const auto val_from = value_cast<utils::UUID>(from);
|
||||
return time_point_to_date(millis_to_time_point(utils::UUID_gen::unix_timestamp(val_from)));
|
||||
};
|
||||
}
|
||||
|
||||
static data_value castas_fctn_from_dv_to_string(data_value from) {
|
||||
return from.type()->to_string_impl(from);
|
||||
static std::function<data_value(data_value)> make_castas_fctn_from_dv_to_string() {
|
||||
return [](data_value from) -> data_value {
|
||||
return from.type()->to_string_impl(from);
|
||||
};
|
||||
}
|
||||
|
||||
// FIXME: Add conversions for counters, after they are fully implemented...
|
||||
|
||||
static constexpr unsigned next_power_of_2(unsigned val) {
|
||||
unsigned ret = 1;
|
||||
while (ret <= val) {
|
||||
ret *= 2;
|
||||
// Map <ToType, FromType> -> castas_fctn
|
||||
using castas_fctn_key = std::pair<data_type, data_type>;
|
||||
struct castas_fctn_hash {
|
||||
std::size_t operator()(const castas_fctn_key& x) const noexcept {
|
||||
return boost::hash_value(x);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
};
|
||||
using castas_fctns_map = std::unordered_map<castas_fctn_key, castas_fctn, castas_fctn_hash>;
|
||||
|
||||
// List of supported castas functions...
|
||||
thread_local castas_fctns_map castas_fctns {
|
||||
{ {byte_type, byte_type}, make_castas_fctn_simple<int8_t, int8_t>() },
|
||||
{ {byte_type, short_type}, make_castas_fctn_simple<int8_t, int16_t>() },
|
||||
{ {byte_type, int32_type}, make_castas_fctn_simple<int8_t, int32_t>() },
|
||||
{ {byte_type, long_type}, make_castas_fctn_simple<int8_t, int64_t>() },
|
||||
{ {byte_type, float_type}, make_castas_fctn_simple<int8_t, float>() },
|
||||
{ {byte_type, double_type}, make_castas_fctn_simple<int8_t, double>() },
|
||||
{ {byte_type, varint_type}, make_castas_fctn_from_varint_to_integer<int8_t>() },
|
||||
{ {byte_type, decimal_type}, make_castas_fctn_from_decimal_to_integer<int8_t>() },
|
||||
|
||||
{ {short_type, byte_type}, make_castas_fctn_simple<int16_t, int8_t>() },
|
||||
{ {short_type, short_type}, make_castas_fctn_simple<int16_t, int16_t>() },
|
||||
{ {short_type, int32_type}, make_castas_fctn_simple<int16_t, int32_t>() },
|
||||
{ {short_type, long_type}, make_castas_fctn_simple<int16_t, int64_t>() },
|
||||
{ {short_type, float_type}, make_castas_fctn_simple<int16_t, float>() },
|
||||
{ {short_type, double_type}, make_castas_fctn_simple<int16_t, double>() },
|
||||
{ {short_type, varint_type}, make_castas_fctn_from_varint_to_integer<int16_t>() },
|
||||
{ {short_type, decimal_type}, make_castas_fctn_from_decimal_to_integer<int16_t>() },
|
||||
|
||||
{ {int32_type, byte_type}, make_castas_fctn_simple<int32_t, int8_t>() },
|
||||
{ {int32_type, short_type}, make_castas_fctn_simple<int32_t, int16_t>() },
|
||||
{ {int32_type, int32_type}, make_castas_fctn_simple<int32_t, int32_t>() },
|
||||
{ {int32_type, long_type}, make_castas_fctn_simple<int32_t, int64_t>() },
|
||||
{ {int32_type, float_type}, make_castas_fctn_simple<int32_t, float>() },
|
||||
{ {int32_type, double_type}, make_castas_fctn_simple<int32_t, double>() },
|
||||
{ {int32_type, varint_type}, make_castas_fctn_from_varint_to_integer<int32_t>() },
|
||||
{ {int32_type, decimal_type}, make_castas_fctn_from_decimal_to_integer<int32_t>() },
|
||||
|
||||
{ {long_type, byte_type}, make_castas_fctn_simple<int64_t, int8_t>() },
|
||||
{ {long_type, short_type}, make_castas_fctn_simple<int64_t, int16_t>() },
|
||||
{ {long_type, int32_type}, make_castas_fctn_simple<int64_t, int32_t>() },
|
||||
{ {long_type, long_type}, make_castas_fctn_simple<int64_t, int64_t>() },
|
||||
{ {long_type, float_type}, make_castas_fctn_simple<int64_t, float>() },
|
||||
{ {long_type, double_type}, make_castas_fctn_simple<int64_t, double>() },
|
||||
{ {long_type, varint_type}, make_castas_fctn_from_varint_to_integer<int64_t>() },
|
||||
{ {long_type, decimal_type}, make_castas_fctn_from_decimal_to_integer<int64_t>() },
|
||||
|
||||
{ {float_type, byte_type}, make_castas_fctn_simple<float, int8_t>() },
|
||||
{ {float_type, short_type}, make_castas_fctn_simple<float, int16_t>() },
|
||||
{ {float_type, int32_type}, make_castas_fctn_simple<float, int32_t>() },
|
||||
{ {float_type, long_type}, make_castas_fctn_simple<float, int64_t>() },
|
||||
{ {float_type, float_type}, make_castas_fctn_simple<float, float>() },
|
||||
{ {float_type, double_type}, make_castas_fctn_simple<float, double>() },
|
||||
{ {float_type, varint_type}, make_castas_fctn_simple<float, utils::multiprecision_int>() },
|
||||
{ {float_type, decimal_type}, make_castas_fctn_from_decimal_to_float<float>() },
|
||||
|
||||
{ {double_type, byte_type}, make_castas_fctn_simple<double, int8_t>() },
|
||||
{ {double_type, short_type}, make_castas_fctn_simple<double, int16_t>() },
|
||||
{ {double_type, int32_type}, make_castas_fctn_simple<double, int32_t>() },
|
||||
{ {double_type, long_type}, make_castas_fctn_simple<double, int64_t>() },
|
||||
{ {double_type, float_type}, make_castas_fctn_simple<double, float>() },
|
||||
{ {double_type, double_type}, make_castas_fctn_simple<double, double>() },
|
||||
{ {double_type, varint_type}, make_castas_fctn_simple<double, utils::multiprecision_int>() },
|
||||
{ {double_type, decimal_type}, make_castas_fctn_from_decimal_to_float<double>() },
|
||||
|
||||
{ {varint_type, byte_type}, make_castas_fctn_simple<utils::multiprecision_int, int8_t>() },
|
||||
{ {varint_type, short_type}, make_castas_fctn_simple<utils::multiprecision_int, int16_t>() },
|
||||
{ {varint_type, int32_type}, make_castas_fctn_simple<utils::multiprecision_int, int32_t>() },
|
||||
{ {varint_type, long_type}, make_castas_fctn_simple<utils::multiprecision_int, int64_t>() },
|
||||
{ {varint_type, float_type}, make_castas_fctn_simple<utils::multiprecision_int, float>() },
|
||||
{ {varint_type, double_type}, make_castas_fctn_simple<utils::multiprecision_int, double>() },
|
||||
{ {varint_type, varint_type}, make_castas_fctn_simple<utils::multiprecision_int, utils::multiprecision_int>() },
|
||||
{ {varint_type, decimal_type}, make_castas_fctn_from_decimal_to_varint() },
|
||||
|
||||
{ {decimal_type, byte_type}, make_castas_fctn_from_integer_to_decimal<int8_t>() },
|
||||
{ {decimal_type, short_type}, make_castas_fctn_from_integer_to_decimal<int16_t>() },
|
||||
{ {decimal_type, int32_type}, make_castas_fctn_from_integer_to_decimal<int32_t>() },
|
||||
{ {decimal_type, long_type}, make_castas_fctn_from_integer_to_decimal<int64_t>() },
|
||||
{ {decimal_type, float_type}, make_castas_fctn_from_float_to_decimal<float>() },
|
||||
{ {decimal_type, double_type}, make_castas_fctn_from_float_to_decimal<double>() },
|
||||
{ {decimal_type, varint_type}, make_castas_fctn_from_integer_to_decimal<utils::multiprecision_int>() },
|
||||
{ {decimal_type, decimal_type}, make_castas_fctn_simple<big_decimal, big_decimal>() },
|
||||
|
||||
{ {ascii_type, byte_type}, make_castas_fctn_to_string<int8_t>() },
|
||||
{ {ascii_type, short_type}, make_castas_fctn_to_string<int16_t>() },
|
||||
{ {ascii_type, int32_type}, make_castas_fctn_to_string<int32_t>() },
|
||||
{ {ascii_type, long_type}, make_castas_fctn_to_string<int64_t>() },
|
||||
{ {ascii_type, float_type}, make_castas_fctn_to_string<float>() },
|
||||
{ {ascii_type, double_type}, make_castas_fctn_to_string<double>() },
|
||||
{ {ascii_type, varint_type}, make_castas_fctn_from_varint_to_string() },
|
||||
{ {ascii_type, decimal_type}, make_castas_fctn_from_decimal_to_string() },
|
||||
|
||||
{ {utf8_type, byte_type}, make_castas_fctn_to_string<int8_t>() },
|
||||
{ {utf8_type, short_type}, make_castas_fctn_to_string<int16_t>() },
|
||||
{ {utf8_type, int32_type}, make_castas_fctn_to_string<int32_t>() },
|
||||
{ {utf8_type, long_type}, make_castas_fctn_to_string<int64_t>() },
|
||||
{ {utf8_type, float_type}, make_castas_fctn_to_string<float>() },
|
||||
{ {utf8_type, double_type}, make_castas_fctn_to_string<double>() },
|
||||
{ {utf8_type, varint_type}, make_castas_fctn_from_varint_to_string() },
|
||||
{ {utf8_type, decimal_type}, make_castas_fctn_from_decimal_to_string() },
|
||||
|
||||
{ {simple_date_type, timestamp_type}, make_castas_fctn_from_timestamp_to_date() },
|
||||
{ {simple_date_type, timeuuid_type}, make_castas_fctn_from_timeuuid_to_date() },
|
||||
|
||||
{ {timestamp_type, simple_date_type}, make_castas_fctn_from_date_to_timestamp() },
|
||||
{ {timestamp_type, timeuuid_type}, make_castas_fctn_from_timeuuid_to_timestamp() },
|
||||
|
||||
{ {ascii_type, timestamp_type}, make_castas_fctn_from_dv_to_string() },
|
||||
{ {ascii_type, simple_date_type}, make_castas_fctn_from_dv_to_string() },
|
||||
{ {ascii_type, time_type}, make_castas_fctn_from_dv_to_string() },
|
||||
{ {ascii_type, timeuuid_type}, make_castas_fctn_from_dv_to_string() },
|
||||
{ {ascii_type, uuid_type}, make_castas_fctn_from_dv_to_string() },
|
||||
{ {ascii_type, boolean_type}, make_castas_fctn_from_dv_to_string() },
|
||||
{ {ascii_type, inet_addr_type}, make_castas_fctn_from_dv_to_string() },
|
||||
{ {ascii_type, ascii_type}, make_castas_fctn_simple<sstring, sstring>() },
|
||||
|
||||
{ {utf8_type, timestamp_type}, make_castas_fctn_from_dv_to_string() },
|
||||
{ {utf8_type, simple_date_type}, make_castas_fctn_from_dv_to_string() },
|
||||
{ {utf8_type, time_type}, make_castas_fctn_from_dv_to_string() },
|
||||
{ {utf8_type, timeuuid_type}, make_castas_fctn_from_dv_to_string() },
|
||||
{ {utf8_type, uuid_type}, make_castas_fctn_from_dv_to_string() },
|
||||
{ {utf8_type, boolean_type}, make_castas_fctn_from_dv_to_string() },
|
||||
{ {utf8_type, inet_addr_type}, make_castas_fctn_from_dv_to_string() },
|
||||
{ {utf8_type, ascii_type}, make_castas_fctn_simple<sstring, sstring>() },
|
||||
{ {utf8_type, utf8_type}, make_castas_fctn_simple<sstring, sstring>() },
|
||||
};
|
||||
|
||||
static constexpr unsigned next_kind_power_of_2 = next_power_of_2(static_cast<unsigned>(abstract_type::kind::last));
|
||||
static constexpr unsigned cast_switch_case_val(abstract_type::kind A, abstract_type::kind B) {
|
||||
return static_cast<unsigned>(A) * next_kind_power_of_2 + static_cast<unsigned>(B);
|
||||
}
|
||||
} /* Anonymous Namespace */
|
||||
|
||||
castas_fctn get_castas_fctn(data_type to_type, data_type from_type) {
|
||||
if (from_type == to_type) {
|
||||
// Casting any type to itself doesn't make sense, but it is
|
||||
// harmless so allow it instead of reporting a confusing error
|
||||
// message about TypeX not being castable to TypeX.
|
||||
return identity_castas_fctn;
|
||||
auto it_candidate = castas_fctns.find(castas_fctn_key{to_type, from_type});
|
||||
if (it_candidate == castas_fctns.end()) {
|
||||
throw exceptions::invalid_request_exception(format("{} cannot be cast to {}", from_type->name(), to_type->name()));
|
||||
}
|
||||
|
||||
using kind = abstract_type::kind;
|
||||
switch(cast_switch_case_val(to_type->get_kind(), from_type->get_kind())) {
|
||||
case cast_switch_case_val(kind::byte, kind::short_kind):
|
||||
return castas_fctn_simple<int8_t, int16_t>;
|
||||
case cast_switch_case_val(kind::byte, kind::int32):
|
||||
return castas_fctn_simple<int8_t, int32_t>;
|
||||
case cast_switch_case_val(kind::byte, kind::long_kind):
|
||||
return castas_fctn_simple<int8_t, int64_t>;
|
||||
case cast_switch_case_val(kind::byte, kind::float_kind):
|
||||
return castas_fctn_simple<int8_t, float>;
|
||||
case cast_switch_case_val(kind::byte, kind::double_kind):
|
||||
return castas_fctn_simple<int8_t, double>;
|
||||
case cast_switch_case_val(kind::byte, kind::varint):
|
||||
return castas_fctn_from_varint_to_integer<int8_t>;
|
||||
case cast_switch_case_val(kind::byte, kind::decimal):
|
||||
return castas_fctn_from_decimal_to_integer<int8_t>;
|
||||
|
||||
case cast_switch_case_val(kind::short_kind, kind::byte):
|
||||
return castas_fctn_simple<int16_t, int8_t>;
|
||||
case cast_switch_case_val(kind::short_kind, kind::int32):
|
||||
return castas_fctn_simple<int16_t, int32_t>;
|
||||
case cast_switch_case_val(kind::short_kind, kind::long_kind):
|
||||
return castas_fctn_simple<int16_t, int64_t>;
|
||||
case cast_switch_case_val(kind::short_kind, kind::float_kind):
|
||||
return castas_fctn_simple<int16_t, float>;
|
||||
case cast_switch_case_val(kind::short_kind, kind::double_kind):
|
||||
return castas_fctn_simple<int16_t, double>;
|
||||
case cast_switch_case_val(kind::short_kind, kind::varint):
|
||||
return castas_fctn_from_varint_to_integer<int16_t>;
|
||||
case cast_switch_case_val(kind::short_kind, kind::decimal):
|
||||
return castas_fctn_from_decimal_to_integer<int16_t>;
|
||||
|
||||
case cast_switch_case_val(kind::int32, kind::byte):
|
||||
return castas_fctn_simple<int32_t, int8_t>;
|
||||
case cast_switch_case_val(kind::int32, kind::short_kind):
|
||||
return castas_fctn_simple<int32_t, int16_t>;
|
||||
case cast_switch_case_val(kind::int32, kind::long_kind):
|
||||
return castas_fctn_simple<int32_t, int64_t>;
|
||||
case cast_switch_case_val(kind::int32, kind::float_kind):
|
||||
return castas_fctn_simple<int32_t, float>;
|
||||
case cast_switch_case_val(kind::int32, kind::double_kind):
|
||||
return castas_fctn_simple<int32_t, double>;
|
||||
case cast_switch_case_val(kind::int32, kind::varint):
|
||||
return castas_fctn_from_varint_to_integer<int32_t>;
|
||||
case cast_switch_case_val(kind::int32, kind::decimal):
|
||||
return castas_fctn_from_decimal_to_integer<int32_t>;
|
||||
|
||||
case cast_switch_case_val(kind::long_kind, kind::byte):
|
||||
return castas_fctn_simple<int64_t, int8_t>;
|
||||
case cast_switch_case_val(kind::long_kind, kind::short_kind):
|
||||
return castas_fctn_simple<int64_t, int16_t>;
|
||||
case cast_switch_case_val(kind::long_kind, kind::int32):
|
||||
return castas_fctn_simple<int64_t, int32_t>;
|
||||
case cast_switch_case_val(kind::long_kind, kind::float_kind):
|
||||
return castas_fctn_simple<int64_t, float>;
|
||||
case cast_switch_case_val(kind::long_kind, kind::double_kind):
|
||||
return castas_fctn_simple<int64_t, double>;
|
||||
case cast_switch_case_val(kind::long_kind, kind::varint):
|
||||
return castas_fctn_from_varint_to_integer<int64_t>;
|
||||
case cast_switch_case_val(kind::long_kind, kind::decimal):
|
||||
return castas_fctn_from_decimal_to_integer<int64_t>;
|
||||
|
||||
case cast_switch_case_val(kind::float_kind, kind::byte):
|
||||
return castas_fctn_simple<float, int8_t>;
|
||||
case cast_switch_case_val(kind::float_kind, kind::short_kind):
|
||||
return castas_fctn_simple<float, int16_t>;
|
||||
case cast_switch_case_val(kind::float_kind, kind::int32):
|
||||
return castas_fctn_simple<float, int32_t>;
|
||||
case cast_switch_case_val(kind::float_kind, kind::long_kind):
|
||||
return castas_fctn_simple<float, int64_t>;
|
||||
case cast_switch_case_val(kind::float_kind, kind::double_kind):
|
||||
return castas_fctn_simple<float, double>;
|
||||
case cast_switch_case_val(kind::float_kind, kind::varint):
|
||||
return castas_fctn_simple<float, utils::multiprecision_int>;
|
||||
case cast_switch_case_val(kind::float_kind, kind::decimal):
|
||||
return castas_fctn_from_decimal_to_float<float>;
|
||||
|
||||
case cast_switch_case_val(kind::double_kind, kind::byte):
|
||||
return castas_fctn_simple<double, int8_t>;
|
||||
case cast_switch_case_val(kind::double_kind, kind::short_kind):
|
||||
return castas_fctn_simple<double, int16_t>;
|
||||
case cast_switch_case_val(kind::double_kind, kind::int32):
|
||||
return castas_fctn_simple<double, int32_t>;
|
||||
case cast_switch_case_val(kind::double_kind, kind::long_kind):
|
||||
return castas_fctn_simple<double, int64_t>;
|
||||
case cast_switch_case_val(kind::double_kind, kind::float_kind):
|
||||
return castas_fctn_simple<double, float>;
|
||||
case cast_switch_case_val(kind::double_kind, kind::varint):
|
||||
return castas_fctn_simple<double, utils::multiprecision_int>;
|
||||
case cast_switch_case_val(kind::double_kind, kind::decimal):
|
||||
return castas_fctn_from_decimal_to_float<double>;
|
||||
|
||||
case cast_switch_case_val(kind::varint, kind::byte):
|
||||
return castas_fctn_simple<utils::multiprecision_int, int8_t>;
|
||||
case cast_switch_case_val(kind::varint, kind::short_kind):
|
||||
return castas_fctn_simple<utils::multiprecision_int, int16_t>;
|
||||
case cast_switch_case_val(kind::varint, kind::int32):
|
||||
return castas_fctn_simple<utils::multiprecision_int, int32_t>;
|
||||
case cast_switch_case_val(kind::varint, kind::long_kind):
|
||||
return castas_fctn_simple<utils::multiprecision_int, int64_t>;
|
||||
case cast_switch_case_val(kind::varint, kind::float_kind):
|
||||
return castas_fctn_simple<utils::multiprecision_int, float>;
|
||||
case cast_switch_case_val(kind::varint, kind::double_kind):
|
||||
return castas_fctn_simple<utils::multiprecision_int, double>;
|
||||
case cast_switch_case_val(kind::varint, kind::decimal):
|
||||
return castas_fctn_from_decimal_to_varint;
|
||||
|
||||
case cast_switch_case_val(kind::decimal, kind::byte):
|
||||
return castas_fctn_from_integer_to_decimal<int8_t>;
|
||||
case cast_switch_case_val(kind::decimal, kind::short_kind):
|
||||
return castas_fctn_from_integer_to_decimal<int16_t>;
|
||||
case cast_switch_case_val(kind::decimal, kind::int32):
|
||||
return castas_fctn_from_integer_to_decimal<int32_t>;
|
||||
case cast_switch_case_val(kind::decimal, kind::long_kind):
|
||||
return castas_fctn_from_integer_to_decimal<int64_t>;
|
||||
case cast_switch_case_val(kind::decimal, kind::float_kind):
|
||||
return castas_fctn_from_float_to_decimal<float>;
|
||||
case cast_switch_case_val(kind::decimal, kind::double_kind):
|
||||
return castas_fctn_from_float_to_decimal<double>;
|
||||
case cast_switch_case_val(kind::decimal, kind::varint):
|
||||
return castas_fctn_from_integer_to_decimal<utils::multiprecision_int>;
|
||||
|
||||
case cast_switch_case_val(kind::ascii, kind::byte):
|
||||
case cast_switch_case_val(kind::utf8, kind::byte):
|
||||
return castas_fctn_to_string<int8_t>;
|
||||
|
||||
case cast_switch_case_val(kind::ascii, kind::short_kind):
|
||||
case cast_switch_case_val(kind::utf8, kind::short_kind):
|
||||
return castas_fctn_to_string<int16_t>;
|
||||
|
||||
case cast_switch_case_val(kind::ascii, kind::int32):
|
||||
case cast_switch_case_val(kind::utf8, kind::int32):
|
||||
return castas_fctn_to_string<int32_t>;
|
||||
|
||||
case cast_switch_case_val(kind::ascii, kind::long_kind):
|
||||
case cast_switch_case_val(kind::utf8, kind::long_kind):
|
||||
return castas_fctn_to_string<int64_t>;
|
||||
|
||||
case cast_switch_case_val(kind::ascii, kind::float_kind):
|
||||
case cast_switch_case_val(kind::utf8, kind::float_kind):
|
||||
return castas_fctn_to_string<float>;
|
||||
|
||||
case cast_switch_case_val(kind::ascii, kind::double_kind):
|
||||
case cast_switch_case_val(kind::utf8, kind::double_kind):
|
||||
return castas_fctn_to_string<double>;
|
||||
|
||||
case cast_switch_case_val(kind::ascii, kind::varint):
|
||||
case cast_switch_case_val(kind::utf8, kind::varint):
|
||||
return castas_fctn_from_varint_to_string;
|
||||
|
||||
case cast_switch_case_val(kind::ascii, kind::decimal):
|
||||
case cast_switch_case_val(kind::utf8, kind::decimal):
|
||||
return castas_fctn_from_decimal_to_string;
|
||||
|
||||
case cast_switch_case_val(kind::simple_date, kind::timestamp):
|
||||
return castas_fctn_from_timestamp_to_date;
|
||||
case cast_switch_case_val(kind::simple_date, kind::timeuuid):
|
||||
return castas_fctn_from_timeuuid_to_date;
|
||||
|
||||
case cast_switch_case_val(kind::timestamp, kind::simple_date):
|
||||
return castas_fctn_from_date_to_timestamp;
|
||||
case cast_switch_case_val(kind::timestamp, kind::timeuuid):
|
||||
return castas_fctn_from_timeuuid_to_timestamp;
|
||||
|
||||
case cast_switch_case_val(kind::ascii, kind::timestamp):
|
||||
case cast_switch_case_val(kind::ascii, kind::simple_date):
|
||||
case cast_switch_case_val(kind::ascii, kind::time):
|
||||
case cast_switch_case_val(kind::ascii, kind::timeuuid):
|
||||
case cast_switch_case_val(kind::ascii, kind::uuid):
|
||||
case cast_switch_case_val(kind::ascii, kind::boolean):
|
||||
case cast_switch_case_val(kind::ascii, kind::inet):
|
||||
case cast_switch_case_val(kind::utf8, kind::timestamp):
|
||||
case cast_switch_case_val(kind::utf8, kind::simple_date):
|
||||
case cast_switch_case_val(kind::utf8, kind::time):
|
||||
case cast_switch_case_val(kind::utf8, kind::timeuuid):
|
||||
case cast_switch_case_val(kind::utf8, kind::uuid):
|
||||
case cast_switch_case_val(kind::utf8, kind::boolean):
|
||||
case cast_switch_case_val(kind::utf8, kind::inet):
|
||||
return castas_fctn_from_dv_to_string;
|
||||
case cast_switch_case_val(kind::utf8, kind::ascii):
|
||||
return castas_fctn_simple<sstring, sstring>;
|
||||
}
|
||||
throw exceptions::invalid_request_exception(format("{} cannot be cast to {}", from_type->name(), to_type->name()));
|
||||
return it_candidate->second;
|
||||
}
|
||||
|
||||
shared_ptr<function> castas_functions::get(data_type to_type, const std::vector<shared_ptr<cql3::selection::selector>>& provided_args) {
|
||||
|
||||
@@ -58,7 +58,7 @@ namespace functions {
|
||||
* Support for CAST(. AS .) functions.
|
||||
*/
|
||||
|
||||
using castas_fctn = data_value(*)(data_value);
|
||||
using castas_fctn = std::function<data_value(data_value)>;
|
||||
|
||||
castas_fctn get_castas_fctn(data_type to_type, data_type from_type);
|
||||
|
||||
|
||||
@@ -1,122 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2019 ScyllaDB
|
||||
*
|
||||
* Modified by ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "error_injection_fcts.hh"
|
||||
#include "utils/error_injection.hh"
|
||||
#include "types/list.hh"
|
||||
|
||||
namespace cql3
|
||||
{
|
||||
|
||||
namespace functions
|
||||
{
|
||||
|
||||
namespace error_injection
|
||||
{
|
||||
|
||||
namespace
|
||||
{
|
||||
|
||||
template <typename Func, bool Pure>
|
||||
class failure_injection_function_for : public failure_injection_function {
|
||||
Func _func;
|
||||
public:
|
||||
failure_injection_function_for(sstring name,
|
||||
data_type return_type,
|
||||
const std::vector<data_type> arg_types,
|
||||
Func&& func)
|
||||
: failure_injection_function(std::move(name), std::move(return_type), std::move(arg_types))
|
||||
, _func(std::forward<Func>(func)) {}
|
||||
|
||||
bool is_pure() const override {
|
||||
return Pure;
|
||||
}
|
||||
|
||||
bytes_opt execute(cql_serialization_format sf, const std::vector<bytes_opt>& parameters) override {
|
||||
return _func(sf, parameters);
|
||||
}
|
||||
};
|
||||
|
||||
template <bool Pure, typename Func>
|
||||
shared_ptr<function>
|
||||
make_failure_injection_function(sstring name,
|
||||
data_type return_type,
|
||||
std::vector<data_type> args_type,
|
||||
Func&& func) {
|
||||
return ::make_shared<failure_injection_function_for<Func, Pure>>(std::move(name),
|
||||
std::move(return_type),
|
||||
std::move(args_type),
|
||||
std::forward<Func>(func));
|
||||
}
|
||||
|
||||
} // anonymous namespace
|
||||
|
||||
shared_ptr<function> make_enable_injection_function() {
|
||||
return make_failure_injection_function<false>("enable_injection", empty_type, { ascii_type, ascii_type },
|
||||
[] (cql_serialization_format, const std::vector<bytes_opt>& parameters) {
|
||||
sstring injection_name = ascii_type->get_string(parameters[0].value());
|
||||
const bool one_shot = ascii_type->get_string(parameters[1].value()) == "true";
|
||||
smp::invoke_on_all([injection_name, one_shot] () mutable {
|
||||
utils::get_local_injector().enable(injection_name, one_shot);
|
||||
}).get0();
|
||||
return std::nullopt;
|
||||
});
|
||||
}
|
||||
|
||||
shared_ptr<function> make_disable_injection_function() {
|
||||
return make_failure_injection_function<false>("disable_injection", empty_type, { ascii_type },
|
||||
[] (cql_serialization_format, const std::vector<bytes_opt>& parameters) {
|
||||
sstring injection_name = ascii_type->get_string(parameters[0].value());
|
||||
smp::invoke_on_all([injection_name] () mutable {
|
||||
utils::get_local_injector().disable(injection_name);
|
||||
}).get0();
|
||||
return std::nullopt;
|
||||
});
|
||||
}
|
||||
|
||||
shared_ptr<function> make_enabled_injections_function() {
|
||||
const auto list_type_inst = list_type_impl::get_instance(ascii_type, false);
|
||||
return make_failure_injection_function<true>("enabled_injections", list_type_inst, {},
|
||||
[list_type_inst] (cql_serialization_format, const std::vector<bytes_opt>&) -> bytes {
|
||||
return seastar::map_reduce(smp::all_cpus(), [] (unsigned) {
|
||||
return make_ready_future<std::vector<sstring>>(utils::get_local_injector().enabled_injections());
|
||||
}, std::vector<data_value>(),
|
||||
[](std::vector<data_value> a, std::vector<sstring>&& b) -> std::vector<data_value> {
|
||||
for (auto&& x : b) {
|
||||
if (a.end() == std::find(a.begin(), a.end(), x)) {
|
||||
a.push_back(data_value(std::move(x)));
|
||||
}
|
||||
}
|
||||
return a;
|
||||
}).then([list_type_inst](std::vector<data_value> const& active_injections) {
|
||||
auto list_val = make_list_value(list_type_inst, active_injections);
|
||||
return list_type_inst->decompose(list_val);
|
||||
}).get0();
|
||||
});
|
||||
}
|
||||
|
||||
} // namespace error_injection
|
||||
|
||||
} // namespace functions
|
||||
|
||||
} // namespace cql3
|
||||
@@ -1,56 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2019 ScyllaDB
|
||||
*
|
||||
* Modified by ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "native_scalar_function.hh"
|
||||
|
||||
namespace cql3
|
||||
{
|
||||
|
||||
namespace functions
|
||||
{
|
||||
|
||||
namespace error_injection
|
||||
{
|
||||
|
||||
class failure_injection_function : public native_scalar_function {
|
||||
protected:
|
||||
failure_injection_function(sstring name, data_type return_type, std::vector<data_type> args_type)
|
||||
: native_scalar_function(std::move(name), std::move(return_type), std::move(args_type)) {
|
||||
}
|
||||
|
||||
bool requires_thread() const override {
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
shared_ptr<function> make_enable_injection_function();
|
||||
shared_ptr<function> make_disable_injection_function();
|
||||
shared_ptr<function> make_enabled_injections_function();
|
||||
|
||||
} // namespace error_injection
|
||||
|
||||
} // namespace functions
|
||||
|
||||
} // namespace cql3
|
||||
@@ -82,6 +82,7 @@ public:
|
||||
|
||||
virtual void print(std::ostream& os) const = 0;
|
||||
virtual bool uses_function(const sstring& ks_name, const sstring& function_name) const = 0;
|
||||
virtual bool has_reference_to(function& f) const = 0;
|
||||
|
||||
/**
|
||||
* Returns the name of the function to use within a ResultSet.
|
||||
|
||||
@@ -74,12 +74,12 @@ public:
|
||||
raw(function_name name, std::vector<shared_ptr<term::raw>> terms)
|
||||
: _name(std::move(name)), _terms(std::move(terms)) {
|
||||
}
|
||||
virtual ::shared_ptr<term> prepare(database& db, const sstring& keyspace, lw_shared_ptr<column_specification> receiver) const override;
|
||||
virtual ::shared_ptr<term> prepare(database& db, const sstring& keyspace, ::shared_ptr<column_specification> receiver) const override;
|
||||
private:
|
||||
// All parameters must be terminal
|
||||
static bytes_opt execute(scalar_function& fun, std::vector<shared_ptr<term>> parameters);
|
||||
public:
|
||||
virtual assignment_testable::test_result test_assignment(database& db, const sstring& keyspace, const column_specification& receiver) const override;
|
||||
virtual assignment_testable::test_result test_assignment(database& db, const sstring& keyspace, shared_ptr<column_specification> receiver) const override;
|
||||
virtual sstring to_string() const override;
|
||||
};
|
||||
};
|
||||
|
||||
@@ -37,8 +37,6 @@
|
||||
#include "concrete_types.hh"
|
||||
#include "as_json_function.hh"
|
||||
|
||||
#include "error_injection_fcts.hh"
|
||||
|
||||
namespace std {
|
||||
std::ostream& operator<<(std::ostream& os, const std::vector<data_type>& arg_types) {
|
||||
for (size_t i = 0; i < arg_types.size(); ++i) {
|
||||
@@ -109,10 +107,6 @@ functions::init() {
|
||||
declare(make_blob_as_varchar_fct());
|
||||
add_agg_functions(ret);
|
||||
|
||||
declare(error_injection::make_enable_injection_function());
|
||||
declare(error_injection::make_disable_injection_function());
|
||||
declare(error_injection::make_enabled_injections_function());
|
||||
|
||||
// also needed for smp:
|
||||
#if 0
|
||||
MigrationManager.instance.register(new FunctionsMigrationListener());
|
||||
@@ -147,17 +141,22 @@ void functions::remove_function(const function_name& name, const std::vector<dat
|
||||
with_udf_iter(name, arg_types, [] (functions::declared_t::iterator i) { _declared.erase(i); });
|
||||
}
|
||||
|
||||
lw_shared_ptr<column_specification>
|
||||
shared_ptr<column_specification>
|
||||
functions::make_arg_spec(const sstring& receiver_ks, const sstring& receiver_cf,
|
||||
const function& fun, size_t i) {
|
||||
auto&& name = boost::lexical_cast<std::string>(fun.name());
|
||||
std::transform(name.begin(), name.end(), name.begin(), ::tolower);
|
||||
return make_lw_shared<column_specification>(receiver_ks,
|
||||
return ::make_shared<column_specification>(receiver_ks,
|
||||
receiver_cf,
|
||||
::make_shared<column_identifier>(format("arg{:d}({})", i, name), true),
|
||||
fun.arg_types()[i]);
|
||||
}
|
||||
|
||||
int
|
||||
functions::get_overload_count(const function_name& name) {
|
||||
return _declared.count(name);
|
||||
}
|
||||
|
||||
inline
|
||||
shared_ptr<function>
|
||||
make_to_json_function(data_type t) {
|
||||
@@ -188,7 +187,7 @@ functions::get(database& db,
|
||||
const std::vector<shared_ptr<assignment_testable>>& provided_args,
|
||||
const sstring& receiver_ks,
|
||||
const sstring& receiver_cf,
|
||||
const column_specification* receiver) {
|
||||
shared_ptr<column_specification> receiver) {
|
||||
|
||||
static const function_name TOKEN_FUNCTION_NAME = function_name::native_function("token");
|
||||
static const function_name TO_JSON_FUNCTION_NAME = function_name::native_function("tojson");
|
||||
@@ -371,7 +370,7 @@ functions::validate_types(database& db,
|
||||
}
|
||||
|
||||
auto&& expected = make_arg_spec(receiver_ks, receiver_cf, *fun, i);
|
||||
if (!is_assignable(provided->test_assignment(db, keyspace, *expected))) {
|
||||
if (!is_assignable(provided->test_assignment(db, keyspace, expected))) {
|
||||
throw exceptions::invalid_request_exception(
|
||||
format("Type error: {} cannot be passed as argument {:d} of function {} of type {}",
|
||||
provided, i, fun->name(), expected->type->as_cql3_type()));
|
||||
@@ -398,7 +397,7 @@ functions::match_arguments(database& db, const sstring& keyspace,
|
||||
continue;
|
||||
}
|
||||
auto&& expected = make_arg_spec(receiver_ks, receiver_cf, *fun, i);
|
||||
auto arg_res = provided->test_assignment(db, keyspace, *expected);
|
||||
auto arg_res = provided->test_assignment(db, keyspace, expected);
|
||||
if (arg_res == assignment_testable::test_result::NOT_ASSIGNABLE) {
|
||||
return assignment_testable::test_result::NOT_ASSIGNABLE;
|
||||
}
|
||||
@@ -508,14 +507,14 @@ function_call::make_terminal(shared_ptr<function> fun, cql3::raw_value result, c
|
||||
}
|
||||
|
||||
::shared_ptr<term>
|
||||
function_call::raw::prepare(database& db, const sstring& keyspace, lw_shared_ptr<column_specification> receiver) const {
|
||||
function_call::raw::prepare(database& db, const sstring& keyspace, ::shared_ptr<column_specification> receiver) const {
|
||||
std::vector<shared_ptr<assignment_testable>> args;
|
||||
args.reserve(_terms.size());
|
||||
std::transform(_terms.begin(), _terms.end(), std::back_inserter(args),
|
||||
[] (auto&& x) -> shared_ptr<assignment_testable> {
|
||||
return x;
|
||||
});
|
||||
auto&& fun = functions::functions::get(db, keyspace, _name, args, receiver->ks_name, receiver->cf_name, receiver.get());
|
||||
auto&& fun = functions::functions::get(db, keyspace, _name, args, receiver->ks_name, receiver->cf_name, receiver);
|
||||
if (!fun) {
|
||||
throw exceptions::invalid_request_exception(format("Unknown function {} called", _name));
|
||||
}
|
||||
@@ -573,16 +572,16 @@ function_call::raw::execute(scalar_function& fun, std::vector<shared_ptr<term>>
|
||||
}
|
||||
|
||||
assignment_testable::test_result
|
||||
function_call::raw::test_assignment(database& db, const sstring& keyspace, const column_specification& receiver) const {
|
||||
function_call::raw::test_assignment(database& db, const sstring& keyspace, shared_ptr<column_specification> receiver) const {
|
||||
// Note: Functions.get() will return null if the function doesn't exist, or throw is no function matching
|
||||
// the arguments can be found. We may get one of those if an undefined/wrong function is used as argument
|
||||
// of another, existing, function. In that case, we return true here because we'll throw a proper exception
|
||||
// later with a more helpful error message that if we were to return false here.
|
||||
try {
|
||||
auto&& fun = functions::get(db, keyspace, _name, _terms, receiver.ks_name, receiver.cf_name, &receiver);
|
||||
if (fun && receiver.type == fun->return_type()) {
|
||||
auto&& fun = functions::get(db, keyspace, _name, _terms, receiver->ks_name, receiver->cf_name, receiver);
|
||||
if (fun && receiver->type == fun->return_type()) {
|
||||
return assignment_testable::test_result::EXACT_MATCH;
|
||||
} else if (!fun || receiver.type->is_value_compatible_with(*fun->return_type())) {
|
||||
} else if (!fun || receiver->type->is_value_compatible_with(*fun->return_type())) {
|
||||
return assignment_testable::test_result::WEAKLY_ASSIGNABLE;
|
||||
} else {
|
||||
return assignment_testable::test_result::NOT_ASSIGNABLE;
|
||||
|
||||
@@ -67,8 +67,9 @@ class functions {
|
||||
private:
|
||||
static std::unordered_multimap<function_name, shared_ptr<function>> init();
|
||||
public:
|
||||
static lw_shared_ptr<column_specification> make_arg_spec(const sstring& receiver_ks, const sstring& receiver_cf,
|
||||
static shared_ptr<column_specification> make_arg_spec(const sstring& receiver_ks, const sstring& receiver_cf,
|
||||
const function& fun, size_t i);
|
||||
static int get_overload_count(const function_name& name);
|
||||
public:
|
||||
static shared_ptr<function> get(database& db,
|
||||
const sstring& keyspace,
|
||||
@@ -76,7 +77,7 @@ public:
|
||||
const std::vector<shared_ptr<assignment_testable>>& provided_args,
|
||||
const sstring& receiver_ks,
|
||||
const sstring& receiver_cf,
|
||||
const column_specification* receiver = nullptr);
|
||||
::shared_ptr<column_specification> receiver = nullptr);
|
||||
template <typename AssignmentTestablePtrRange>
|
||||
static shared_ptr<function> get(database& db,
|
||||
const sstring& keyspace,
|
||||
@@ -84,7 +85,7 @@ public:
|
||||
AssignmentTestablePtrRange&& provided_args,
|
||||
const sstring& receiver_ks,
|
||||
const sstring& receiver_cf,
|
||||
const column_specification* receiver = nullptr) {
|
||||
::shared_ptr<column_specification> receiver = nullptr) {
|
||||
const std::vector<shared_ptr<assignment_testable>> args(std::begin(provided_args), std::end(provided_args));
|
||||
return get(db, keyspace, name, args, receiver_ks, receiver_cf, receiver);
|
||||
}
|
||||
|
||||
@@ -30,28 +30,28 @@
|
||||
|
||||
namespace cql3 {
|
||||
|
||||
lw_shared_ptr<column_specification>
|
||||
shared_ptr<column_specification>
|
||||
lists::index_spec_of(const column_specification& column) {
|
||||
return make_lw_shared<column_specification>(column.ks_name, column.cf_name,
|
||||
return ::make_shared<column_specification>(column.ks_name, column.cf_name,
|
||||
::make_shared<column_identifier>(format("idx({})", *column.name), true), int32_type);
|
||||
}
|
||||
|
||||
lw_shared_ptr<column_specification>
|
||||
shared_ptr<column_specification>
|
||||
lists::value_spec_of(const column_specification& column) {
|
||||
return make_lw_shared<column_specification>(column.ks_name, column.cf_name,
|
||||
return ::make_shared<column_specification>(column.ks_name, column.cf_name,
|
||||
::make_shared<column_identifier>(format("value({})", *column.name), true),
|
||||
dynamic_pointer_cast<const list_type_impl>(column.type)->get_elements_type());
|
||||
}
|
||||
|
||||
lw_shared_ptr<column_specification>
|
||||
shared_ptr<column_specification>
|
||||
lists::uuid_index_spec_of(const column_specification& column) {
|
||||
return make_lw_shared<column_specification>(column.ks_name, column.cf_name,
|
||||
return ::make_shared<column_specification>(column.ks_name, column.cf_name,
|
||||
::make_shared<column_identifier>(format("uuid_idx({})", *column.name), true), uuid_type);
|
||||
}
|
||||
|
||||
|
||||
shared_ptr<term>
|
||||
lists::literal::prepare(database& db, const sstring& keyspace, lw_shared_ptr<column_specification> receiver) const {
|
||||
lists::literal::prepare(database& db, const sstring& keyspace, shared_ptr<column_specification> receiver) const {
|
||||
validate_assignable_to(db, keyspace, *receiver);
|
||||
|
||||
// In Cassandra, an empty (unfrozen) map/set/list is equivalent to the column being null. In
|
||||
@@ -93,7 +93,7 @@ lists::literal::validate_assignable_to(database& db, const sstring keyspace, con
|
||||
}
|
||||
auto&& value_spec = value_spec_of(receiver);
|
||||
for (auto rt : _elements) {
|
||||
if (!is_assignable(rt->test_assignment(db, keyspace, *value_spec))) {
|
||||
if (!is_assignable(rt->test_assignment(db, keyspace, value_spec))) {
|
||||
throw exceptions::invalid_request_exception(format("Invalid list literal for {}: value {} is not of type {}",
|
||||
*receiver.name, *rt, value_spec->type->as_cql3_type()));
|
||||
}
|
||||
@@ -101,8 +101,8 @@ lists::literal::validate_assignable_to(database& db, const sstring keyspace, con
|
||||
}
|
||||
|
||||
assignment_testable::test_result
|
||||
lists::literal::test_assignment(database& db, const sstring& keyspace, const column_specification& receiver) const {
|
||||
if (!dynamic_pointer_cast<const list_type_impl>(receiver.type)) {
|
||||
lists::literal::test_assignment(database& db, const sstring& keyspace, shared_ptr<column_specification> receiver) const {
|
||||
if (!dynamic_pointer_cast<const list_type_impl>(receiver->type)) {
|
||||
return assignment_testable::test_result::NOT_ASSIGNABLE;
|
||||
}
|
||||
|
||||
@@ -111,11 +111,11 @@ lists::literal::test_assignment(database& db, const sstring& keyspace, const col
|
||||
return assignment_testable::test_result::WEAKLY_ASSIGNABLE;
|
||||
}
|
||||
|
||||
auto&& value_spec = value_spec_of(receiver);
|
||||
auto&& value_spec = value_spec_of(*receiver);
|
||||
std::vector<shared_ptr<assignment_testable>> to_test;
|
||||
to_test.reserve(_elements.size());
|
||||
std::copy(_elements.begin(), _elements.end(), std::back_inserter(to_test));
|
||||
return assignment_testable::test_all(db, keyspace, *value_spec, to_test);
|
||||
return assignment_testable::test_all(db, keyspace, value_spec, to_test);
|
||||
}
|
||||
|
||||
sstring
|
||||
|
||||
@@ -54,9 +54,9 @@ namespace cql3 {
|
||||
class lists {
|
||||
lists() = delete;
|
||||
public:
|
||||
static lw_shared_ptr<column_specification> index_spec_of(const column_specification&);
|
||||
static lw_shared_ptr<column_specification> value_spec_of(const column_specification&);
|
||||
static lw_shared_ptr<column_specification> uuid_index_spec_of(const column_specification&);
|
||||
static shared_ptr<column_specification> index_spec_of(const column_specification&);
|
||||
static shared_ptr<column_specification> value_spec_of(const column_specification&);
|
||||
static shared_ptr<column_specification> uuid_index_spec_of(const column_specification&);
|
||||
|
||||
class literal : public term::raw {
|
||||
const std::vector<shared_ptr<term::raw>> _elements;
|
||||
@@ -64,11 +64,11 @@ public:
|
||||
explicit literal(std::vector<shared_ptr<term::raw>> elements)
|
||||
: _elements(std::move(elements)) {
|
||||
}
|
||||
virtual shared_ptr<term> prepare(database& db, const sstring& keyspace, lw_shared_ptr<column_specification> receiver) const override;
|
||||
virtual shared_ptr<term> prepare(database& db, const sstring& keyspace, shared_ptr<column_specification> receiver) const override;
|
||||
private:
|
||||
void validate_assignable_to(database& db, const sstring keyspace, const column_specification& receiver) const;
|
||||
public:
|
||||
virtual assignment_testable::test_result test_assignment(database& db, const sstring& keyspace, const column_specification& receiver) const override;
|
||||
virtual assignment_testable::test_result test_assignment(database& db, const sstring& keyspace, shared_ptr<column_specification> receiver) const override;
|
||||
virtual sstring to_string() const override;
|
||||
};
|
||||
|
||||
@@ -113,7 +113,7 @@ public:
|
||||
*/
|
||||
class marker : public abstract_marker {
|
||||
public:
|
||||
marker(int32_t bind_index, lw_shared_ptr<column_specification> receiver)
|
||||
marker(int32_t bind_index, ::shared_ptr<column_specification> receiver)
|
||||
: abstract_marker{bind_index, std::move(receiver)}
|
||||
{ }
|
||||
virtual ::shared_ptr<terminal> bind(const query_options& options) override;
|
||||
|
||||
26
cql3/maps.cc
26
cql3/maps.cc
@@ -51,22 +51,22 @@
|
||||
|
||||
namespace cql3 {
|
||||
|
||||
lw_shared_ptr<column_specification>
|
||||
shared_ptr<column_specification>
|
||||
maps::key_spec_of(const column_specification& column) {
|
||||
return make_lw_shared<column_specification>(column.ks_name, column.cf_name,
|
||||
return ::make_shared<column_specification>(column.ks_name, column.cf_name,
|
||||
::make_shared<column_identifier>(format("key({})", *column.name), true),
|
||||
dynamic_pointer_cast<const map_type_impl>(column.type)->get_keys_type());
|
||||
}
|
||||
|
||||
lw_shared_ptr<column_specification>
|
||||
shared_ptr<column_specification>
|
||||
maps::value_spec_of(const column_specification& column) {
|
||||
return make_lw_shared<column_specification>(column.ks_name, column.cf_name,
|
||||
return ::make_shared<column_specification>(column.ks_name, column.cf_name,
|
||||
::make_shared<column_identifier>(format("value({})", *column.name), true),
|
||||
dynamic_pointer_cast<const map_type_impl>(column.type)->get_values_type());
|
||||
}
|
||||
|
||||
::shared_ptr<term>
|
||||
maps::literal::prepare(database& db, const sstring& keyspace, lw_shared_ptr<column_specification> receiver) const {
|
||||
maps::literal::prepare(database& db, const sstring& keyspace, ::shared_ptr<column_specification> receiver) const {
|
||||
validate_assignable_to(db, keyspace, *receiver);
|
||||
|
||||
auto key_spec = maps::key_spec_of(*receiver);
|
||||
@@ -104,31 +104,31 @@ maps::literal::validate_assignable_to(database& db, const sstring& keyspace, con
|
||||
auto&& key_spec = maps::key_spec_of(receiver);
|
||||
auto&& value_spec = maps::value_spec_of(receiver);
|
||||
for (auto&& entry : entries) {
|
||||
if (!is_assignable(entry.first->test_assignment(db, keyspace, *key_spec))) {
|
||||
if (!is_assignable(entry.first->test_assignment(db, keyspace, key_spec))) {
|
||||
throw exceptions::invalid_request_exception(format("Invalid map literal for {}: key {} is not of type {}", *receiver.name, *entry.first, key_spec->type->as_cql3_type()));
|
||||
}
|
||||
if (!is_assignable(entry.second->test_assignment(db, keyspace, *value_spec))) {
|
||||
if (!is_assignable(entry.second->test_assignment(db, keyspace, value_spec))) {
|
||||
throw exceptions::invalid_request_exception(format("Invalid map literal for {}: value {} is not of type {}", *receiver.name, *entry.second, value_spec->type->as_cql3_type()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
assignment_testable::test_result
|
||||
maps::literal::test_assignment(database& db, const sstring& keyspace, const column_specification& receiver) const {
|
||||
if (!dynamic_pointer_cast<const map_type_impl>(receiver.type)) {
|
||||
maps::literal::test_assignment(database& db, const sstring& keyspace, ::shared_ptr<column_specification> receiver) const {
|
||||
if (!dynamic_pointer_cast<const map_type_impl>(receiver->type)) {
|
||||
return assignment_testable::test_result::NOT_ASSIGNABLE;
|
||||
}
|
||||
// If there is no elements, we can't say it's an exact match (an empty map if fundamentally polymorphic).
|
||||
if (entries.empty()) {
|
||||
return assignment_testable::test_result::WEAKLY_ASSIGNABLE;
|
||||
}
|
||||
auto key_spec = maps::key_spec_of(receiver);
|
||||
auto value_spec = maps::value_spec_of(receiver);
|
||||
auto key_spec = maps::key_spec_of(*receiver);
|
||||
auto value_spec = maps::value_spec_of(*receiver);
|
||||
// It's an exact match if all are exact match, but is not assignable as soon as any is non assignable.
|
||||
auto res = assignment_testable::test_result::EXACT_MATCH;
|
||||
for (auto entry : entries) {
|
||||
auto t1 = entry.first->test_assignment(db, keyspace, *key_spec);
|
||||
auto t2 = entry.second->test_assignment(db, keyspace, *value_spec);
|
||||
auto t1 = entry.first->test_assignment(db, keyspace, key_spec);
|
||||
auto t2 = entry.second->test_assignment(db, keyspace, value_spec);
|
||||
if (t1 == assignment_testable::test_result::NOT_ASSIGNABLE || t2 == assignment_testable::test_result::NOT_ASSIGNABLE)
|
||||
return assignment_testable::test_result::NOT_ASSIGNABLE;
|
||||
if (t1 != assignment_testable::test_result::EXACT_MATCH || t2 != assignment_testable::test_result::EXACT_MATCH)
|
||||
|
||||
10
cql3/maps.hh
10
cql3/maps.hh
@@ -56,8 +56,8 @@ class maps {
|
||||
private:
|
||||
maps() = delete;
|
||||
public:
|
||||
static lw_shared_ptr<column_specification> key_spec_of(const column_specification& column);
|
||||
static lw_shared_ptr<column_specification> value_spec_of(const column_specification& column);
|
||||
static shared_ptr<column_specification> key_spec_of(const column_specification& column);
|
||||
static shared_ptr<column_specification> value_spec_of(const column_specification& column);
|
||||
|
||||
class literal : public term::raw {
|
||||
public:
|
||||
@@ -66,11 +66,11 @@ public:
|
||||
literal(const std::vector<std::pair<::shared_ptr<term::raw>, ::shared_ptr<term::raw>>>& entries_)
|
||||
: entries{entries_}
|
||||
{ }
|
||||
virtual ::shared_ptr<term> prepare(database& db, const sstring& keyspace, lw_shared_ptr<column_specification> receiver) const override;
|
||||
virtual ::shared_ptr<term> prepare(database& db, const sstring& keyspace, ::shared_ptr<column_specification> receiver) const override;
|
||||
private:
|
||||
void validate_assignable_to(database& db, const sstring& keyspace, const column_specification& receiver) const;
|
||||
public:
|
||||
virtual assignment_testable::test_result test_assignment(database& db, const sstring& keyspace, const column_specification& receiver) const override;
|
||||
virtual assignment_testable::test_result test_assignment(database& db, const sstring& keyspace, ::shared_ptr<column_specification> receiver) const override;
|
||||
virtual sstring to_string() const override;
|
||||
};
|
||||
|
||||
@@ -104,7 +104,7 @@ public:
|
||||
|
||||
class marker : public abstract_marker {
|
||||
public:
|
||||
marker(int32_t bind_index, lw_shared_ptr<column_specification> receiver)
|
||||
marker(int32_t bind_index, ::shared_ptr<column_specification> receiver)
|
||||
: abstract_marker{bind_index, std::move(receiver)}
|
||||
{ }
|
||||
virtual ::shared_ptr<terminal> bind(const query_options& options) override;
|
||||
|
||||
@@ -140,7 +140,7 @@ protected:
|
||||
virtual shared_ptr<restrictions::restriction> new_EQ_restriction(database& db, schema_ptr schema,
|
||||
variable_specifications& bound_names) override {
|
||||
auto rs = receivers(db, *schema);
|
||||
std::vector<lw_shared_ptr<column_specification>> col_specs(rs.size());
|
||||
std::vector<::shared_ptr<column_specification>> col_specs(rs.size());
|
||||
std::transform(rs.begin(), rs.end(), col_specs.begin(), [] (auto cs) {
|
||||
return cs->column_specification;
|
||||
});
|
||||
@@ -151,7 +151,7 @@ protected:
|
||||
virtual shared_ptr<restrictions::restriction> new_IN_restriction(database& db, schema_ptr schema,
|
||||
variable_specifications& bound_names) override {
|
||||
auto rs = receivers(db, *schema);
|
||||
std::vector<lw_shared_ptr<column_specification>> col_specs(rs.size());
|
||||
std::vector<::shared_ptr<column_specification>> col_specs(rs.size());
|
||||
std::transform(rs.begin(), rs.end(), col_specs.begin(), [] (auto cs) {
|
||||
return cs->column_specification;
|
||||
});
|
||||
@@ -175,7 +175,7 @@ protected:
|
||||
variable_specifications& bound_names,
|
||||
statements::bound bound, bool inclusive) override {
|
||||
auto rs = receivers(db, *schema);
|
||||
std::vector<lw_shared_ptr<column_specification>> col_specs(rs.size());
|
||||
std::vector<::shared_ptr<column_specification>> col_specs(rs.size());
|
||||
std::transform(rs.begin(), rs.end(), col_specs.begin(), [] (auto cs) {
|
||||
return cs->column_specification;
|
||||
});
|
||||
@@ -200,7 +200,7 @@ protected:
|
||||
return ::make_shared(multi_column_relation(std::move(new_entities), _relation_type, _values_or_marker, _in_values, _in_marker));
|
||||
}
|
||||
|
||||
virtual shared_ptr<term> to_term(const std::vector<lw_shared_ptr<column_specification>>& receivers,
|
||||
virtual shared_ptr<term> to_term(const std::vector<shared_ptr<column_specification>>& receivers,
|
||||
const term::raw& raw, database& db, const sstring& keyspace,
|
||||
variable_specifications& bound_names) const override {
|
||||
const auto& as_multi_column_raw = dynamic_cast<const term::multi_column_raw&>(raw);
|
||||
|
||||
@@ -87,10 +87,10 @@ operation::set_element::prepare(database& db, const sstring& keyspace, const col
|
||||
}
|
||||
|
||||
bool
|
||||
operation::set_element::is_compatible_with(const std::unique_ptr<raw_update>& other) const {
|
||||
operation::set_element::is_compatible_with(shared_ptr<raw_update> other) const {
|
||||
// TODO: we could check that the other operation is not setting the same element
|
||||
// too (but since the index/key set may be a bind variables we can't always do it at this point)
|
||||
return !dynamic_cast<const set_value*>(other.get());
|
||||
return !dynamic_pointer_cast<set_value>(std::move(other));
|
||||
}
|
||||
|
||||
sstring
|
||||
@@ -120,13 +120,13 @@ operation::set_field::prepare(database& db, const sstring& keyspace, const colum
|
||||
}
|
||||
|
||||
bool
|
||||
operation::set_field::is_compatible_with(const std::unique_ptr<raw_update>& other) const {
|
||||
auto x = dynamic_cast<const set_field*>(other.get());
|
||||
operation::set_field::is_compatible_with(shared_ptr<raw_update> other) const {
|
||||
auto x = dynamic_pointer_cast<set_field>(other);
|
||||
if (x) {
|
||||
return _field != x->_field;
|
||||
}
|
||||
|
||||
return !dynamic_cast<const set_value*>(other.get());
|
||||
return !dynamic_pointer_cast<set_value>(std::move(other));
|
||||
}
|
||||
|
||||
const column_identifier::raw&
|
||||
@@ -185,8 +185,8 @@ operation::addition::prepare(database& db, const sstring& keyspace, const column
|
||||
}
|
||||
|
||||
bool
|
||||
operation::addition::is_compatible_with(const std::unique_ptr<raw_update>& other) const {
|
||||
return !dynamic_cast<const set_value*>(other.get());
|
||||
operation::addition::is_compatible_with(shared_ptr<raw_update> other) const {
|
||||
return !dynamic_pointer_cast<set_value>(other);
|
||||
}
|
||||
|
||||
sstring
|
||||
@@ -216,7 +216,7 @@ operation::subtraction::prepare(database& db, const sstring& keyspace, const col
|
||||
} else if (ctype->get_kind() == abstract_type::kind::map) {
|
||||
auto&& mtype = dynamic_pointer_cast<const map_type_impl>(ctype);
|
||||
// The value for a map subtraction is actually a set
|
||||
auto&& vr = make_lw_shared<column_specification>(
|
||||
auto&& vr = ::make_shared<column_specification>(
|
||||
receiver.column_specification->ks_name,
|
||||
receiver.column_specification->cf_name,
|
||||
receiver.column_specification->name,
|
||||
@@ -227,8 +227,8 @@ operation::subtraction::prepare(database& db, const sstring& keyspace, const col
|
||||
}
|
||||
|
||||
bool
|
||||
operation::subtraction::is_compatible_with(const std::unique_ptr<raw_update>& other) const {
|
||||
return !dynamic_cast<const set_value*>(other.get());
|
||||
operation::subtraction::is_compatible_with(shared_ptr<raw_update> other) const {
|
||||
return !dynamic_pointer_cast<set_value>(other);
|
||||
}
|
||||
|
||||
sstring
|
||||
@@ -250,8 +250,8 @@ operation::prepend::prepare(database& db, const sstring& keyspace, const column_
|
||||
}
|
||||
|
||||
bool
|
||||
operation::prepend::is_compatible_with(const std::unique_ptr<raw_update>& other) const {
|
||||
return !dynamic_cast<const set_value*>(other.get());
|
||||
operation::prepend::is_compatible_with(shared_ptr<raw_update> other) const {
|
||||
return !dynamic_pointer_cast<set_value>(other);
|
||||
}
|
||||
|
||||
|
||||
@@ -294,7 +294,7 @@ operation::set_counter_value_from_tuple_list::prepare(database& db, const sstrin
|
||||
|
||||
// We need to fake a column of list<tuple<...>> to prepare the value term
|
||||
auto & os = receiver.column_specification;
|
||||
auto spec = make_lw_shared<cql3::column_specification>(os->ks_name, os->cf_name, os->name, counter_tuple_list_type);
|
||||
auto spec = ::make_shared<cql3::column_specification>(os->ks_name, os->cf_name, os->name, counter_tuple_list_type);
|
||||
auto v = _value->prepare(db, keyspace, spec);
|
||||
|
||||
// Will not be used elsewhere, so make it local.
|
||||
@@ -356,7 +356,7 @@ operation::set_counter_value_from_tuple_list::prepare(database& db, const sstrin
|
||||
};
|
||||
|
||||
bool
|
||||
operation::set_value::is_compatible_with(const std::unique_ptr<raw_update>& other) const {
|
||||
operation::set_value::is_compatible_with(::shared_ptr <raw_update> other) const {
|
||||
// We don't allow setting multiple time the same column, because 1)
|
||||
// it's stupid and 2) the result would seem random to the user.
|
||||
return false;
|
||||
|
||||
@@ -168,7 +168,7 @@ public:
|
||||
* @return whether this operation can be applied alongside the {@code
|
||||
* other} update (in the same UPDATE statement for the same column).
|
||||
*/
|
||||
virtual bool is_compatible_with(const std::unique_ptr<raw_update>& other) const = 0;
|
||||
virtual bool is_compatible_with(::shared_ptr<raw_update> other) const = 0;
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -181,7 +181,7 @@ public:
|
||||
*/
|
||||
class raw_deletion {
|
||||
public:
|
||||
virtual ~raw_deletion() = default;
|
||||
~raw_deletion() {}
|
||||
|
||||
/**
|
||||
* The name of the column affected by this delete operation.
|
||||
@@ -218,7 +218,7 @@ public:
|
||||
|
||||
virtual shared_ptr<operation> prepare(database& db, const sstring& keyspace, const column_definition& receiver) const override;
|
||||
|
||||
virtual bool is_compatible_with(const std::unique_ptr<raw_update>& other) const override;
|
||||
virtual bool is_compatible_with(shared_ptr<raw_update> other) const override;
|
||||
};
|
||||
|
||||
// Set a single field inside a user-defined type.
|
||||
@@ -234,7 +234,7 @@ public:
|
||||
|
||||
virtual shared_ptr<operation> prepare(database& db, const sstring& keyspace, const column_definition& receiver) const override;
|
||||
|
||||
virtual bool is_compatible_with(const std::unique_ptr<raw_update>& other) const override;
|
||||
virtual bool is_compatible_with(shared_ptr<raw_update> other) const override;
|
||||
};
|
||||
|
||||
// Delete a single field inside a user-defined type.
|
||||
@@ -263,7 +263,7 @@ public:
|
||||
|
||||
virtual shared_ptr<operation> prepare(database& db, const sstring& keyspace, const column_definition& receiver) const override;
|
||||
|
||||
virtual bool is_compatible_with(const std::unique_ptr<raw_update>& other) const override;
|
||||
virtual bool is_compatible_with(shared_ptr<raw_update> other) const override;
|
||||
};
|
||||
|
||||
class subtraction : public raw_update {
|
||||
@@ -277,7 +277,7 @@ public:
|
||||
|
||||
virtual shared_ptr<operation> prepare(database& db, const sstring& keyspace, const column_definition& receiver) const override;
|
||||
|
||||
virtual bool is_compatible_with(const std::unique_ptr<raw_update>& other) const override;
|
||||
virtual bool is_compatible_with(shared_ptr<raw_update> other) const override;
|
||||
};
|
||||
|
||||
class prepend : public raw_update {
|
||||
@@ -291,7 +291,7 @@ public:
|
||||
|
||||
virtual shared_ptr<operation> prepare(database& db, const sstring& keyspace, const column_definition& receiver) const override;
|
||||
|
||||
virtual bool is_compatible_with(const std::unique_ptr<raw_update>& other) const override;
|
||||
virtual bool is_compatible_with(shared_ptr<raw_update> other) const override;
|
||||
};
|
||||
|
||||
class column_deletion;
|
||||
|
||||
@@ -65,7 +65,7 @@ public:
|
||||
}
|
||||
#endif
|
||||
|
||||
virtual bool is_compatible_with(const std::unique_ptr<raw_update>& other) const override;
|
||||
virtual bool is_compatible_with(::shared_ptr <raw_update> other) const override;
|
||||
};
|
||||
|
||||
class operation::set_counter_value_from_tuple_list : public set_value {
|
||||
|
||||
@@ -189,7 +189,7 @@ bytes_view query_options::linearize(fragmented_temporary_buffer::view view) cons
|
||||
}
|
||||
}
|
||||
|
||||
void query_options::prepare(const std::vector<lw_shared_ptr<column_specification>>& specs)
|
||||
void query_options::prepare(const std::vector<::shared_ptr<column_specification>>& specs)
|
||||
{
|
||||
if (!_names) {
|
||||
return;
|
||||
|
||||
@@ -41,7 +41,7 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <concepts>
|
||||
#include <seastar/util/gcc6-concepts.hh>
|
||||
#include "timestamp.hh"
|
||||
#include "bytes.hh"
|
||||
#include "db/consistency_level_type.hh"
|
||||
@@ -97,11 +97,11 @@ private:
|
||||
* @param values_ranges a vector of values ranges for each statement in the batch.
|
||||
*/
|
||||
template<typename OneMutationDataRange>
|
||||
requires requires (OneMutationDataRange range) {
|
||||
GCC6_CONCEPT( requires requires (OneMutationDataRange range) {
|
||||
std::begin(range);
|
||||
std::end(range);
|
||||
} && ( requires (OneMutationDataRange range) { { *range.begin() } -> std::convertible_to<raw_value_view>; } ||
|
||||
requires (OneMutationDataRange range) { { *range.begin() } -> std::convertible_to<raw_value>; } )
|
||||
} && ( requires (OneMutationDataRange range) { { *range.begin() } -> raw_value_view; } ||
|
||||
requires (OneMutationDataRange range) { { *range.begin() } -> raw_value; } ) )
|
||||
explicit query_options(query_options&& o, std::vector<OneMutationDataRange> values_ranges);
|
||||
|
||||
public:
|
||||
@@ -145,11 +145,11 @@ public:
|
||||
* @param values_ranges a vector of values ranges for each statement in the batch.
|
||||
*/
|
||||
template<typename OneMutationDataRange>
|
||||
requires requires (OneMutationDataRange range) {
|
||||
GCC6_CONCEPT( requires requires (OneMutationDataRange range) {
|
||||
std::begin(range);
|
||||
std::end(range);
|
||||
} && ( requires (OneMutationDataRange range) { { *range.begin() } -> std::convertible_to<raw_value_view>; } ||
|
||||
requires (OneMutationDataRange range) { { *range.begin() } -> std::convertible_to<raw_value>; } )
|
||||
} && ( requires (OneMutationDataRange range) { { *range.begin() } -> raw_value_view; } ||
|
||||
requires (OneMutationDataRange range) { { *range.begin() } -> raw_value; } ) )
|
||||
static query_options make_batch_options(query_options&& o, std::vector<OneMutationDataRange> values_ranges) {
|
||||
return query_options(std::move(o), std::move(values_ranges));
|
||||
}
|
||||
@@ -245,17 +245,17 @@ public:
|
||||
return _cql_config;
|
||||
}
|
||||
|
||||
void prepare(const std::vector<lw_shared_ptr<column_specification>>& specs);
|
||||
void prepare(const std::vector<::shared_ptr<column_specification>>& specs);
|
||||
private:
|
||||
void fill_value_views();
|
||||
};
|
||||
|
||||
template<typename OneMutationDataRange>
|
||||
requires requires (OneMutationDataRange range) {
|
||||
GCC6_CONCEPT( requires requires (OneMutationDataRange range) {
|
||||
std::begin(range);
|
||||
std::end(range);
|
||||
} && ( requires (OneMutationDataRange range) { { *range.begin() } -> std::convertible_to<raw_value_view>; } ||
|
||||
requires (OneMutationDataRange range) { { *range.begin() } -> std::convertible_to<raw_value>; } )
|
||||
} && ( requires (OneMutationDataRange range) { { *range.begin() } -> raw_value_view; } ||
|
||||
requires (OneMutationDataRange range) { { *range.begin() } -> raw_value; } ) )
|
||||
query_options::query_options(query_options&& o, std::vector<OneMutationDataRange> values_ranges)
|
||||
: query_options(std::move(o))
|
||||
{
|
||||
|
||||
@@ -510,7 +510,7 @@ query_processor::execute_prepared(
|
||||
if (needs_authorization) {
|
||||
fut = statement->check_access(_proxy, query_state.get_client_state()).then([this, &query_state, prepared = std::move(prepared), cache_key = std::move(cache_key)] () mutable {
|
||||
return _authorized_prepared_cache.insert(*query_state.get_client_state().user(), std::move(cache_key), std::move(prepared)).handle_exception([this] (auto eptr) {
|
||||
log.error("failed to cache the entry: {}", eptr);
|
||||
log.error("failed to cache the entry", eptr);
|
||||
});
|
||||
});
|
||||
}
|
||||
@@ -562,6 +562,27 @@ query_processor::prepare(sstring query_string, const service::client_state& clie
|
||||
}
|
||||
}
|
||||
|
||||
::shared_ptr<cql_transport::messages::result_message::prepared>
|
||||
query_processor::get_stored_prepared_statement(
|
||||
const std::string_view& query_string,
|
||||
const sstring& keyspace,
|
||||
bool for_thrift) {
|
||||
using namespace cql_transport::messages;
|
||||
if (for_thrift) {
|
||||
return get_stored_prepared_statement_one<result_message::prepared::thrift>(
|
||||
query_string,
|
||||
keyspace,
|
||||
compute_thrift_id,
|
||||
prepared_cache_key_type::thrift_id);
|
||||
} else {
|
||||
return get_stored_prepared_statement_one<result_message::prepared::cql>(
|
||||
query_string,
|
||||
keyspace,
|
||||
compute_id,
|
||||
prepared_cache_key_type::cql_id);
|
||||
}
|
||||
}
|
||||
|
||||
static std::string hash_target(std::string_view query_string, std::string_view keyspace) {
|
||||
std::string ret(keyspace);
|
||||
ret += query_string;
|
||||
@@ -586,10 +607,10 @@ prepared_cache_key_type query_processor::compute_thrift_id(
|
||||
|
||||
std::unique_ptr<prepared_statement>
|
||||
query_processor::get_statement(const sstring_view& query, const service::client_state& client_state) {
|
||||
std::unique_ptr<raw::parsed_statement> statement = parse_statement(query);
|
||||
::shared_ptr<raw::parsed_statement> statement = parse_statement(query);
|
||||
|
||||
// Set keyspace for statement that require login
|
||||
auto cf_stmt = dynamic_cast<raw::cf_statement*>(statement.get());
|
||||
auto cf_stmt = dynamic_pointer_cast<raw::cf_statement>(statement);
|
||||
if (cf_stmt) {
|
||||
cf_stmt->prepare_keyspace(client_state);
|
||||
}
|
||||
@@ -599,7 +620,7 @@ query_processor::get_statement(const sstring_view& query, const service::client_
|
||||
return p;
|
||||
}
|
||||
|
||||
std::unique_ptr<raw::parsed_statement>
|
||||
::shared_ptr<raw::parsed_statement>
|
||||
query_processor::parse_statement(const sstring_view& query) {
|
||||
try {
|
||||
auto statement = util::do_with_parser(query, std::mem_fn(&cql3_parser::CqlParser::query));
|
||||
@@ -832,7 +853,7 @@ query_processor::execute_batch(
|
||||
return batch->check_access(_proxy, query_state.get_client_state()).then([this, &query_state, &options, batch, pending_authorization_entries = std::move(pending_authorization_entries)] () mutable {
|
||||
return parallel_for_each(pending_authorization_entries, [this, &query_state] (auto& e) {
|
||||
return _authorized_prepared_cache.insert(*query_state.get_client_state().user(), e.first, std::move(e.second)).handle_exception([this] (auto eptr) {
|
||||
log.error("failed to cache the entry: {}", eptr);
|
||||
log.error("failed to cache the entry", eptr);
|
||||
});
|
||||
}).then([this, &query_state, &options, batch] {
|
||||
batch->validate();
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user