Compare commits
282 Commits
next-4.1
...
alternator
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
aac47f494b | ||
|
|
d93df8c184 | ||
|
|
48e22c2910 | ||
|
|
bbb6689938 | ||
|
|
6c252feb0c | ||
|
|
33d5015edf | ||
|
|
0be2f17952 | ||
|
|
a74d5fcce8 | ||
|
|
8e9e7cc952 | ||
|
|
ce5db951be | ||
|
|
5a9b9bdf4d | ||
|
|
518f01099c | ||
|
|
db617b459c | ||
|
|
72ac19623a | ||
|
|
eb1cc6610c | ||
|
|
24789b549a | ||
|
|
e11ba56dbc | ||
|
|
a2d68eac4c | ||
|
|
c14fc8d854 | ||
|
|
81dcee8961 | ||
|
|
6aa840543c | ||
|
|
84598b39b9 | ||
|
|
287ba6ddde | ||
|
|
a700da216d | ||
|
|
a6d2117a77 | ||
|
|
c46458f61d | ||
|
|
dc09d96b22 | ||
|
|
350180125d | ||
|
|
78a153518a | ||
|
|
70d4afc637 | ||
|
|
fea29fdfd1 | ||
|
|
93f1072cee | ||
|
|
edd03312ed | ||
|
|
faad21f72b | ||
|
|
298f5caa38 | ||
|
|
b6662bac35 | ||
|
|
707a70235e | ||
|
|
c223fafba3 | ||
|
|
7a749a4772 | ||
|
|
5fc1e70751 | ||
|
|
93fbbbc202 | ||
|
|
cfcf841c0d | ||
|
|
c0c4d90df9 | ||
|
|
86e80ea320 | ||
|
|
6d62ed213a | ||
|
|
05920f7c3b | ||
|
|
9390dd41d0 | ||
|
|
a795c290f3 | ||
|
|
d4b19f55b2 | ||
|
|
5e8644413e | ||
|
|
40e3b7a0b6 | ||
|
|
76c3e4e0d6 | ||
|
|
9567691551 | ||
|
|
2220c8c681 | ||
|
|
b2fa0c9c1c | ||
|
|
ace1ffc8da | ||
|
|
6b8224ef85 | ||
|
|
2e68eafb22 | ||
|
|
3044f71438 | ||
|
|
0af6ab6fc6 | ||
|
|
70ae5cc235 | ||
|
|
c9345d8a0e | ||
|
|
587b38cd69 | ||
|
|
7d68d5030d | ||
|
|
c49e009e3e | ||
|
|
eebb2f0a0f | ||
|
|
fd10eee1ae | ||
|
|
28b3819c23 | ||
|
|
656f62722b | ||
|
|
ecb571d7e3 | ||
|
|
dd4638d499 | ||
|
|
aaf559c4f9 | ||
|
|
f7d0ca3c92 | ||
|
|
8394225741 | ||
|
|
091b1b40c2 | ||
|
|
b914ba11fa | ||
|
|
1b2b2c7009 | ||
|
|
188c6a552a | ||
|
|
b8964ab0ba | ||
|
|
c4fd846dbb | ||
|
|
338b7e9e67 | ||
|
|
154d1649c6 | ||
|
|
5620a46024 | ||
|
|
fc9744791c | ||
|
|
e0b01a0233 | ||
|
|
b3bf2fab2e | ||
|
|
05b895ca84 | ||
|
|
6b145b59d3 | ||
|
|
76bc30a82d | ||
|
|
972474a215 | ||
|
|
3342ebff22 | ||
|
|
cd2c581c7c | ||
|
|
e19a7f908e | ||
|
|
e9f1540de1 | ||
|
|
eb678ed63a | ||
|
|
ebdd4022cf | ||
|
|
d6a8626e90 | ||
|
|
62858c8466 | ||
|
|
3716d23ce4 | ||
|
|
1611b5dd4f | ||
|
|
105533c046 | ||
|
|
7c23e23e7d | ||
|
|
a5057a3b6e | ||
|
|
8d8baccdc4 | ||
|
|
f8c7a2e0b8 | ||
|
|
99fd032b1f | ||
|
|
7984050054 | ||
|
|
d7f75b405b | ||
|
|
1d19934bc6 | ||
|
|
493890c6f6 | ||
|
|
e550a666e3 | ||
|
|
ea62ce67f4 | ||
|
|
7ce0a30766 | ||
|
|
d141c3b5bd | ||
|
|
a6ca5e19e4 | ||
|
|
482ac08a45 | ||
|
|
7efa1aa48a | ||
|
|
196efbba13 | ||
|
|
19111136c3 | ||
|
|
87626beaae | ||
|
|
b5bbdc18e4 | ||
|
|
34833707be | ||
|
|
cb9cb0e58e | ||
|
|
a69455df2b | ||
|
|
6435895e2f | ||
|
|
3c7b07c20c | ||
|
|
f9063208e9 | ||
|
|
99d7a34c00 | ||
|
|
3212167ac4 | ||
|
|
33300fd30c | ||
|
|
0372ce0649 | ||
|
|
43049bbec0 | ||
|
|
80163a67a2 | ||
|
|
f0448c67b0 | ||
|
|
94097b98d4 | ||
|
|
7062b19461 | ||
|
|
7af4304592 | ||
|
|
f2fac82bf0 | ||
|
|
30aea6d942 | ||
|
|
e3b1e2860c | ||
|
|
bb169ff51c | ||
|
|
76fa348dd9 | ||
|
|
2933648013 | ||
|
|
2a2e2a5b3b | ||
|
|
3135b9b5a5 | ||
|
|
8d117c0f25 | ||
|
|
9cb2bf0820 | ||
|
|
ddbcdd9736 | ||
|
|
c66f32a4ff | ||
|
|
de284b1111 | ||
|
|
a0825043f4 | ||
|
|
7107923e1e | ||
|
|
a6b007b753 | ||
|
|
3909370146 | ||
|
|
0230c04702 | ||
|
|
d5a92727d2 | ||
|
|
042e087066 | ||
|
|
dcd271393e | ||
|
|
f2767d973b | ||
|
|
51a1a01605 | ||
|
|
067f00050e | ||
|
|
9fc44d2f0e | ||
|
|
67c86461cb | ||
|
|
d018539b07 | ||
|
|
1e488458b2 | ||
|
|
b4b6a377e4 | ||
|
|
90b8870a16 | ||
|
|
00ed12e56c | ||
|
|
04610f5299 | ||
|
|
ef58e3fc1e | ||
|
|
21c7e53c1c | ||
|
|
66cad2975f | ||
|
|
bd251e81df | ||
|
|
f807e2792d | ||
|
|
d1942091d7 | ||
|
|
01ff0d1ba8 | ||
|
|
19c855287c | ||
|
|
4c2c920250 | ||
|
|
e6f1841756 | ||
|
|
d4ad58f353 | ||
|
|
102034a742 | ||
|
|
d914f37a08 | ||
|
|
c63fefd37b | ||
|
|
c2cf2ab99b | ||
|
|
41526884f1 | ||
|
|
2e703e6b73 | ||
|
|
ecd585ef59 | ||
|
|
cd040d6674 | ||
|
|
a1dbde66fd | ||
|
|
67c35cde40 | ||
|
|
18b2f656f2 | ||
|
|
90d7e6673e | ||
|
|
182450623a | ||
|
|
f14c7f8200 | ||
|
|
b81fbabe37 | ||
|
|
9c934b64f8 | ||
|
|
4ad39f714f | ||
|
|
f56a0fbcd9 | ||
|
|
296c2566c5 | ||
|
|
d760097dad | ||
|
|
523c5ee159 | ||
|
|
8146eb6027 | ||
|
|
725c18bf6c | ||
|
|
c8270831ec | ||
|
|
0987916542 | ||
|
|
ec23a14f82 | ||
|
|
7e4f7a20dc | ||
|
|
930234e48e | ||
|
|
9055096021 | ||
|
|
65934ada59 | ||
|
|
b28d3cdb5d | ||
|
|
cf58acc23f | ||
|
|
75c3f33a8c | ||
|
|
be516a080a | ||
|
|
176b7dfd17 | ||
|
|
30d4b4e689 | ||
|
|
3b73c49ac8 | ||
|
|
b556356a7d | ||
|
|
90c12b4ea3 | ||
|
|
287a986715 | ||
|
|
9b46c6ac2d | ||
|
|
f0436aeecc | ||
|
|
e3ba65003d | ||
|
|
d25a07a6c0 | ||
|
|
426f53bc89 | ||
|
|
1d85558d47 | ||
|
|
1c7b1ac165 | ||
|
|
c4c71989bf | ||
|
|
4dad76a6a7 | ||
|
|
323268e9ab | ||
|
|
a441ad9360 | ||
|
|
d04a5b01c3 | ||
|
|
4da8171b42 | ||
|
|
fbc6f222b8 | ||
|
|
31cce0323e | ||
|
|
f3d1cefe3e | ||
|
|
8bf6e963f2 | ||
|
|
ac65f91b5d | ||
|
|
c7d7c1e50d | ||
|
|
829f5fe359 | ||
|
|
5f9409af68 | ||
|
|
f28680ec5c | ||
|
|
a7175ddd44 | ||
|
|
90f56c32b0 | ||
|
|
2808f7ae3f | ||
|
|
ac278d67ca | ||
|
|
53a9567804 | ||
|
|
5a751ddcff | ||
|
|
dae70c892f | ||
|
|
4cbb40d5d8 | ||
|
|
ae779ade37 | ||
|
|
8839b22277 | ||
|
|
35bc488f5b | ||
|
|
c85dcfb71d | ||
|
|
a37b334e16 | ||
|
|
d52e7de7be | ||
|
|
dd884b5552 | ||
|
|
6c6c5a37a1 | ||
|
|
4ff599b21f | ||
|
|
6178694b0e | ||
|
|
700a3bb7be | ||
|
|
eb13166fb5 | ||
|
|
325773d65b | ||
|
|
4e12a4f212 | ||
|
|
3600a096be | ||
|
|
9fbf601f44 | ||
|
|
7b400913fc | ||
|
|
623fd5d10d | ||
|
|
8c9e88f77d | ||
|
|
4f3b0af6d0 | ||
|
|
830805e608 | ||
|
|
b87057eb08 | ||
|
|
53ac8dfb0a | ||
|
|
8a19ae8e39 | ||
|
|
41775a6e5d | ||
|
|
7adde41f55 | ||
|
|
c926f1bb53 | ||
|
|
e6ff060944 | ||
|
|
b5fa4ce3f3 | ||
|
|
6e697e4cca | ||
|
|
b2b7ae4e41 | ||
|
|
fca1e655b4 |
@@ -1,4 +1,3 @@
|
||||
.git
|
||||
build
|
||||
seastar/build
|
||||
testlog
|
||||
|
||||
5
.gitignore
vendored
5
.gitignore
vendored
@@ -19,8 +19,3 @@ CMakeLists.txt.user
|
||||
__pycache__CMakeLists.txt.user
|
||||
.gdbinit
|
||||
resources
|
||||
.pytest_cache
|
||||
/expressions.tokens
|
||||
tags
|
||||
testlog/*
|
||||
test/*/*.reject
|
||||
|
||||
8
.gitmodules
vendored
8
.gitmodules
vendored
@@ -1,17 +1,17 @@
|
||||
[submodule "seastar"]
|
||||
path = seastar
|
||||
url = ../scylla-seastar
|
||||
url = ../seastar
|
||||
ignore = dirty
|
||||
[submodule "swagger-ui"]
|
||||
path = swagger-ui
|
||||
url = ../scylla-swagger-ui
|
||||
ignore = dirty
|
||||
[submodule "xxHash"]
|
||||
path = xxHash
|
||||
url = ../xxHash
|
||||
[submodule "libdeflate"]
|
||||
path = libdeflate
|
||||
url = ../libdeflate
|
||||
[submodule "zstd"]
|
||||
path = zstd
|
||||
url = ../zstd
|
||||
[submodule "abseil"]
|
||||
path = abseil
|
||||
url = ../abseil-cpp
|
||||
|
||||
@@ -5,25 +5,13 @@
|
||||
cmake_minimum_required(VERSION 3.7)
|
||||
project(scylla)
|
||||
|
||||
if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
|
||||
message(STATUS "Setting build type to 'Release' as none was specified.")
|
||||
set(CMAKE_BUILD_TYPE "Release" CACHE
|
||||
STRING "Choose the type of build." FORCE)
|
||||
# Set the possible values of build type for cmake-gui
|
||||
set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS
|
||||
"Debug" "Release" "Dev" "Sanitize")
|
||||
endif()
|
||||
|
||||
if(CMAKE_BUILD_TYPE)
|
||||
string(TOLOWER "${CMAKE_BUILD_TYPE}" BUILD_TYPE)
|
||||
else()
|
||||
set(BUILD_TYPE "release")
|
||||
endif()
|
||||
|
||||
if (NOT DEFINED FOR_IDE AND NOT DEFINED ENV{FOR_IDE} AND NOT DEFINED ENV{CLION_IDE})
|
||||
message(FATAL_ERROR "This CMakeLists.txt file is only valid for use in IDEs, please define FOR_IDE to acknowledge this.")
|
||||
endif()
|
||||
|
||||
# Default value. A more accurate list is populated through `pkg-config` below if `seastar.pc` is available.
|
||||
set(SEASTAR_INCLUDE_DIRS "seastar")
|
||||
|
||||
# These paths are always available, since they're included in the repository. Additional DPDK headers are placed while
|
||||
# Seastar is built, and are captured in `SEASTAR_INCLUDE_DIRS` through parsing the Seastar pkg-config file (below).
|
||||
set(SEASTAR_DPDK_INCLUDE_DIRS
|
||||
@@ -34,14 +22,9 @@ set(SEASTAR_DPDK_INCLUDE_DIRS
|
||||
|
||||
find_package(PkgConfig REQUIRED)
|
||||
|
||||
set(ENV{PKG_CONFIG_PATH} "${CMAKE_SOURCE_DIR}/build/${BUILD_TYPE}/seastar:$ENV{PKG_CONFIG_PATH}")
|
||||
set(ENV{PKG_CONFIG_PATH} "${CMAKE_SOURCE_DIR}/seastar/build/release:$ENV{PKG_CONFIG_PATH}")
|
||||
pkg_check_modules(SEASTAR seastar)
|
||||
|
||||
if(NOT SEASTAR_INCLUDE_DIRS)
|
||||
# Default value. A more accurate list is populated through `pkg-config` below if `seastar.pc` is available.
|
||||
set(SEASTAR_INCLUDE_DIRS "seastar/include")
|
||||
endif()
|
||||
|
||||
find_package(Boost COMPONENTS filesystem program_options system thread)
|
||||
|
||||
##
|
||||
@@ -87,7 +70,7 @@ scan_scylla_source_directories(
|
||||
seastar/json
|
||||
seastar/net
|
||||
seastar/rpc
|
||||
seastar/testing
|
||||
seastar/tests
|
||||
seastar/util)
|
||||
|
||||
scan_scylla_source_directories(
|
||||
@@ -114,7 +97,7 @@ scan_scylla_source_directories(
|
||||
service
|
||||
sstables
|
||||
streaming
|
||||
test
|
||||
tests
|
||||
thrift
|
||||
tracing
|
||||
transport
|
||||
@@ -123,7 +106,7 @@ scan_scylla_source_directories(
|
||||
scan_scylla_source_directories(
|
||||
VAR SCYLLA_GEN_SOURCE_FILES
|
||||
RECURSIVE
|
||||
PATHS build/${BUILD_TYPE}/gen)
|
||||
PATHS build/release/gen)
|
||||
|
||||
set(SCYLLA_SOURCE_FILES
|
||||
${SCYLLA_ROOT_SOURCE_FILES}
|
||||
@@ -156,4 +139,4 @@ target_include_directories(scylla PUBLIC
|
||||
${Boost_INCLUDE_DIRS}
|
||||
xxhash
|
||||
libdeflate
|
||||
build/${BUILD_TYPE}/gen)
|
||||
build/release/gen)
|
||||
|
||||
@@ -56,7 +56,7 @@ $ ./configure.py --help
|
||||
|
||||
The most important option is:
|
||||
|
||||
- `--enable-dpdk`: [DPDK](http://dpdk.org/) is a set of libraries and drivers for fast packet processing. During development, it's not necessary to enable support even if it is supported by your platform.
|
||||
- `--{enable,disable}-dpdk`: [DPDK](http://dpdk.org/) is a set of libraries and drivers for fast packet processing. During development, it's not necessary to enable support even if it is supported by your platform.
|
||||
|
||||
Source files and build targets are tracked manually in `configure.py`, so the script needs to be updated when new files or targets are added or removed.
|
||||
|
||||
@@ -141,7 +141,7 @@ In v3:
|
||||
"Tests: unit ({mode}), dtest ({smp})"
|
||||
```
|
||||
|
||||
The usual is "Tests: unit (dev)", although running debug tests is encouraged.
|
||||
The usual is "Tests: unit (release)", although running debug tests is encouraged.
|
||||
|
||||
5. When answering review comments, prefer inline quotes as they make it easier to track the conversation across multiple e-mails.
|
||||
|
||||
|
||||
31
MAINTAINERS
31
MAINTAINERS
@@ -5,6 +5,8 @@ F: Filename, directory, or pattern for the subsystem
|
||||
---
|
||||
|
||||
AUTH
|
||||
M: Paweł Dziepak <pdziepak@scylladb.com>
|
||||
M: Duarte Nunes <duarte@scylladb.com>
|
||||
R: Calle Wilund <calle@scylladb.com>
|
||||
R: Vlad Zolotarov <vladz@scylladb.com>
|
||||
R: Jesse Haber-Kucharsky <jhaberku@scylladb.com>
|
||||
@@ -12,17 +14,22 @@ F: auth/*
|
||||
|
||||
CACHE
|
||||
M: Tomasz Grabiec <tgrabiec@scylladb.com>
|
||||
M: Paweł Dziepak <pdziepak@scylladb.com>
|
||||
R: Piotr Jastrzebski <piotr@scylladb.com>
|
||||
F: row_cache*
|
||||
F: *mutation*
|
||||
F: tests/mvcc*
|
||||
|
||||
COMMITLOG / BATCHLOGa
|
||||
M: Paweł Dziepak <pdziepak@scylladb.com>
|
||||
M: Duarte Nunes <duarte@scylladb.com>
|
||||
R: Calle Wilund <calle@scylladb.com>
|
||||
F: db/commitlog/*
|
||||
F: db/batch*
|
||||
|
||||
COORDINATOR
|
||||
M: Paweł Dziepak <pdziepak@scylladb.com>
|
||||
M: Duarte Nunes <duarte@scylladb.com>
|
||||
R: Gleb Natapov <gleb@scylladb.com>
|
||||
F: service/storage_proxy*
|
||||
|
||||
@@ -42,10 +49,12 @@ M: Pekka Enberg <penberg@scylladb.com>
|
||||
F: cql3/*
|
||||
|
||||
COUNTERS
|
||||
M: Paweł Dziepak <pdziepak@scylladb.com>
|
||||
F: counters*
|
||||
F: tests/counter_test*
|
||||
|
||||
GOSSIP
|
||||
M: Duarte Nunes <duarte@scylladb.com>
|
||||
M: Tomasz Grabiec <tgrabiec@scylladb.com>
|
||||
R: Asias He <asias@scylladb.com>
|
||||
F: gms/*
|
||||
@@ -56,11 +65,14 @@ F: dist/docker/*
|
||||
|
||||
LSA
|
||||
M: Tomasz Grabiec <tgrabiec@scylladb.com>
|
||||
M: Paweł Dziepak <pdziepak@scylladb.com>
|
||||
F: utils/logalloc*
|
||||
|
||||
MATERIALIZED VIEWS
|
||||
M: Duarte Nunes <duarte@scylladb.com>
|
||||
M: Pekka Enberg <penberg@scylladb.com>
|
||||
M: Nadav Har'El <nyh@scylladb.com>
|
||||
R: Nadav Har'El <nyh@scylladb.com>
|
||||
R: Duarte Nunes <duarte@scylladb.com>
|
||||
F: db/view/*
|
||||
F: cql3/statements/*view*
|
||||
|
||||
@@ -70,12 +82,14 @@ F: dist/*
|
||||
|
||||
REPAIR
|
||||
M: Tomasz Grabiec <tgrabiec@scylladb.com>
|
||||
M: Duarte Nunes <duarte@scylladb.com>
|
||||
R: Asias He <asias@scylladb.com>
|
||||
R: Nadav Har'El <nyh@scylladb.com>
|
||||
F: repair/*
|
||||
|
||||
SCHEMA MANAGEMENT
|
||||
M: Tomasz Grabiec <tgrabiec@scylladb.com>
|
||||
M: Duarte Nunes <duarte@scylladb.com>
|
||||
M: Pekka Enberg <penberg@scylladb.com>
|
||||
F: db/schema_tables*
|
||||
F: db/legacy_schema_migrator*
|
||||
@@ -84,13 +98,15 @@ F: schema*
|
||||
|
||||
SECONDARY INDEXES
|
||||
M: Pekka Enberg <penberg@scylladb.com>
|
||||
M: Nadav Har'El <nyh@scylladb.com>
|
||||
M: Duarte Nunes <duarte@scylladb.com>
|
||||
R: Nadav Har'El <nyh@scylladb.com>
|
||||
R: Pekka Enberg <penberg@scylladb.com>
|
||||
F: db/index/*
|
||||
F: cql3/statements/*index*
|
||||
|
||||
SSTABLES
|
||||
M: Tomasz Grabiec <tgrabiec@scylladb.com>
|
||||
M: Duarte Nunes <duarte@scylladb.com>
|
||||
R: Raphael S. Carvalho <raphaelsc@scylladb.com>
|
||||
R: Glauber Costa <glauber@scylladb.com>
|
||||
R: Nadav Har'El <nyh@scylladb.com>
|
||||
@@ -98,17 +114,18 @@ F: sstables/*
|
||||
|
||||
STREAMING
|
||||
M: Tomasz Grabiec <tgrabiec@scylladb.com>
|
||||
M: Duarte Nunes <duarte@scylladb.com>
|
||||
R: Asias He <asias@scylladb.com>
|
||||
F: streaming/*
|
||||
F: service/storage_service.*
|
||||
|
||||
ALTERNATOR
|
||||
M: Nadav Har'El <nyh@scylladb.com>
|
||||
F: alternator/*
|
||||
F: alternator-test/*
|
||||
THRIFT TRANSPORT LAYER
|
||||
M: Duarte Nunes <duarte@scylladb.com>
|
||||
F: thrift/*
|
||||
|
||||
THE REST
|
||||
M: Avi Kivity <avi@scylladb.com>
|
||||
M: Paweł Dziepak <pdziepak@scylladb.com>
|
||||
M: Duarte Nunes <duarte@scylladb.com>
|
||||
M: Tomasz Grabiec <tgrabiec@scylladb.com>
|
||||
M: Nadav Har'El <nyh@scylladb.com>
|
||||
F: *
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
This project includes code developed by the Apache Software Foundation (http://www.apache.org/),
|
||||
especially Apache Cassandra.
|
||||
|
||||
It includes files from https://github.com/antonblanchard/crc32-vpmsum (author Anton Blanchard <anton@au.ibm.com>, IBM).
|
||||
It also includes files from https://github.com/antonblanchard/crc32-vpmsum (author Anton Blanchard <anton@au.ibm.com>, IBM).
|
||||
These files are located in utils/arch/powerpc/crc32-vpmsum. Their license may be found in licenses/LICENSE-crc32-vpmsum.TXT.
|
||||
|
||||
It includes modified code from https://gitbox.apache.org/repos/asf?p=cassandra-dtest.git (owned by The Apache Software Foundation)
|
||||
|
||||
29
README-DPDK.md
Normal file
29
README-DPDK.md
Normal file
@@ -0,0 +1,29 @@
|
||||
Seastar and DPDK
|
||||
================
|
||||
|
||||
Seastar uses the Data Plane Development Kit to drive NIC hardware directly. This
|
||||
provides an enormous performance boost.
|
||||
|
||||
To enable DPDK, specify `--enable-dpdk` to `./configure.py`, and `--dpdk-pmd` as a
|
||||
run-time parameter. This will use the DPDK package provided as a git submodule with the
|
||||
seastar sources.
|
||||
|
||||
To use your own self-compiled DPDK package, follow this procedure:
|
||||
|
||||
1. Setup host to compile DPDK:
|
||||
- Ubuntu
|
||||
`sudo apt-get install -y build-essential linux-image-extra-$(uname -r)`
|
||||
2. Prepare a DPDK SDK:
|
||||
- Download the latest DPDK release: `wget http://dpdk.org/browse/dpdk/snapshot/dpdk-1.8.0.tar.gz`
|
||||
- Untar it.
|
||||
- Edit config/common_linuxapp: set CONFIG_RTE_MBUF_REFCNT and CONFIG_RTE_LIBRTE_KNI to 'n'.
|
||||
- For DPDK 1.7.x: edit config/common_linuxapp:
|
||||
- Set CONFIG_RTE_LIBRTE_PMD_BOND to 'n'.
|
||||
- Set CONFIG_RTE_MBUF_SCATTER_GATHER to 'n'.
|
||||
- Set CONFIG_RTE_LIBRTE_IP_FRAG to 'n'.
|
||||
- Start the tools/setup.sh script as root.
|
||||
- Compile a linuxapp target (option 9).
|
||||
- Install IGB_UIO module (option 11).
|
||||
- Bind some physical port to IGB_UIO (option 17).
|
||||
- Configure hugepage mappings (option 14/15).
|
||||
3. Run a configure.py: `./configure.py --dpdk-target <Path to untared dpdk-1.8.0 above>/x86_64-native-linuxapp-gcc`.
|
||||
45
README.md
45
README.md
@@ -27,10 +27,10 @@ Please see [HACKING.md](HACKING.md) for detailed information on building and dev
|
||||
|
||||
```
|
||||
|
||||
* run Scylla with one CPU and ./tmp as work directory
|
||||
* run Scylla with one CPU and ./tmp as data directory
|
||||
|
||||
```
|
||||
./build/release/scylla --workdir tmp --smp 1
|
||||
./build/release/scylla --datadir tmp --commitlog-directory tmp --smp 1
|
||||
```
|
||||
|
||||
* For more run options:
|
||||
@@ -38,34 +38,31 @@ Please see [HACKING.md](HACKING.md) for detailed information on building and dev
|
||||
./build/release/scylla --help
|
||||
```
|
||||
|
||||
## Testing
|
||||
## Building Fedora RPM
|
||||
|
||||
See [test.py manual](docs/testing.md).
|
||||
As a pre-requisite, you need to install [Mock](https://fedoraproject.org/wiki/Mock) on your machine:
|
||||
|
||||
## Scylla APIs and compatibility
|
||||
By default, Scylla is compatible with Apache Cassandra and its APIs - CQL and
|
||||
Thrift. There is also experimental support for the API of Amazon DynamoDB,
|
||||
but being experimental it needs to be explicitly enabled to be used. For more
|
||||
information on how to enable the experimental DynamoDB compatibility in Scylla,
|
||||
and the current limitations of this feature, see
|
||||
[Alternator](docs/alternator/alternator.md) and
|
||||
[Getting started with Alternator](docs/alternator/getting-started.md).
|
||||
```
|
||||
# Install mock:
|
||||
sudo yum install mock
|
||||
|
||||
## Documentation
|
||||
# Add user to the "mock" group:
|
||||
usermod -a -G mock $USER && newgrp mock
|
||||
```
|
||||
|
||||
Documentation can be found in [./docs](./docs) and on the
|
||||
[wiki](https://github.com/scylladb/scylla/wiki). There is currently no clear
|
||||
definition of what goes where, so when looking for something be sure to check
|
||||
both.
|
||||
Seastar documentation can be found [here](http://docs.seastar.io/master/index.html).
|
||||
User documentation can be found [here](https://docs.scylladb.com/).
|
||||
Then, to build an RPM, run:
|
||||
|
||||
## Training
|
||||
```
|
||||
./dist/redhat/build_rpm.sh
|
||||
```
|
||||
|
||||
Training material and online courses can be found at [Scylla University](https://university.scylladb.com/).
|
||||
The courses are free, self-paced and include hands-on examples. They cover a variety of topics including Scylla data modeling,
|
||||
administration, architecture, basic NoSQL concepts, using drivers for application development, Scylla setup, failover, compactions,
|
||||
multi-datacenters and how Scylla integrates with third-party applications.
|
||||
The built RPM is stored in ``/var/lib/mock/<configuration>/result`` directory.
|
||||
For example, on Fedora 21 mock reports the following:
|
||||
|
||||
```
|
||||
INFO: Done(scylla-server-0.00-1.fc21.src.rpm) Config(default) 20 minutes 7 seconds
|
||||
INFO: Results and/or logs in: /var/lib/mock/fedora-21-x86_64/result
|
||||
```
|
||||
|
||||
## Building Fedora-based Docker image
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
#!/bin/sh
|
||||
|
||||
PRODUCT=scylla
|
||||
VERSION=4.1.11
|
||||
VERSION=666.development
|
||||
|
||||
if test -f version
|
||||
then
|
||||
@@ -19,14 +19,6 @@ else
|
||||
SCYLLA_RELEASE=$SCYLLA_BUILD.$DATE.$GIT_COMMIT
|
||||
fi
|
||||
|
||||
if [ -f build/SCYLLA-RELEASE-FILE ]; then
|
||||
RELEASE_FILE=$(cat build/SCYLLA-RELEASE-FILE)
|
||||
GIT_COMMIT_FILE=$(cat build/SCYLLA-RELEASE-FILE |cut -d . -f 3)
|
||||
if [ "$GIT_COMMIT" = "$GIT_COMMIT_FILE" ]; then
|
||||
exit 0
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "$SCYLLA_VERSION-$SCYLLA_RELEASE"
|
||||
mkdir -p build
|
||||
echo "$SCYLLA_VERSION" > build/SCYLLA-VERSION-FILE
|
||||
|
||||
1
abseil
1
abseil
Submodule abseil deleted from 2069dc796a
33
alternator-test/README.md
Normal file
33
alternator-test/README.md
Normal file
@@ -0,0 +1,33 @@
|
||||
Tests for Alternator that should also pass, identically, against DynamoDB.
|
||||
|
||||
Tests use the boto3 library for AWS API, and the pytest frameworks
|
||||
(both are available from Linux distributions, or with "pip install").
|
||||
|
||||
To run all tests against the local installation of Alternator on
|
||||
http://localhost:8000, just run `pytest`.
|
||||
|
||||
Some additional pytest options:
|
||||
* To run all tests in a single file, do `pytest test_table.py`.
|
||||
* To run a single specific test, do `pytest test_table.py::test_create_table_unsupported_names`.
|
||||
* Additional useful pytest options, especially useful for debugging tests:
|
||||
* -v: show the names of each individual test running instead of just dots.
|
||||
* -s: show the full output of running tests (by default, pytest captures the test's output and only displays it if a test fails)
|
||||
|
||||
Add the `--aws` option to test against AWS instead of the local installation.
|
||||
For example - `pytest --aws test_item.py` or `pytest --aws`.
|
||||
|
||||
If you plan to run tests against AWS and not just a local Scylla installation,
|
||||
the files ~/.aws/credentials should be configured with your AWS key:
|
||||
|
||||
```
|
||||
[default]
|
||||
aws_access_key_id = XXXXXXXXXXXXXXXXXXXX
|
||||
aws_secret_access_key = xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
|
||||
```
|
||||
|
||||
and ~/.aws/config with the default region to use in the test:
|
||||
```
|
||||
[default]
|
||||
region = us-east-1
|
||||
```
|
||||
|
||||
@@ -26,14 +26,6 @@ import pytest
|
||||
import boto3
|
||||
from util import create_test_table
|
||||
|
||||
# When tests are run with HTTPS, the server often won't have its SSL
|
||||
# certificate signed by a known authority. So we will disable certificate
|
||||
# verification with the "verify=False" request option. However, once we do
|
||||
# that, we start getting scary-looking warning messages, saying that this
|
||||
# makes HTTPS insecure. The following silences those warnings:
|
||||
import urllib3
|
||||
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
|
||||
|
||||
# Test that the Boto libraries are new enough. These tests want to test a
|
||||
# large variety of DynamoDB API features, and to do this we need a new-enough
|
||||
# version of the the Boto libraries (boto3 and botocore) so that they can
|
||||
@@ -51,11 +43,6 @@ if (LooseVersion(botocore.__version__) < LooseVersion('1.12.54')):
|
||||
def pytest_addoption(parser):
|
||||
parser.addoption("--aws", action="store_true",
|
||||
help="run against AWS instead of a local Scylla installation")
|
||||
parser.addoption("--https", action="store_true",
|
||||
help="communicate via HTTPS protocol on port 8043 instead of HTTP when"
|
||||
" running against a local Scylla installation")
|
||||
parser.addoption("--url", action="store",
|
||||
help="communicate with given URL instead of defaults")
|
||||
|
||||
# "dynamodb" fixture: set up client object for communicating with the DynamoDB
|
||||
# API. Currently this chooses either Amazon's DynamoDB in the default region
|
||||
@@ -72,15 +59,8 @@ def dynamodb(request):
|
||||
# requires us to specify dummy region and credential parameters,
|
||||
# otherwise the user is forced to properly configure ~/.aws even
|
||||
# for local runs.
|
||||
if request.config.getoption('url') != None:
|
||||
local_url = request.config.getoption('url')
|
||||
else:
|
||||
local_url = 'https://localhost:8043' if request.config.getoption('https') else 'http://localhost:8000'
|
||||
# Disable verifying in order to be able to use self-signed TLS certificates
|
||||
verify = not request.config.getoption('https')
|
||||
return boto3.resource('dynamodb', endpoint_url=local_url, verify=verify,
|
||||
region_name='us-east-1', aws_access_key_id='alternator', aws_secret_access_key='secret_pass',
|
||||
config=botocore.client.Config(retries={"max_attempts": 3}))
|
||||
return boto3.resource('dynamodb', endpoint_url='http://localhost:8000',
|
||||
region_name='us-east-1', aws_access_key_id='whatever', aws_secret_access_key='whatever')
|
||||
|
||||
# "test_table" fixture: Create and return a temporary table to be used in tests
|
||||
# that need a table to work on. The table is automatically deleted at the end.
|
||||
@@ -125,15 +105,6 @@ def test_table_s(dynamodb):
|
||||
AttributeDefinitions=[ { 'AttributeName': 'p', 'AttributeType': 'S' } ])
|
||||
yield table
|
||||
table.delete()
|
||||
# test_table_s_2 has exactly the same schema as test_table_s, and is useful
|
||||
# for tests which need two different tables with the same schema.
|
||||
@pytest.fixture(scope="session")
|
||||
def test_table_s_2(dynamodb):
|
||||
table = create_test_table(dynamodb,
|
||||
KeySchema=[ { 'AttributeName': 'p', 'KeyType': 'HASH' }, ],
|
||||
AttributeDefinitions=[ { 'AttributeName': 'p', 'AttributeType': 'S' } ])
|
||||
yield table
|
||||
table.delete()
|
||||
@pytest.fixture(scope="session")
|
||||
def test_table_b(dynamodb):
|
||||
table = create_test_table(dynamodb,
|
||||
@@ -196,11 +167,3 @@ def filled_test_table(dynamodb):
|
||||
|
||||
yield table, items
|
||||
table.delete()
|
||||
|
||||
# The "scylla_only" fixture can be used by tests for Scylla-only features,
|
||||
# which do not exist on AWS DynamoDB. A test using this fixture will be
|
||||
# skipped if running with "--aws".
|
||||
@pytest.fixture(scope="session")
|
||||
def scylla_only(dynamodb):
|
||||
if dynamodb.meta.client._endpoint.host.endswith('.amazonaws.com'):
|
||||
pytest.skip('Scylla-only feature not supported by AWS')
|
||||
@@ -20,7 +20,6 @@
|
||||
# so they are actually tested by other tests as well.
|
||||
|
||||
import pytest
|
||||
import random
|
||||
from botocore.exceptions import ClientError
|
||||
from util import random_string, full_scan, full_query, multiset
|
||||
|
||||
@@ -45,19 +44,6 @@ def test_basic_batch_write_item(test_table):
|
||||
assert item['attribute'] == str(i)
|
||||
assert item['another'] == 'xyz'
|
||||
|
||||
# Try a batch which includes both multiple writes to the same partition
|
||||
# and several partitions. The LWT code collects multiple mutations to the
|
||||
# same partition together, and we want to test that this worked correctly.
|
||||
def test_batch_write_item_mixed(test_table):
|
||||
partitions = [random_string() for i in range(4)]
|
||||
items = [{'p': p, 'c': str(i)} for p in partitions for i in range(4)]
|
||||
with test_table.batch_writer() as batch:
|
||||
# Reorder items randomly, just for the heck of it
|
||||
for item in random.sample(items, len(items)):
|
||||
batch.put_item(item)
|
||||
for item in items:
|
||||
assert test_table.get_item(Key={'p': item['p'], 'c': item['c']}, ConsistentRead=True)['Item'] == item
|
||||
|
||||
# Test batch write to a table with only a hash key
|
||||
def test_batch_write_hash_only(test_table_s):
|
||||
items = [{'p': random_string(), 'val': random_string()} for i in range(10)]
|
||||
@@ -152,20 +138,6 @@ def test_batch_write_duplicate_write_and_delete(test_table_s, test_table):
|
||||
batch.put_item({'p': p, 'c': other})
|
||||
batch.put_item({'p': other, 'c': c})
|
||||
|
||||
# The BatchWriteIem API allows writing to more than one table in the same
|
||||
# batch. This test verifies that the duplicate-key checking doesn't mistake
|
||||
# updates to the same key in different tables to be duplicates.
|
||||
def test_batch_write_nonduplicate_multiple_tables(test_table_s, test_table_s_2):
|
||||
p = random_string()
|
||||
# The batch_writer() function used in previous tests can't write to more
|
||||
# than one table. So we use the lower level interface boto3 gives us.
|
||||
reply = test_table_s.meta.client.batch_write_item(RequestItems = {
|
||||
test_table_s.name: [{'PutRequest': {'Item': {'p': p, 'a': 'hi'}}}],
|
||||
test_table_s_2.name: [{'PutRequest': {'Item': {'p': p, 'b': 'hello'}}}]
|
||||
})
|
||||
assert test_table_s.get_item(Key={'p': p}, ConsistentRead=True)['Item'] == {'p': p, 'a': 'hi'}
|
||||
assert test_table_s_2.get_item(Key={'p': p}, ConsistentRead=True)['Item'] == {'p': p, 'b': 'hello'}
|
||||
|
||||
# Test that BatchWriteItem's PutRequest completely replaces an existing item.
|
||||
# It shouldn't merge it with a previously existing value. See also the same
|
||||
# test for PutItem - test_put_item_replace().
|
||||
@@ -210,32 +182,6 @@ def test_batch_write_invalid_operation(test_table_s):
|
||||
for p in [p1, p2]:
|
||||
assert not 'item' in test_table_s.get_item(Key={'p': p}, ConsistentRead=True)
|
||||
|
||||
# In test_item.py we have a bunch of test_empty_* tests on different ways to
|
||||
# create an empty item (which in Scylla requires the special CQL row marker
|
||||
# to be supported correctly). BatchWriteItems provides yet another way of
|
||||
# creating items, so check the empty case here too:
|
||||
def test_empty_batch_write(test_table):
|
||||
p = random_string()
|
||||
c = random_string()
|
||||
with test_table.batch_writer() as batch:
|
||||
batch.put_item({'p': p, 'c': c})
|
||||
assert test_table.get_item(Key={'p': p, 'c': c}, ConsistentRead=True)['Item'] == {'p': p, 'c': c}
|
||||
|
||||
# Test that BatchWriteItems allows writing to multiple tables in one operation
|
||||
def test_batch_write_multiple_tables(test_table_s, test_table):
|
||||
p1 = random_string()
|
||||
c1 = random_string()
|
||||
p2 = random_string()
|
||||
# We use the low-level batch_write_item API for lack of a more convenient
|
||||
# API (the batch_writer() API can only write to one table). At least it
|
||||
# spares us the need to encode the key's types...
|
||||
reply = test_table.meta.client.batch_write_item(RequestItems = {
|
||||
test_table.name: [{'PutRequest': {'Item': {'p': p1, 'c': c1, 'a': 'hi'}}}],
|
||||
test_table_s.name: [{'PutRequest': {'Item': {'p': p2, 'b': 'hello'}}}]
|
||||
})
|
||||
assert test_table.get_item(Key={'p': p1, 'c': c1}, ConsistentRead=True)['Item'] == {'p': p1, 'c': c1, 'a': 'hi'}
|
||||
assert test_table_s.get_item(Key={'p': p2}, ConsistentRead=True)['Item'] == {'p': p2, 'b': 'hello'}
|
||||
|
||||
# Basic test for BatchGetItem, reading several entire items.
|
||||
# Schema has both hash and sort keys.
|
||||
def test_batch_get_item(test_table):
|
||||
@@ -305,16 +251,3 @@ def test_batch_get_item_projection_expression(test_table):
|
||||
got_items = reply['Responses'][test_table.name]
|
||||
expected_items = [{k: item[k] for k in wanted if k in item} for item in items]
|
||||
assert multiset(got_items) == multiset(expected_items)
|
||||
|
||||
# Test that we return the required UnprocessedKeys/UnprocessedItems parameters
|
||||
def test_batch_unprocessed(test_table_s):
|
||||
p = random_string()
|
||||
write_reply = test_table_s.meta.client.batch_write_item(RequestItems = {
|
||||
test_table_s.name: [{'PutRequest': {'Item': {'p': p, 'a': 'hi'}}}],
|
||||
})
|
||||
assert 'UnprocessedItems' in write_reply and write_reply['UnprocessedItems'] == dict()
|
||||
|
||||
read_reply = test_table_s.meta.client.batch_get_item(RequestItems = {
|
||||
test_table_s.name: {'Keys': [{'p': p}], 'ProjectionExpression': 'p, a', 'ConsistentRead': True}
|
||||
})
|
||||
assert 'UnprocessedKeys' in read_reply and read_reply['UnprocessedKeys'] == dict()
|
||||
40
alternator-test/test_condition_expression.py
Normal file
40
alternator-test/test_condition_expression.py
Normal file
@@ -0,0 +1,40 @@
|
||||
# Copyright 2019 ScyllaDB
|
||||
#
|
||||
# This file is part of Scylla.
|
||||
#
|
||||
# Scylla is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Scylla is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Tests for the ConditionExpression parameter
|
||||
|
||||
import pytest
|
||||
from botocore.exceptions import ClientError
|
||||
from util import random_string
|
||||
|
||||
# Test that ConditionExpression works as expected
@pytest.mark.xfail(reason="ConditionExpression not yet implemented")
def test_update_condition_expression(test_table_s):
    pk = random_string()
    # Unconditional write, then a conditional write whose condition holds:
    test_table_s.update_item(Key={'p': pk},
        UpdateExpression='SET b = :val1',
        ExpressionAttributeValues={':val1': 4})
    test_table_s.update_item(Key={'p': pk},
        UpdateExpression='SET b = :val1',
        ConditionExpression='b = :oldval',
        ExpressionAttributeValues={':val1': 6, ':oldval': 4})
    # b is now 6, so the condition "b = 4" no longer holds - this update
    # must be refused and leave the item unchanged:
    with pytest.raises(ClientError, match='ConditionalCheckFailedException.*'):
        test_table_s.update_item(Key={'p': pk},
            UpdateExpression='SET b = :val1',
            ConditionExpression='b = :oldval',
            ExpressionAttributeValues={':val1': 8, ':oldval': 4})
    assert test_table_s.get_item(Key={'p': pk}, ConsistentRead=True)['Item'] == {'p': pk, 'b': 6}
|
||||
@@ -22,7 +22,7 @@ import boto3
|
||||
# Test that the DescribeEndpoints operation works as expected: that it
|
||||
# returns one endpoint (it may return more, but it never does this in
|
||||
# Amazon), and this endpoint can be used to make more requests.
|
||||
def test_describe_endpoints(request, dynamodb):
|
||||
def test_describe_endpoints(dynamodb):
|
||||
endpoints = dynamodb.meta.client.describe_endpoints()['Endpoints']
|
||||
# It is not strictly necessary that only a single endpoint be returned,
|
||||
# but this is what Amazon DynamoDB does today (and so does Alternator).
|
||||
@@ -34,16 +34,14 @@ def test_describe_endpoints(request, dynamodb):
|
||||
# send it another describe_endpoints() request ;-) Note that the
|
||||
# address does not include the "http://" or "https://" prefix, and
|
||||
# we need to choose one manually.
|
||||
prefix = "https://" if request.config.getoption('https') else "http://"
|
||||
verify = not request.config.getoption('https')
|
||||
url = prefix + address
|
||||
url = "http://" + address
|
||||
if address.endswith('.amazonaws.com'):
|
||||
boto3.client('dynamodb',endpoint_url=url, verify=verify).describe_endpoints()
|
||||
boto3.client('dynamodb',endpoint_url=url).describe_endpoints()
|
||||
else:
|
||||
# Even though we connect to the local installation, Boto3 still
|
||||
# requires us to specify dummy region and credential parameters,
|
||||
# otherwise the user is forced to properly configure ~/.aws even
|
||||
# for local runs.
|
||||
boto3.client('dynamodb',endpoint_url=url, region_name='us-east-1', aws_access_key_id='alternator', aws_secret_access_key='secret_pass', verify=verify).describe_endpoints()
|
||||
boto3.client('dynamodb',endpoint_url=url, region_name='us-east-1', aws_access_key_id='whatever', aws_secret_access_key='whatever').describe_endpoints()
|
||||
# Nothing to check here - if the above call failed with an exception,
|
||||
# the test would fail.
|
||||
@@ -41,6 +41,7 @@ def test_describe_table_basic(test_table):
|
||||
|
||||
# Test that DescribeTable correctly returns the table's schema, in
|
||||
# AttributeDefinitions and KeySchema attributes
|
||||
@pytest.mark.xfail(reason="DescribeTable does not yet return schema")
|
||||
def test_describe_table_schema(test_table):
|
||||
got = test_table.meta.client.describe_table(TableName=test_table.name)['Table']
|
||||
expected = { # Copied from test_table()'s fixture
|
||||
@@ -141,6 +142,7 @@ def test_describe_table_stream_specification(test_table):
|
||||
# includes which zone it is on, which account, and of course the table's
|
||||
# name. The ARN format is described in
|
||||
# https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-arns
|
||||
@pytest.mark.xfail(reason="DescribeTable does not return ARN")
|
||||
def test_describe_table_arn(test_table):
|
||||
got = test_table.meta.client.describe_table(TableName=test_table.name)['Table']
|
||||
assert 'TableArn' in got and got['TableArn'].startswith('arn:')
|
||||
369
alternator-test/test_expected.py
Normal file
369
alternator-test/test_expected.py
Normal file
@@ -0,0 +1,369 @@
|
||||
# Copyright 2019 ScyllaDB
|
||||
#
|
||||
# This file is part of Scylla.
|
||||
#
|
||||
# Scylla is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Scylla is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Tests for the "Expected" parameter used to make certain operations (PutItem,
|
||||
# UpdateItem and DeleteItem) conditional on the existing attribute values.
|
||||
# "Expected" is the older version of ConditionExpression parameter, which
|
||||
# is tested by the separate test_condition_expression.py.
|
||||
|
||||
import pytest
|
||||
from botocore.exceptions import ClientError
|
||||
from util import random_string
|
||||
|
||||
# Most of the tests in this file check that the "Expected" parameter works for
|
||||
# the UpdateItem operation. It should also work the same for the PutItem and
|
||||
# DeleteItem operations, and we'll make a small effort verifying that at
|
||||
# the end of the file.
|
||||
|
||||
# Somewhat pedantically, DynamoDB forbids using old-style Expected together
# with new-style UpdateExpression... Expected can only be used with
# AttributeUpdates (and for UpdateExpression, ConditionExpression should be
# used).
def test_update_expression_and_expected(test_table_s):
    pk = random_string()
    test_table_s.update_item(Key={'p': pk},
        UpdateExpression='SET a = :val1',
        ExpressionAttributeValues={':val1': 1})
    # Mixing UpdateExpression with Expected must be rejected outright:
    with pytest.raises(ClientError, match='ValidationException.*UpdateExpression'):
        test_table_s.update_item(Key={'p': pk},
            UpdateExpression='SET a = :val1',
            ExpressionAttributeValues={':val1': 2},
            Expected={'a': {'ComparisonOperator': 'EQ',
                            'AttributeValueList': [1]}})
|
||||
|
||||
# The following string of tests test the various types of Expected conditions
|
||||
# on a single attribute. This condition is defined using ComparisonOperator
|
||||
# (there are many types of those!) or by Value or Exists, and we need to check
|
||||
# all these types of conditions.
|
||||
#
|
||||
# In each case we have tests for the "true" case of the condition, meaning
|
||||
# that the condition evaluates to true and the update is supposed to happen,
|
||||
# and the "false" case, where the condition evaluates to false, so the update
|
||||
# doesn't happen and we get a ConditionalCheckFailedException instead.
|
||||
|
||||
def test_update_expected_1_eq_true(test_table_s):
    """The "true" case of an Expected EQ condition: the update happens."""
    pk = random_string()
    test_table_s.update_item(Key={'p': pk},
        AttributeUpdates={'a': {'Value': 1, 'Action': 'PUT'}})
    # Case where expected and update are on the same attribute:
    test_table_s.update_item(Key={'p': pk},
        AttributeUpdates={'a': {'Value': 2, 'Action': 'PUT'}},
        Expected={'a': {'ComparisonOperator': 'EQ',
                        'AttributeValueList': [1]}})
    assert test_table_s.get_item(Key={'p': pk}, ConsistentRead=True)['Item'] == {'p': pk, 'a': 2}
    # Case where expected and update are on different attributes:
    test_table_s.update_item(Key={'p': pk},
        AttributeUpdates={'b': {'Value': 3, 'Action': 'PUT'}},
        Expected={'a': {'ComparisonOperator': 'EQ',
                        'AttributeValueList': [2]}})
    assert test_table_s.get_item(Key={'p': pk}, ConsistentRead=True)['Item'] == {'p': pk, 'a': 2, 'b': 3}
    # For EQ, AttributeValueList must hold exactly one element:
    with pytest.raises(ClientError, match='ValidationException'):
        test_table_s.update_item(Key={'p': pk},
            AttributeUpdates={'b': {'Value': 3, 'Action': 'PUT'}},
            Expected={'a': {'ComparisonOperator': 'EQ',
                            'AttributeValueList': [2, 3]}})
|
||||
|
||||
# Check that set equality is checked correctly. Unlike string equality (for
# example), it cannot be done with just naive string comparison of the JSON
# representation, and we need to allow for any order.
@pytest.mark.xfail(reason="bug in EQ test of sets")
def test_update_expected_1_eq_set(test_table_s):
    pk = random_string()
    # Because boto3 sorts the set values we give it, in order to generate a
    # set with a different order, we need to build it incrementally.
    test_table_s.update_item(Key={'p': pk},
        AttributeUpdates={'a': {'Value': set(['dog', 'chinchilla']), 'Action': 'PUT'}})
    test_table_s.update_item(Key={'p': pk},
        UpdateExpression='ADD a :val1',
        ExpressionAttributeValues={':val1': set(['cat', 'mouse'])})
    # Sanity check - the attribute now contains the full, merged set:
    full_set = set(['chinchilla', 'cat', 'dog', 'mouse'])
    assert test_table_s.get_item(Key={'p': pk}, ConsistentRead=True)['Item']['a'] == full_set
    # Finally, "Expected"'s equality check must recognize the same set,
    # regardless of element order:
    test_table_s.update_item(Key={'p': pk},
        AttributeUpdates={'b': {'Value': 3, 'Action': 'PUT'}},
        Expected={'a': {'ComparisonOperator': 'EQ',
                        'AttributeValueList': [full_set]}})
    assert 'b' in test_table_s.get_item(Key={'p': pk}, ConsistentRead=True)['Item']
|
||||
|
||||
def test_update_expected_1_eq_false(test_table_s):
    """The "false" case of an Expected EQ condition: the update is refused."""
    pk = random_string()
    test_table_s.update_item(Key={'p': pk},
        AttributeUpdates={'a': {'Value': 1, 'Action': 'PUT'}})
    # EQ against a different value of the same type fails the condition:
    with pytest.raises(ClientError, match='ConditionalCheckFailedException'):
        test_table_s.update_item(Key={'p': pk},
            AttributeUpdates={'b': {'Value': 3, 'Action': 'PUT'}},
            Expected={'a': {'ComparisonOperator': 'EQ',
                            'AttributeValueList': [2]}})
    # A compared value of a different type also just fails the condition
    # normally - it is not a validation error:
    with pytest.raises(ClientError, match='ConditionalCheckFailedException'):
        test_table_s.update_item(Key={'p': pk},
            AttributeUpdates={'b': {'Value': 3, 'Action': 'PUT'}},
            Expected={'a': {'ComparisonOperator': 'EQ',
                            'AttributeValueList': ['dog']}})
    assert test_table_s.get_item(Key={'p': pk}, ConsistentRead=True)['Item'] == {'p': pk, 'a': 1}
|
||||
|
||||
def test_update_expected_1_begins_with_true(test_table_s):
    """The "true" case of an Expected BEGINS_WITH condition: when the old
    value begins with the given prefix, the update happens."""
    p = random_string()
    test_table_s.update_item(Key={'p': p},
        AttributeUpdates={'a': {'Value': 'hello', 'Action': 'PUT'}})
    # Case where expected and update are on different attributes:
    test_table_s.update_item(Key={'p': p},
        AttributeUpdates={'b': {'Value': 3, 'Action': 'PUT'}},
        Expected={'a': {'ComparisonOperator': 'BEGINS_WITH',
                        'AttributeValueList': ['hell']}}
    )
    assert test_table_s.get_item(Key={'p': p}, ConsistentRead=True)['Item'] == {'p': p, 'a': 'hello', 'b': 3}
    # For BEGINS_WITH, AttributeValueList must have a single element.
    # (fixed: this check previously passed ComparisonOperator 'EQ', so the
    # BEGINS_WITH length validation was never actually exercised)
    with pytest.raises(ClientError, match='ValidationException'):
        test_table_s.update_item(Key={'p': p},
            AttributeUpdates={'b': {'Value': 3, 'Action': 'PUT'}},
            Expected={'a': {'ComparisonOperator': 'BEGINS_WITH',
                            'AttributeValueList': ['hell', 'heaven']}}
        )
|
||||
|
||||
def test_update_expected_1_begins_with_false(test_table_s):
    """The "false" case of an Expected BEGINS_WITH condition: when the old
    value does not begin with the given prefix, the update is refused."""
    p = random_string()
    test_table_s.update_item(Key={'p': p},
        AttributeUpdates={'a': {'Value': 'hello', 'Action': 'PUT'}})
    # 'hello' does not begin with 'dog', so the condition fails.
    # (fixed: this check previously passed ComparisonOperator 'EQ', so
    # BEGINS_WITH was never actually tested here)
    with pytest.raises(ClientError, match='ConditionalCheckFailedException'):
        test_table_s.update_item(Key={'p': p},
            AttributeUpdates={'b': {'Value': 3, 'Action': 'PUT'}},
            Expected={'a': {'ComparisonOperator': 'BEGINS_WITH',
                            'AttributeValueList': ['dog']}}
        )
    # Although BEGINS_WITH requires String or Binary type, giving it a
    # number results not with a ValidationException but rather a
    # failed condition (ConditionalCheckFailedException).
    # (fixed: previously used 'EQ' here as well)
    with pytest.raises(ClientError, match='ConditionalCheckFailedException'):
        test_table_s.update_item(Key={'p': p},
            AttributeUpdates={'b': {'Value': 3, 'Action': 'PUT'}},
            Expected={'a': {'ComparisonOperator': 'BEGINS_WITH',
                            'AttributeValueList': [3]}}
        )
    assert test_table_s.get_item(Key={'p': p}, ConsistentRead=True)['Item'] == {'p': p, 'a': 'hello'}
|
||||
|
||||
# FIXME: need to test many more ComparisonOperator options... See full list in
|
||||
# description in https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html
|
||||
|
||||
# Instead of ComparisonOperator and AttributeValueList, one can specify either
# Value or Exists:
def test_update_expected_1_value_true(test_table_s):
    pk = random_string()
    test_table_s.update_item(Key={'p': pk},
        AttributeUpdates={'a': {'Value': 1, 'Action': 'PUT'}})
    # "Value" alone acts as a shorthand equality check:
    test_table_s.update_item(Key={'p': pk},
        AttributeUpdates={'b': {'Value': 2, 'Action': 'PUT'}},
        Expected={'a': {'Value': 1}})
    assert test_table_s.get_item(Key={'p': pk}, ConsistentRead=True)['Item'] == {'p': pk, 'a': 1, 'b': 2}
|
||||
|
||||
def test_update_expected_1_value_false(test_table_s):
    pk = random_string()
    test_table_s.update_item(Key={'p': pk},
        AttributeUpdates={'a': {'Value': 1, 'Action': 'PUT'}})
    # A mismatched Value fails the condition:
    with pytest.raises(ClientError, match='ConditionalCheckFailedException'):
        test_table_s.update_item(Key={'p': pk},
            AttributeUpdates={'b': {'Value': 2, 'Action': 'PUT'}},
            Expected={'a': {'Value': 2}})
    # If the expected attribute is completely missing, the condition also fails:
    with pytest.raises(ClientError, match='ConditionalCheckFailedException'):
        test_table_s.update_item(Key={'p': pk},
            AttributeUpdates={'b': {'Value': 2, 'Action': 'PUT'}},
            Expected={'z': {'Value': 1}})
    assert test_table_s.get_item(Key={'p': pk}, ConsistentRead=True)['Item'] == {'p': pk, 'a': 1}
|
||||
|
||||
def test_update_expected_1_exists_true(test_table_s):
    pk = random_string()
    test_table_s.update_item(Key={'p': pk},
        AttributeUpdates={'a': {'Value': 1, 'Action': 'PUT'}})
    # Surprisingly, "Exists: True" cannot be used to confirm that the
    # attribute had *any* old value (the NOT_NULL comparison operator serves
    # that purpose). It may only appear together with "Value", and in that
    # case adds no meaning. Only "Exists: False" is interesting on its own.
    with pytest.raises(ClientError, match='ValidationException'):
        test_table_s.update_item(Key={'p': pk},
            AttributeUpdates={'b': {'Value': 2, 'Action': 'PUT'}},
            Expected={'a': {'Exists': True}})
    # "Exists: True" together with a matching "Value" succeeds:
    test_table_s.update_item(Key={'p': pk},
        AttributeUpdates={'c': {'Value': 3, 'Action': 'PUT'}},
        Expected={'a': {'Exists': True, 'Value': 1}})
    # "Exists: False" on an attribute that is indeed absent succeeds:
    test_table_s.update_item(Key={'p': pk},
        AttributeUpdates={'d': {'Value': 4, 'Action': 'PUT'}},
        Expected={'z': {'Exists': False}})
    # "Exists: False" cannot be used together with a "Value":
    with pytest.raises(ClientError, match='ValidationException'):
        test_table_s.update_item(Key={'p': pk},
            AttributeUpdates={'c': {'Value': 3, 'Action': 'PUT'}},
            Expected={'a': {'Exists': False, 'Value': 1}})
    assert test_table_s.get_item(Key={'p': pk}, ConsistentRead=True)['Item'] == {'p': pk, 'a': 1, 'c': 3, 'd': 4}
|
||||
|
||||
def test_update_expected_1_exists_false(test_table_s):
    pk = random_string()
    test_table_s.update_item(Key={'p': pk},
        AttributeUpdates={'a': {'Value': 1, 'Action': 'PUT'}})
    # "Exists: False" on an attribute that does exist fails the condition:
    with pytest.raises(ClientError, match='ConditionalCheckFailedException'):
        test_table_s.update_item(Key={'p': pk},
            AttributeUpdates={'b': {'Value': 2, 'Action': 'PUT'}},
            Expected={'a': {'Exists': False}})
    # "Exists: True" with a non-matching "Value" fails too:
    with pytest.raises(ClientError, match='ConditionalCheckFailedException'):
        test_table_s.update_item(Key={'p': pk},
            AttributeUpdates={'b': {'Value': 2, 'Action': 'PUT'}},
            Expected={'a': {'Exists': True, 'Value': 2}})
    assert test_table_s.get_item(Key={'p': pk}, ConsistentRead=True)['Item'] == {'p': pk, 'a': 1}
|
||||
|
||||
# Test that it's not allowed to combine ComparisonOperator and Exists or Value
def test_update_expected_operator_clash(test_table_s):
    pk = random_string()
    test_table_s.update_item(Key={'p': pk},
        AttributeUpdates={'a': {'Value': 1, 'Action': 'PUT'}})
    # "Exists" together with ComparisonOperator is a validation error:
    with pytest.raises(ClientError, match='ValidationException'):
        test_table_s.update_item(Key={'p': pk},
            AttributeUpdates={'b': {'Value': 2, 'Action': 'PUT'}},
            Expected={'a': {'Exists': False, 'ComparisonOperator': 'EQ', 'AttributeValueList': [3]}})
    # ... and so is "Value" together with ComparisonOperator:
    with pytest.raises(ClientError, match='ValidationException'):
        test_table_s.update_item(Key={'p': pk},
            AttributeUpdates={'b': {'Value': 2, 'Action': 'PUT'}},
            Expected={'a': {'Value': 3, 'ComparisonOperator': 'EQ', 'AttributeValueList': [3]}})
|
||||
|
||||
# All the previous tests involved a single condition on a single attribute.
# The following tests involve multiple conditions on multiple attributes.
# ConditionalOperator defaults to AND, and can also be set to OR.

def test_update_expected_multi_true(test_table_s):
    pk = random_string()
    test_table_s.update_item(Key={'p': pk},
        AttributeUpdates={'a': {'Value': 1, 'Action': 'PUT'},
                          'b': {'Value': 2, 'Action': 'PUT'}})
    # Several conditions, all true, with the default (implicit AND) operator:
    test_table_s.update_item(Key={'p': pk},
        AttributeUpdates={'z': {'Value': 3, 'Action': 'PUT'}},
        Expected={'a': {'Exists': True, 'Value': 1},
                  'b': {'ComparisonOperator': 'EQ', 'AttributeValueList': [2]},
                  'c': {'Exists': False}})
    assert test_table_s.get_item(Key={'p': pk}, ConsistentRead=True)['Item'] == {'p': pk, 'a': 1, 'b': 2, 'z': 3}
    # The same, with an explicit "AND" operator:
    test_table_s.update_item(Key={'p': pk},
        AttributeUpdates={'z': {'Value': 4, 'Action': 'PUT'}},
        Expected={'a': {'Exists': True, 'Value': 1},
                  'b': {'ComparisonOperator': 'EQ', 'AttributeValueList': [2]},
                  'c': {'Exists': False}},
        ConditionalOperator="AND")
    assert test_table_s.get_item(Key={'p': pk}, ConsistentRead=True)['Item'] == {'p': pk, 'a': 1, 'b': 2, 'z': 4}
    # With "OR", a single true condition is enough (here only the last one is):
    test_table_s.update_item(Key={'p': pk},
        AttributeUpdates={'z': {'Value': 5, 'Action': 'PUT'}},
        Expected={'a': {'Exists': True, 'Value': 74},
                  'b': {'ComparisonOperator': 'EQ', 'AttributeValueList': [999]},
                  'c': {'Exists': False}},
        ConditionalOperator="OR")
    assert test_table_s.get_item(Key={'p': pk}, ConsistentRead=True)['Item'] == {'p': pk, 'a': 1, 'b': 2, 'z': 5}
|
||||
|
||||
def test_update_expected_multi_false(test_table_s):
    pk = random_string()
    test_table_s.update_item(Key={'p': pk},
        AttributeUpdates={'a': {'Value': 1, 'Action': 'PUT'},
                          'b': {'Value': 2, 'Action': 'PUT'},
                          'c': {'Value': 3, 'Action': 'PUT'}})
    # Several conditions, one of them false, with the default (AND) operator:
    with pytest.raises(ClientError, match='ConditionalCheckFailedException'):
        test_table_s.update_item(Key={'p': pk},
            AttributeUpdates={'z': {'Value': 3, 'Action': 'PUT'}},
            Expected={'a': {'Exists': True, 'Value': 1},
                      'b': {'ComparisonOperator': 'EQ', 'AttributeValueList': [3]},
                      'd': {'Exists': False}})
    # The same, with an explicit "AND" operator:
    with pytest.raises(ClientError, match='ConditionalCheckFailedException'):
        test_table_s.update_item(Key={'p': pk},
            AttributeUpdates={'z': {'Value': 4, 'Action': 'PUT'}},
            Expected={'a': {'Exists': True, 'Value': 1},
                      'b': {'ComparisonOperator': 'EQ', 'AttributeValueList': [3]},
                      'd': {'Exists': False}},
            ConditionalOperator="AND")
    # With "OR", *all* of the conditions must be false for the update to fail:
    with pytest.raises(ClientError, match='ConditionalCheckFailedException'):
        test_table_s.update_item(Key={'p': pk},
            AttributeUpdates={'z': {'Value': 5, 'Action': 'PUT'}},
            Expected={'a': {'Exists': True, 'Value': 74},
                      'b': {'ComparisonOperator': 'EQ', 'AttributeValueList': [999]},
                      'c': {'Exists': False}},
            ConditionalOperator='OR')
    assert test_table_s.get_item(Key={'p': pk}, ConsistentRead=True)['Item'] == {'p': pk, 'a': 1, 'b': 2, 'c': 3}
|
||||
|
||||
# Verify the behaviour of an empty Expected parameter:
def test_update_expected_empty(test_table_s):
    pk = random_string()
    test_table_s.update_item(Key={'p': pk},
        AttributeUpdates={'a': {'Value': 1, 'Action': 'PUT'}})
    # An empty Expected array results in a successful update:
    test_table_s.update_item(Key={'p': pk},
        AttributeUpdates={'z': {'Value': 3, 'Action': 'PUT'}},
        Expected={})
    assert test_table_s.get_item(Key={'p': pk}, ConsistentRead=True)['Item'] == {'p': pk, 'a': 1, 'z': 3}
    # Passing ConditionalOperator is refused with a complaint that it needs
    # Expected - despite Expected being present, though empty:
    for operator in ['OR', 'AND']:
        with pytest.raises(ClientError, match='ValidationException.*ConditionalOperator'):
            test_table_s.update_item(Key={'p': pk},
                AttributeUpdates={'z': {'Value': 4, 'Action': 'PUT'}},
                Expected={}, ConditionalOperator=operator)
|
||||
|
||||
# All of the above tests tested "Expected" with the UpdateItem operation.
# We now want to test that it works also with the PutItem and DeleteItems
# operations. We don't need to check again all the different sub-cases tested
# above - we can assume that exactly the same code gets used to test the
# expected value. So we just need one test for each operation, to verify that
# this code actually gets called.

def test_delete_item_expected(test_table_s):
    pk = random_string()
    test_table_s.update_item(Key={'p': pk},
        AttributeUpdates={'a': {'Value': 1, 'Action': 'PUT'}})
    # A non-matching expected value blocks the deletion:
    with pytest.raises(ClientError, match='ConditionalCheckFailedException'):
        test_table_s.delete_item(Key={'p': pk}, Expected={'a': {'Value': 2}})
    assert test_table_s.get_item(Key={'p': pk}, ConsistentRead=True)['Item'] == {'p': pk, 'a': 1}
    # A matching expected value lets the deletion proceed:
    test_table_s.delete_item(Key={'p': pk}, Expected={'a': {'Value': 1}})
    assert not 'Item' in test_table_s.get_item(Key={'p': pk}, ConsistentRead=True)
|
||||
|
||||
def test_put_item_expected(test_table_s):
    pk = random_string()
    test_table_s.update_item(Key={'p': pk},
        AttributeUpdates={'a': {'Value': 1, 'Action': 'PUT'}})
    # PutItem with a matching expected value replaces the item:
    test_table_s.put_item(Item={'p': pk, 'a': 2}, Expected={'a': {'Value': 1}})
    assert test_table_s.get_item(Key={'p': pk}, ConsistentRead=True)['Item'] == {'p': pk, 'a': 2}
    # ... and with a stale expected value, the write is refused:
    with pytest.raises(ClientError, match='ConditionalCheckFailedException'):
        test_table_s.put_item(Item={'p': pk, 'a': 3}, Expected={'a': {'Value': 1}})
|
||||
@@ -53,9 +53,6 @@ def assert_index_scan(table, index_name, expected_items, **kwargs):
|
||||
|
||||
# Although quite silly, it is actually allowed to create an index which is
|
||||
# identical to the base table.
|
||||
# The following test does not work for KA/LA tables due to #6157,
|
||||
# so it's hereby skipped.
|
||||
@pytest.mark.skip
|
||||
def test_gsi_identical(dynamodb):
|
||||
table = create_test_table(dynamodb,
|
||||
KeySchema=[ { 'AttributeName': 'p', 'KeyType': 'HASH' }],
|
||||
@@ -74,7 +71,7 @@ def test_gsi_identical(dynamodb):
|
||||
# results (in different order).
|
||||
assert multiset(items) == multiset(full_scan(table))
|
||||
assert_index_scan(table, 'hello', items)
|
||||
# We can't scan a non-existent index
|
||||
# We can't scan a non-existant index
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
full_scan(table, IndexName='wrong')
|
||||
table.delete()
|
||||
@@ -153,6 +150,7 @@ def test_gsi_missing_table(dynamodb):
|
||||
dynamodb.meta.client.scan(TableName='nonexistent_table', IndexName='any_name')
|
||||
|
||||
# Verify that strongly-consistent reads on GSI are *not* allowed.
|
||||
@pytest.mark.xfail(reason="GSI strong consistency not checked")
|
||||
def test_gsi_strong_consistency(test_table_gsi_1):
|
||||
with pytest.raises(ClientError, match='ValidationException.*Consistent'):
|
||||
full_query(test_table_gsi_1, KeyConditions={'c': {'AttributeValueList': ['hi'], 'ComparisonOperator': 'EQ'}}, IndexName='hello', ConsistentRead=True)
|
||||
@@ -379,6 +377,7 @@ def test_gsi_3(test_table_gsi_3):
|
||||
KeyConditions={'a': {'AttributeValueList': [items[3]['a']], 'ComparisonOperator': 'EQ'},
|
||||
'b': {'AttributeValueList': [items[3]['b']], 'ComparisonOperator': 'EQ'}})
|
||||
|
||||
@pytest.mark.xfail(reason="GSI in alternator currently have a bug on updating the second regular base column")
|
||||
def test_gsi_update_second_regular_base_column(test_table_gsi_3):
|
||||
items = [{'p': random_string(), 'a': random_string(), 'b': random_string(), 'd': random_string()} for i in range(10)]
|
||||
with test_table_gsi_3.batch_writer() as batch:
|
||||
@@ -390,34 +389,6 @@ def test_gsi_update_second_regular_base_column(test_table_gsi_3):
|
||||
KeyConditions={'a': {'AttributeValueList': [items[3]['a']], 'ComparisonOperator': 'EQ'},
|
||||
'b': {'AttributeValueList': [items[3]['b']], 'ComparisonOperator': 'EQ'}})
|
||||
|
||||
# Test that when a table has a GSI, if the indexed attribute is missing, the
# item is added to the base table but not the index.
# This is the same feature we already tested in test_gsi_missing_attribute()
# above, but on a different table: In that test we used test_table_gsi_2,
# with one indexed attribute, and in this test we use test_table_gsi_3 which
# has two base regular attributes in the view key, and more possibilities
# of which value might be missing. Reproduces issue #6008.
def test_gsi_missing_attribute_3(test_table_gsi_3):
    pk = random_string()
    a = random_string()
    b = random_string()

    def assert_absent_from_index():
        # Note: with eventually consistent read, we can't really be sure that
        # an item will "never" appear in the index. We hope that if a bug
        # exists and such an item did appear, sometimes the delay here will
        # be enough for the unexpected item to become visible.
        assert not any([i['p'] == pk for i in full_scan(test_table_gsi_3, IndexName='hello')])

    # An item with a missing "a" value appears in the base table only:
    test_table_gsi_3.put_item(Item={'p': pk, 'b': b})
    assert test_table_gsi_3.get_item(Key={'p': pk})['Item'] == {'p': pk, 'b': b}
    assert_absent_from_index()
    # Same thing for an item with a missing "b" value:
    test_table_gsi_3.put_item(Item={'p': pk, 'a': a})
    assert test_table_gsi_3.get_item(Key={'p': pk})['Item'] == {'p': pk, 'a': a}
    assert_absent_from_index()
    # And for an item missing both:
    test_table_gsi_3.put_item(Item={'p': pk})
    assert test_table_gsi_3.get_item(Key={'p': pk})['Item'] == {'p': pk}
    assert_absent_from_index()
|
||||
|
||||
# A fourth scenario of GSI. Two GSIs on a single base table.
|
||||
@pytest.fixture(scope="session")
|
||||
@@ -506,52 +477,6 @@ def test_gsi_5(test_table_gsi_5):
|
||||
KeyConditions={'p': {'AttributeValueList': [p2], 'ComparisonOperator': 'EQ'},
|
||||
'x': {'AttributeValueList': [x2], 'ComparisonOperator': 'EQ'}})
|
||||
|
||||
# Verify that DescribeTable correctly returns the schema of both base-table
# and secondary indexes. KeySchema is given for each of the base table and
# indexes, and AttributeDefinitions is merged for all of them together.
def test_gsi_5_describe_table_schema(test_table_gsi_5):
    got = test_table_gsi_5.meta.client.describe_table(TableName=test_table_gsi_5.name)['Table']
    # The expected schemas below are copied from the test_table_gsi_5 fixture.
    base_keys = [
        { 'AttributeName': 'p', 'KeyType': 'HASH' },
        { 'AttributeName': 'c', 'KeyType': 'RANGE' } ]
    gsi_keys = [
        { 'AttributeName': 'p', 'KeyType': 'HASH' },
        { 'AttributeName': 'x', 'KeyType': 'RANGE' } ]
    all_attrs = [
        { 'AttributeName': 'p', 'AttributeType': 'S' },
        { 'AttributeName': 'c', 'AttributeType': 'S' },
        { 'AttributeName': 'x', 'AttributeType': 'S' } ]
    assert got['KeySchema'] == base_keys
    gsis = got['GlobalSecondaryIndexes']
    assert len(gsis) == 1
    assert gsis[0]['KeySchema'] == gsi_keys
    # The list of attribute definitions may be arbitrarily reordered:
    assert multiset(got['AttributeDefinitions']) == multiset(all_attrs)
|
||||
|
||||
# Similar DescribeTable schema test for test_table_gsi_2. The peculiarity
# in that table is that the base table has only a hash key p, and the index
# only a hash key x. Now, while internally Scylla needs to add "p" as a
# clustering key in the materialized view (in Scylla the view key always
# contains the base key), when describing the table, "p" shouldn't be
# returned as a range key, because the user didn't ask for it.
# This test reproduces issue #5320.
@pytest.mark.xfail(reason="GSI DescribeTable spurious range key (#5320)")
def test_gsi_2_describe_table_schema(test_table_gsi_2):
    got = test_table_gsi_2.meta.client.describe_table(TableName=test_table_gsi_2.name)['Table']
    # The expected schemas below are copied from the test_table_gsi_2 fixture.
    base_keys = [ { 'AttributeName': 'p', 'KeyType': 'HASH' } ]
    gsi_keys = [ { 'AttributeName': 'x', 'KeyType': 'HASH' } ]
    all_attrs = [
        { 'AttributeName': 'p', 'AttributeType': 'S' },
        { 'AttributeName': 'x', 'AttributeType': 'S' } ]
    assert got['KeySchema'] == base_keys
    gsis = got['GlobalSecondaryIndexes']
    assert len(gsis) == 1
    assert gsis[0]['KeySchema'] == gsi_keys
    # The list of attribute definitions may be arbitrarily reordered:
    assert multiset(got['AttributeDefinitions']) == multiset(all_attrs)
|
||||
|
||||
# All tests above involved "ProjectionType: ALL". This test checks how
|
||||
# "ProjectionType:: KEYS_ONLY" works. We note that it projects both
|
||||
# the index's key, *and* the base table's key. So items which had different
|
||||
402
alternator-test/test_item.py
Normal file
402
alternator-test/test_item.py
Normal file
@@ -0,0 +1,402 @@
|
||||
# Copyright 2019 ScyllaDB
|
||||
#
|
||||
# This file is part of Scylla.
|
||||
#
|
||||
# Scylla is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Scylla is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Tests for the CRUD item operations: PutItem, GetItem, UpdateItem, DeleteItem
|
||||
|
||||
import pytest
|
||||
from botocore.exceptions import ClientError
|
||||
from decimal import Decimal
|
||||
from util import random_string, random_bytes
|
||||
|
||||
# Basic test for creating a new item with a random name, and reading it back
|
||||
# with strong consistency.
|
||||
# Only the string type is used for keys and attributes. None of the various
|
||||
# optional PutItem features (Expected, ReturnValues, ReturnConsumedCapacity,
|
||||
# ReturnItemCollectionMetrics, ConditionalOperator, ConditionExpression,
|
||||
# ExpressionAttributeNames, ExpressionAttributeValues) are used, and
|
||||
# for GetItem strong consistency is requested as well as all attributes,
|
||||
# but no other optional features (AttributesToGet, ReturnConsumedCapacity,
|
||||
# ProjectionExpression, ExpressionAttributeNames)
|
||||
def test_basic_string_put_and_get(test_table):
|
||||
p = random_string()
|
||||
c = random_string()
|
||||
val = random_string()
|
||||
val2 = random_string()
|
||||
test_table.put_item(Item={'p': p, 'c': c, 'attribute': val, 'another': val2})
|
||||
item = test_table.get_item(Key={'p': p, 'c': c}, ConsistentRead=True)['Item']
|
||||
assert item['p'] == p
|
||||
assert item['c'] == c
|
||||
assert item['attribute'] == val
|
||||
assert item['another'] == val2
|
||||
|
||||
# Similar to test_basic_string_put_and_get, just uses UpdateItem instead of
|
||||
# PutItem. Because the item does not yet exist, it should work the same.
|
||||
def test_basic_string_update_and_get(test_table):
|
||||
p = random_string()
|
||||
c = random_string()
|
||||
val = random_string()
|
||||
val2 = random_string()
|
||||
test_table.update_item(Key={'p': p, 'c': c}, AttributeUpdates={'attribute': {'Value': val, 'Action': 'PUT'}, 'another': {'Value': val2, 'Action': 'PUT'}})
|
||||
item = test_table.get_item(Key={'p': p, 'c': c}, ConsistentRead=True)['Item']
|
||||
assert item['p'] == p
|
||||
assert item['c'] == c
|
||||
assert item['attribute'] == val
|
||||
assert item['another'] == val2
|
||||
|
||||
# Test put_item and get_item of various types for the *attributes*,
|
||||
# including both scalars as well as nested documents, lists and sets.
|
||||
# The full list of types tested here:
|
||||
# number, boolean, bytes, null, list, map, string set, number set,
|
||||
# binary set.
|
||||
# The keys are still strings.
|
||||
# Note that only top-level attributes are written and read in this test -
|
||||
# this test does not attempt to modify *nested* attributes.
|
||||
# See https://boto3.amazonaws.com/v1/documentation/api/latest/reference/customizations/dynamodb.html
|
||||
# on how to pass these various types to Boto3's put_item().
|
||||
def test_put_and_get_attribute_types(test_table):
|
||||
key = {'p': random_string(), 'c': random_string()}
|
||||
test_items = [
|
||||
Decimal("12.345"),
|
||||
42,
|
||||
True,
|
||||
False,
|
||||
b'xyz',
|
||||
None,
|
||||
['hello', 'world', 42],
|
||||
{'hello': 'world', 'life': 42},
|
||||
{'hello': {'test': 'hi', 'hello': True, 'list': [1, 2, 'hi']}},
|
||||
set(['hello', 'world', 'hi']),
|
||||
set([1, 42, Decimal("3.14")]),
|
||||
set([b'xyz', b'hi']),
|
||||
]
|
||||
item = { str(i) : test_items[i] for i in range(len(test_items)) }
|
||||
item.update(key)
|
||||
test_table.put_item(Item=item)
|
||||
got_item = test_table.get_item(Key=key, ConsistentRead=True)['Item']
|
||||
assert item == got_item
|
||||
|
||||
# The test_empty_* tests below verify support for empty items, with no
|
||||
# attributes except the key. This is a difficult case for Scylla, because
|
||||
# for an empty row to exist, Scylla needs to add a "CQL row marker".
|
||||
# There are several ways to create empty items - via PutItem, UpdateItem
|
||||
# and deleting attributes from non-empty items, and we need to check them
|
||||
# all, in several test_empty_* tests:
|
||||
def test_empty_put(test_table):
|
||||
p = random_string()
|
||||
c = random_string()
|
||||
test_table.put_item(Item={'p': p, 'c': c})
|
||||
item = test_table.get_item(Key={'p': p, 'c': c}, ConsistentRead=True)['Item']
|
||||
assert item == {'p': p, 'c': c}
|
||||
def test_empty_put_delete(test_table):
|
||||
p = random_string()
|
||||
c = random_string()
|
||||
test_table.put_item(Item={'p': p, 'c': c, 'hello': 'world'})
|
||||
test_table.update_item(Key={'p': p, 'c': c}, AttributeUpdates={'hello': {'Action': 'DELETE'}})
|
||||
item = test_table.get_item(Key={'p': p, 'c': c}, ConsistentRead=True)['Item']
|
||||
assert item == {'p': p, 'c': c}
|
||||
def test_empty_update(test_table):
|
||||
p = random_string()
|
||||
c = random_string()
|
||||
test_table.update_item(Key={'p': p, 'c': c}, AttributeUpdates={})
|
||||
item = test_table.get_item(Key={'p': p, 'c': c}, ConsistentRead=True)['Item']
|
||||
assert item == {'p': p, 'c': c}
|
||||
def test_empty_update_delete(test_table):
|
||||
p = random_string()
|
||||
c = random_string()
|
||||
test_table.update_item(Key={'p': p, 'c': c}, AttributeUpdates={'hello': {'Value': 'world', 'Action': 'PUT'}})
|
||||
test_table.update_item(Key={'p': p, 'c': c}, AttributeUpdates={'hello': {'Action': 'DELETE'}})
|
||||
item = test_table.get_item(Key={'p': p, 'c': c}, ConsistentRead=True)['Item']
|
||||
assert item == {'p': p, 'c': c}
|
||||
|
||||
# Test error handling of UpdateItem passed a bad "Action" field.
|
||||
def test_update_bad_action(test_table):
|
||||
p = random_string()
|
||||
c = random_string()
|
||||
val = random_string()
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.update_item(Key={'p': p, 'c': c}, AttributeUpdates={'attribute': {'Value': val, 'Action': 'NONEXISTENT'}})
|
||||
|
||||
# A more elaborate UpdateItem test, updating different attributes at different
|
||||
# times. Includes PUT and DELETE operations.
|
||||
def test_basic_string_more_update(test_table):
|
||||
p = random_string()
|
||||
c = random_string()
|
||||
val1 = random_string()
|
||||
val2 = random_string()
|
||||
val3 = random_string()
|
||||
val4 = random_string()
|
||||
test_table.update_item(Key={'p': p, 'c': c}, AttributeUpdates={'a3': {'Value': val1, 'Action': 'PUT'}})
|
||||
test_table.update_item(Key={'p': p, 'c': c}, AttributeUpdates={'a1': {'Value': val1, 'Action': 'PUT'}})
|
||||
test_table.update_item(Key={'p': p, 'c': c}, AttributeUpdates={'a2': {'Value': val2, 'Action': 'PUT'}})
|
||||
test_table.update_item(Key={'p': p, 'c': c}, AttributeUpdates={'a1': {'Value': val3, 'Action': 'PUT'}})
|
||||
test_table.update_item(Key={'p': p, 'c': c}, AttributeUpdates={'a3': {'Action': 'DELETE'}})
|
||||
item = test_table.get_item(Key={'p': p, 'c': c}, ConsistentRead=True)['Item']
|
||||
assert item['p'] == p
|
||||
assert item['c'] == c
|
||||
assert item['a1'] == val3
|
||||
assert item['a2'] == val2
|
||||
assert not 'a3' in item
|
||||
|
||||
# Test that item operations on a non-existant table name fail with correct
|
||||
# error code.
|
||||
def test_item_operations_nonexistent_table(dynamodb):
|
||||
with pytest.raises(ClientError, match='ResourceNotFoundException'):
|
||||
dynamodb.meta.client.put_item(TableName='non_existent_table',
|
||||
Item={'a':{'S':'b'}})
|
||||
|
||||
# Fetching a non-existant item. According to the DynamoDB doc, "If there is no
|
||||
# matching item, GetItem does not return any data and there will be no Item
|
||||
# element in the response."
|
||||
def test_get_item_missing_item(test_table):
|
||||
p = random_string()
|
||||
c = random_string()
|
||||
assert not "Item" in test_table.get_item(Key={'p': p, 'c': c}, ConsistentRead=True)
|
||||
|
||||
# Test that if we have a table with string hash and sort keys, we can't read
|
||||
# or write items with other key types to it.
|
||||
def test_put_item_wrong_key_type(test_table):
|
||||
b = random_bytes()
|
||||
s = random_string()
|
||||
n = Decimal("3.14")
|
||||
# Should succeed (correct key types)
|
||||
test_table.put_item(Item={'p': s, 'c': s})
|
||||
assert test_table.get_item(Key={'p': s, 'c': s}, ConsistentRead=True)['Item'] == {'p': s, 'c': s}
|
||||
# Should fail (incorrect hash key types)
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.put_item(Item={'p': b, 'c': s})
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.put_item(Item={'p': n, 'c': s})
|
||||
# Should fail (incorrect sort key types)
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.put_item(Item={'p': s, 'c': b})
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.put_item(Item={'p': s, 'c': n})
|
||||
# Should fail (missing hash key)
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.put_item(Item={'c': s})
|
||||
# Should fail (missing sort key)
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.put_item(Item={'p': s})
|
||||
def test_update_item_wrong_key_type(test_table, test_table_s):
|
||||
b = random_bytes()
|
||||
s = random_string()
|
||||
n = Decimal("3.14")
|
||||
# Should succeed (correct key types)
|
||||
test_table.update_item(Key={'p': s, 'c': s}, AttributeUpdates={})
|
||||
assert test_table.get_item(Key={'p': s, 'c': s}, ConsistentRead=True)['Item'] == {'p': s, 'c': s}
|
||||
# Should fail (incorrect hash key types)
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.update_item(Key={'p': b, 'c': s}, AttributeUpdates={})
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.update_item(Key={'p': n, 'c': s}, AttributeUpdates={})
|
||||
# Should fail (incorrect sort key types)
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.update_item(Key={'p': s, 'c': b}, AttributeUpdates={})
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.update_item(Key={'p': s, 'c': n}, AttributeUpdates={})
|
||||
# Should fail (missing hash key)
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.update_item(Key={'c': s}, AttributeUpdates={})
|
||||
# Should fail (missing sort key)
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.update_item(Key={'p': s}, AttributeUpdates={})
|
||||
# Should fail (spurious key columns)
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.get_item(Key={'p': s, 'c': s, 'spurious': s})
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table_s.get_item(Key={'p': s, 'c': s})
|
||||
def test_get_item_wrong_key_type(test_table, test_table_s):
|
||||
b = random_bytes()
|
||||
s = random_string()
|
||||
n = Decimal("3.14")
|
||||
# Should succeed (correct key types) but have empty result
|
||||
assert not "Item" in test_table.get_item(Key={'p': s, 'c': s}, ConsistentRead=True)
|
||||
# Should fail (incorrect hash key types)
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.get_item(Key={'p': b, 'c': s})
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.get_item(Key={'p': n, 'c': s})
|
||||
# Should fail (incorrect sort key types)
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.get_item(Key={'p': s, 'c': b})
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.get_item(Key={'p': s, 'c': n})
|
||||
# Should fail (missing hash key)
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.get_item(Key={'c': s})
|
||||
# Should fail (missing sort key)
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.get_item(Key={'p': s})
|
||||
# Should fail (spurious key columns)
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.get_item(Key={'p': s, 'c': s, 'spurious': s})
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table_s.get_item(Key={'p': s, 'c': s})
|
||||
def test_delete_item_wrong_key_type(test_table, test_table_s):
|
||||
b = random_bytes()
|
||||
s = random_string()
|
||||
n = Decimal("3.14")
|
||||
# Should succeed (correct key types)
|
||||
test_table.delete_item(Key={'p': s, 'c': s})
|
||||
# Should fail (incorrect hash key types)
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.delete_item(Key={'p': b, 'c': s})
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.delete_item(Key={'p': n, 'c': s})
|
||||
# Should fail (incorrect sort key types)
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.delete_item(Key={'p': s, 'c': b})
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.delete_item(Key={'p': s, 'c': n})
|
||||
# Should fail (missing hash key)
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.delete_item(Key={'c': s})
|
||||
# Should fail (missing sort key)
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.delete_item(Key={'p': s})
|
||||
# Should fail (spurious key columns)
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table.delete_item(Key={'p': s, 'c': s, 'spurious': s})
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table_s.delete_item(Key={'p': s, 'c': s})
|
||||
|
||||
# Most of the tests here arbitrarily used a table with both hash and sort keys
|
||||
# (both strings). Let's check that a table with *only* a hash key works ok
|
||||
# too, for PutItem, GetItem, and UpdateItem.
|
||||
def test_only_hash_key(test_table_s):
|
||||
s = random_string()
|
||||
test_table_s.put_item(Item={'p': s, 'hello': 'world'})
|
||||
assert test_table_s.get_item(Key={'p': s}, ConsistentRead=True)['Item'] == {'p': s, 'hello': 'world'}
|
||||
test_table_s.update_item(Key={'p': s}, AttributeUpdates={'hi': {'Value': 'there', 'Action': 'PUT'}})
|
||||
assert test_table_s.get_item(Key={'p': s}, ConsistentRead=True)['Item'] == {'p': s, 'hello': 'world', 'hi': 'there'}
|
||||
|
||||
# Tests for item operations in tables with non-string hash or sort keys.
|
||||
# These tests focus only on the type of the key - everything else is as
|
||||
# simple as we can (string attributes, no special options for GetItem
|
||||
# and PutItem). These tests also focus on individual items only, and
|
||||
# not about the sort order of sort keys - this should be verified in
|
||||
# test_query.py, for example.
|
||||
def test_bytes_hash_key(test_table_b):
|
||||
# Bytes values are passed using base64 encoding, which has weird cases
|
||||
# depending on len%3 and len%4. So let's try various lengths.
|
||||
for len in range(10,18):
|
||||
p = random_bytes(len)
|
||||
val = random_string()
|
||||
test_table_b.put_item(Item={'p': p, 'attribute': val})
|
||||
assert test_table_b.get_item(Key={'p': p}, ConsistentRead=True)['Item'] == {'p': p, 'attribute': val}
|
||||
def test_bytes_sort_key(test_table_sb):
|
||||
p = random_string()
|
||||
c = random_bytes()
|
||||
val = random_string()
|
||||
test_table_sb.put_item(Item={'p': p, 'c': c, 'attribute': val})
|
||||
assert test_table_sb.get_item(Key={'p': p, 'c': c}, ConsistentRead=True)['Item'] == {'p': p, 'c': c, 'attribute': val}
|
||||
|
||||
# Tests for using a large binary blob as hash key, sort key, or attribute.
|
||||
# DynamoDB strictly limits the size of the binary hash key to 2048 bytes,
|
||||
# and binary sort key to 1024 bytes, and refuses anything larger. The total
|
||||
# size of an item is limited to 400KB, which also limits the size of the
|
||||
# largest attributes. For more details on these limits, see
|
||||
# https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html
|
||||
# Alternator currently does *not* have these limitations, and can accept much
|
||||
# larger keys and attributes, but what we do in the following tests is to verify
|
||||
# that items up to DynamoDB's maximum sizes also work well in Alternator.
|
||||
def test_large_blob_hash_key(test_table_b):
|
||||
b = random_bytes(2048)
|
||||
test_table_b.put_item(Item={'p': b})
|
||||
assert test_table_b.get_item(Key={'p': b}, ConsistentRead=True)['Item'] == {'p': b}
|
||||
def test_large_blob_sort_key(test_table_sb):
|
||||
s = random_string()
|
||||
b = random_bytes(1024)
|
||||
test_table_sb.put_item(Item={'p': s, 'c': b})
|
||||
assert test_table_sb.get_item(Key={'p': s, 'c': b}, ConsistentRead=True)['Item'] == {'p': s, 'c': b}
|
||||
def test_large_blob_attribute(test_table):
|
||||
p = random_string()
|
||||
c = random_string()
|
||||
b = random_bytes(409500) # a bit less than 400KB
|
||||
test_table.put_item(Item={'p': p, 'c': c, 'attribute': b })
|
||||
assert test_table.get_item(Key={'p': p, 'c': c}, ConsistentRead=True)['Item'] == {'p': p, 'c': c, 'attribute': b}
|
||||
|
||||
# Checks what it is not allowed to use in a single UpdateItem request both
|
||||
# old-style AttributeUpdates and new-style UpdateExpression.
|
||||
def test_update_item_two_update_methods(test_table_s):
|
||||
p = random_string()
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table_s.update_item(Key={'p': p},
|
||||
AttributeUpdates={'a': {'Value': 3, 'Action': 'PUT'}},
|
||||
UpdateExpression='SET b = :val1',
|
||||
ExpressionAttributeValues={':val1': 4})
|
||||
|
||||
# Verify that having neither AttributeUpdates nor UpdateExpression is
|
||||
# allowed, and results in creation of an empty item.
|
||||
def test_update_item_no_update_method(test_table_s):
|
||||
p = random_string()
|
||||
assert not "Item" in test_table_s.get_item(Key={'p': p}, ConsistentRead=True)
|
||||
test_table_s.update_item(Key={'p': p})
|
||||
assert test_table_s.get_item(Key={'p': p}, ConsistentRead=True)['Item'] == {'p': p}
|
||||
|
||||
# Test GetItem with the AttributesToGet parameter. Result should include the
|
||||
# selected attributes only - if one wants the key attributes as well, one
|
||||
# needs to select them explicitly. When no key attributes are selected,
|
||||
# some items may have *none* of the selected attributes. Those items are
|
||||
# returned too, as empty items - they are not outright missing.
|
||||
def test_getitem_attributes_to_get(dynamodb, test_table):
|
||||
p = random_string()
|
||||
c = random_string()
|
||||
item = {'p': p, 'c': c, 'a': 'hello', 'b': 'hi'}
|
||||
test_table.put_item(Item=item)
|
||||
for wanted in [ ['a'], # only non-key attribute
|
||||
['c', 'a'], # a key attribute (sort key) and non-key
|
||||
['p', 'c'], # entire key
|
||||
['nonexistent'] # Our item doesn't have this
|
||||
]:
|
||||
got_item = test_table.get_item(Key={'p': p, 'c': c}, AttributesToGet=wanted, ConsistentRead=True)['Item']
|
||||
expected_item = {k: item[k] for k in wanted if k in item}
|
||||
assert expected_item == got_item
|
||||
|
||||
# Basic test for DeleteItem, with hash key only
|
||||
def test_delete_item_hash(test_table_s):
|
||||
p = random_string()
|
||||
test_table_s.put_item(Item={'p': p})
|
||||
assert 'Item' in test_table_s.get_item(Key={'p': p}, ConsistentRead=True)
|
||||
test_table_s.delete_item(Key={'p': p})
|
||||
assert not 'Item' in test_table_s.get_item(Key={'p': p}, ConsistentRead=True)
|
||||
|
||||
# Basic test for DeleteItem, with hash and sort key
|
||||
def test_delete_item_sort(test_table):
|
||||
p = random_string()
|
||||
c = random_string()
|
||||
key = {'p': p, 'c': c}
|
||||
test_table.put_item(Item=key)
|
||||
assert 'Item' in test_table.get_item(Key=key, ConsistentRead=True)
|
||||
test_table.delete_item(Key=key)
|
||||
assert not 'Item' in test_table.get_item(Key=key, ConsistentRead=True)
|
||||
|
||||
# Test that PutItem completely replaces an existing item. It shouldn't merge
|
||||
# it with a previously existing value, as UpdateItem does!
|
||||
# We test for a table with just hash key, and for a table with both hash and
|
||||
# sort keys.
|
||||
def test_put_item_replace(test_table_s, test_table):
|
||||
p = random_string()
|
||||
test_table_s.put_item(Item={'p': p, 'a': 'hi'})
|
||||
assert test_table_s.get_item(Key={'p': p}, ConsistentRead=True)['Item'] == {'p': p, 'a': 'hi'}
|
||||
test_table_s.put_item(Item={'p': p, 'b': 'hello'})
|
||||
assert test_table_s.get_item(Key={'p': p}, ConsistentRead=True)['Item'] == {'p': p, 'b': 'hello'}
|
||||
c = random_string()
|
||||
test_table.put_item(Item={'p': p, 'c': c, 'a': 'hi'})
|
||||
assert test_table.get_item(Key={'p': p, 'c': c}, ConsistentRead=True)['Item'] == {'p': p, 'c': c, 'a': 'hi'}
|
||||
test_table.put_item(Item={'p': p, 'c': c, 'b': 'hello'})
|
||||
assert test_table.get_item(Key={'p': p, 'c': c}, ConsistentRead=True)['Item'] == {'p': p, 'c': c, 'b': 'hello'}
|
||||
@@ -26,23 +26,26 @@ import time
|
||||
from botocore.exceptions import ClientError, ParamValidationError
|
||||
from util import create_test_table, random_string, full_scan, full_query, multiset, list_tables
|
||||
|
||||
# LSIs support strongly-consistent reads, so the following functions do not
|
||||
# need to retry like we did in test_gsi.py for GSIs:
|
||||
# Currently, Alternator's LSIs only support eventually consistent reads, so tests
|
||||
# that involve writing to a table and then expect to read something from it cannot
|
||||
# be guaranteed to succeed without retrying the read. The following utility
|
||||
# functions make it easy to write such tests.
|
||||
def assert_index_query(table, index_name, expected_items, **kwargs):
|
||||
assert multiset(expected_items) == multiset(full_query(table, IndexName=index_name, ConsistentRead=True, **kwargs))
|
||||
def assert_index_scan(table, index_name, expected_items, **kwargs):
|
||||
assert multiset(expected_items) == multiset(full_scan(table, IndexName=index_name, ConsistentRead=True, **kwargs))
|
||||
|
||||
# A version doing retries instead of ConsistentRead, to be used just for the
|
||||
# one test below which has both GSI and LSI:
|
||||
def retrying_assert_index_query(table, index_name, expected_items, **kwargs):
|
||||
for i in range(3):
|
||||
if multiset(expected_items) == multiset(full_query(table, IndexName=index_name, **kwargs)):
|
||||
return
|
||||
print('retrying_assert_index_query retrying')
|
||||
print('assert_index_query retrying')
|
||||
time.sleep(1)
|
||||
assert multiset(expected_items) == multiset(full_query(table, IndexName=index_name, **kwargs))
|
||||
|
||||
def assert_index_scan(table, index_name, expected_items, **kwargs):
|
||||
for i in range(3):
|
||||
if multiset(expected_items) == multiset(full_scan(table, IndexName=index_name, **kwargs)):
|
||||
return
|
||||
print('assert_index_scan retrying')
|
||||
time.sleep(1)
|
||||
assert multiset(expected_items) == multiset(full_scan(table, IndexName=index_name, **kwargs))
|
||||
|
||||
# Although quite silly, it is actually allowed to create an index which is
|
||||
# identical to the base table.
|
||||
def test_lsi_identical(dynamodb):
|
||||
@@ -63,7 +66,7 @@ def test_lsi_identical(dynamodb):
|
||||
# results (in different order).
|
||||
assert multiset(items) == multiset(full_scan(table))
|
||||
assert_index_scan(table, 'hello', items)
|
||||
# We can't scan a non-existent index
|
||||
# We can't scan a non-existant index
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
full_scan(table, IndexName='wrong')
|
||||
table.delete()
|
||||
@@ -299,11 +302,13 @@ def test_lsi_consistent_read(test_table_lsi_1):
|
||||
expected_items = [i for i in items if i['p'] == p1 and i['b'] == b1]
|
||||
assert_index_query(test_table_lsi_1, 'hello', expected_items,
|
||||
KeyConditions={'p': {'AttributeValueList': [p1], 'ComparisonOperator': 'EQ'},
|
||||
'b': {'AttributeValueList': [b1], 'ComparisonOperator': 'EQ'}})
|
||||
'b': {'AttributeValueList': [b1], 'ComparisonOperator': 'EQ'}},
|
||||
ConsistentRead=True)
|
||||
expected_items = [i for i in items if i['p'] == p2 and i['b'] == b2]
|
||||
assert_index_query(test_table_lsi_1, 'hello', expected_items,
|
||||
KeyConditions={'p': {'AttributeValueList': [p2], 'ComparisonOperator': 'EQ'},
|
||||
'b': {'AttributeValueList': [b2], 'ComparisonOperator': 'EQ'}})
|
||||
'b': {'AttributeValueList': [b2], 'ComparisonOperator': 'EQ'}},
|
||||
ConsistentRead=True)
|
||||
|
||||
# A table with both gsi and lsi present
|
||||
@pytest.fixture(scope="session")
|
||||
@@ -355,6 +360,6 @@ def test_lsi_and_gsi(test_table_lsi_gsi):
|
||||
|
||||
for index in ['hello_g1', 'hello_l1']:
|
||||
expected_items = [i for i in items if i['p'] == p1 and i['x1'] == x1]
|
||||
retrying_assert_index_query(test_table_lsi_gsi, index, expected_items,
|
||||
assert_index_query(test_table_lsi_gsi, index, expected_items,
|
||||
KeyConditions={'p': {'AttributeValueList': [p1], 'ComparisonOperator': 'EQ'},
|
||||
'x1': {'AttributeValueList': [x1], 'ComparisonOperator': 'EQ'}})
|
||||
@@ -134,10 +134,10 @@ def test_projection_expression_path(test_table_s):
|
||||
assert test_table_s.get_item(Key={'p': p}, ConsistentRead=True, ProjectionExpression='a.b[0]')['Item'] == {'a': {'b': [2]}}
|
||||
assert test_table_s.get_item(Key={'p': p}, ConsistentRead=True, ProjectionExpression='a.b[2]')['Item'] == {'a': {'b': [{'x': 'hi', 'y': 'yo'}]}}
|
||||
assert test_table_s.get_item(Key={'p': p}, ConsistentRead=True, ProjectionExpression='a.b[2].y')['Item'] == {'a': {'b': [{'y': 'yo'}]}}
|
||||
# Trying to read any sort of non-existent attribute returns an empty item.
|
||||
# Trying to read any sort of non-existant attribute returns an empty item.
|
||||
# This includes a non-existing top-level attribute, an attempt to read
|
||||
# beyond the end of an array or a non-existent member of a dictionary, as
|
||||
# well as paths which begin with a non-existent prefix.
|
||||
# beyond the end of an array or a non-existant member of a dictionary, as
|
||||
# well as paths which begin with a non-existant prefix.
|
||||
assert test_table_s.get_item(Key={'p': p}, ConsistentRead=True, ProjectionExpression='x')['Item'] == {}
|
||||
assert test_table_s.get_item(Key={'p': p}, ConsistentRead=True, ProjectionExpression='a.b[3]')['Item'] == {}
|
||||
assert test_table_s.get_item(Key={'p': p}, ConsistentRead=True, ProjectionExpression='a.x')['Item'] == {}
|
||||
358
alternator-test/test_query.py
Normal file
358
alternator-test/test_query.py
Normal file
@@ -0,0 +1,358 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright 2019 ScyllaDB
|
||||
#
|
||||
# This file is part of Scylla.
|
||||
#
|
||||
# Scylla is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Scylla is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Tests for the Query operation
|
||||
|
||||
import random
|
||||
import pytest
|
||||
from botocore.exceptions import ClientError
|
||||
from decimal import Decimal
|
||||
from util import random_string, random_bytes, full_query, multiset
|
||||
from boto3.dynamodb.conditions import Key, Attr
|
||||
|
||||
# Test that scanning works fine with in-stock paginator
|
||||
def test_query_basic_restrictions(dynamodb, filled_test_table):
|
||||
test_table, items = filled_test_table
|
||||
paginator = dynamodb.meta.client.get_paginator('query')
|
||||
|
||||
# EQ
|
||||
got_items = []
|
||||
for page in paginator.paginate(TableName=test_table.name, KeyConditions={
|
||||
'p' : {'AttributeValueList': ['long'], 'ComparisonOperator': 'EQ'}
|
||||
}):
|
||||
got_items += page['Items']
|
||||
print(got_items)
|
||||
assert multiset([item for item in items if item['p'] == 'long']) == multiset(got_items)
|
||||
|
||||
# LT
|
||||
got_items = []
|
||||
for page in paginator.paginate(TableName=test_table.name, KeyConditions={
|
||||
'p' : {'AttributeValueList': ['long'], 'ComparisonOperator': 'EQ'},
|
||||
'c' : {'AttributeValueList': ['12'], 'ComparisonOperator': 'LT'}
|
||||
}):
|
||||
got_items += page['Items']
|
||||
print(got_items)
|
||||
assert multiset([item for item in items if item['p'] == 'long' and item['c'] < '12']) == multiset(got_items)
|
||||
|
||||
# LE
|
||||
got_items = []
|
||||
for page in paginator.paginate(TableName=test_table.name, KeyConditions={
|
||||
'p' : {'AttributeValueList': ['long'], 'ComparisonOperator': 'EQ'},
|
||||
'c' : {'AttributeValueList': ['14'], 'ComparisonOperator': 'LE'}
|
||||
}):
|
||||
got_items += page['Items']
|
||||
print(got_items)
|
||||
assert multiset([item for item in items if item['p'] == 'long' and item['c'] <= '14']) == multiset(got_items)
|
||||
|
||||
# GT
|
||||
got_items = []
|
||||
for page in paginator.paginate(TableName=test_table.name, KeyConditions={
|
||||
'p' : {'AttributeValueList': ['long'], 'ComparisonOperator': 'EQ'},
|
||||
'c' : {'AttributeValueList': ['15'], 'ComparisonOperator': 'GT'}
|
||||
}):
|
||||
got_items += page['Items']
|
||||
print(got_items)
|
||||
assert multiset([item for item in items if item['p'] == 'long' and item['c'] > '15']) == multiset(got_items)
|
||||
|
||||
# GE
|
||||
got_items = []
|
||||
for page in paginator.paginate(TableName=test_table.name, KeyConditions={
|
||||
'p' : {'AttributeValueList': ['long'], 'ComparisonOperator': 'EQ'},
|
||||
'c' : {'AttributeValueList': ['14'], 'ComparisonOperator': 'GE'}
|
||||
}):
|
||||
got_items += page['Items']
|
||||
print(got_items)
|
||||
assert multiset([item for item in items if item['p'] == 'long' and item['c'] >= '14']) == multiset(got_items)
|
||||
|
||||
# BETWEEN
|
||||
got_items = []
|
||||
for page in paginator.paginate(TableName=test_table.name, KeyConditions={
|
||||
'p' : {'AttributeValueList': ['long'], 'ComparisonOperator': 'EQ'},
|
||||
'c' : {'AttributeValueList': ['155', '164'], 'ComparisonOperator': 'BETWEEN'}
|
||||
}):
|
||||
got_items += page['Items']
|
||||
print(got_items)
|
||||
assert multiset([item for item in items if item['p'] == 'long' and item['c'] >= '155' and item['c'] <= '164']) == multiset(got_items)
|
||||
|
||||
# BEGINS_WITH
|
||||
got_items = []
|
||||
for page in paginator.paginate(TableName=test_table.name, KeyConditions={
|
||||
'p' : {'AttributeValueList': ['long'], 'ComparisonOperator': 'EQ'},
|
||||
'c' : {'AttributeValueList': ['11'], 'ComparisonOperator': 'BEGINS_WITH'}
|
||||
}):
|
||||
print([item for item in items if item['p'] == 'long' and item['c'].startswith('11')])
|
||||
got_items += page['Items']
|
||||
print(got_items)
|
||||
assert multiset([item for item in items if item['p'] == 'long' and item['c'].startswith('11')]) == multiset(got_items)
|
||||
|
||||
# Test that the Query operation accepts the KeyConditionExpression parameter
# (the modern alternative to the legacy KeyConditions syntax).
@pytest.mark.xfail(reason="KeyConditionExpression not supported yet")
def test_query_key_condition_expression(dynamodb, filled_test_table):
    table, items = filled_test_table
    query_paginator = dynamodb.meta.client.get_paginator('query')
    condition = Key("p").eq("long") & Key("c").lt("12")
    fetched = []
    for page in query_paginator.paginate(TableName=table.name, KeyConditionExpression=condition):
        fetched.extend(page['Items'])
    print(fetched)
    expected = [i for i in items if i['p'] == 'long' and i['c'] < '12']
    assert multiset(expected) == multiset(fetched)
|
||||
|
||||
def test_begins_with(dynamodb, test_table):
    # Exercise the BEGINS_WITH comparison operator on string sort keys that
    # contain non-ASCII ("unorthodox") characters.
    query_paginator = dynamodb.meta.client.get_paginator('query')
    items = [{'p': 'unorthodox_chars', 'c': c, 'str': 'a'} for c in [u'ÿÿÿ', u'cÿbÿ', u'cÿbÿÿabg']]
    with test_table.batch_writer() as batch:
        for item in items:
            batch.put_item(item)

    # TODO(sarna): Once bytes type is supported, /xFF character should be tested
    for prefix in [u'ÿÿ', u'cÿbÿ']:
        conditions = {
            'p': {'AttributeValueList': ['unorthodox_chars'], 'ComparisonOperator': 'EQ'},
            'c': {'AttributeValueList': [prefix], 'ComparisonOperator': 'BEGINS_WITH'},
        }
        fetched = []
        for page in query_paginator.paginate(TableName=test_table.name, KeyConditions=conditions):
            fetched.extend(page['Items'])
        print(fetched)
        # The returned sort keys must be exactly those starting with prefix:
        assert sorted(d['c'] for d in fetched) == sorted(d['c'] for d in items if d['c'].startswith(prefix))
|
||||
|
||||
def test_begins_with_wrong_type(dynamodb, test_table_sn):
    # BEGINS_WITH makes no sense on a numeric sort key, so using it on a
    # number must be rejected with a ValidationException.
    query_paginator = dynamodb.meta.client.get_paginator('query')
    conditions = {
        'p': {'AttributeValueList': ['unorthodox_chars'], 'ComparisonOperator': 'EQ'},
        'c': {'AttributeValueList': [17], 'ComparisonOperator': 'BEGINS_WITH'},
    }
    with pytest.raises(ClientError, match='ValidationException'):
        for _ in query_paginator.paginate(TableName=test_table_sn.name, KeyConditions=conditions):
            pass
|
||||
|
||||
# Items returned by Query should be sorted by the sort key. The following
|
||||
# tests verify that this is indeed the case, for the three allowed key types:
|
||||
# strings, binary, and numbers. These tests test not just the Query operation,
|
||||
# but inherently that the sort-key sorting works.
|
||||
def test_query_sort_order_string(test_table):
    # Fill one fresh partition with many items. str(i) has a non-obvious
    # sort order (e.g., "100" comes before "2"), making a nice test.
    partition = random_string()
    items = [{'p': partition, 'c': str(i)} for i in range(128)]
    with test_table.batch_writer() as batch:
        for item in items:
            batch.put_item(item)
    returned = full_query(test_table, KeyConditions={'p': {'AttributeValueList': [partition], 'ComparisonOperator': 'EQ'}})
    assert len(returned) == len(items)
    # Compare just the sort keys ("c"):
    expected_keys = [it['c'] for it in items]
    returned_keys = [it['c'] for it in returned]
    # The response must come back already sorted (in string order)...
    assert returned_keys == sorted(returned_keys)
    # ...and be exactly the expected keys, in sorted order.
    assert returned_keys == sorted(expected_keys)
|
||||
def test_query_sort_order_bytes(test_table_sb):
    # Same idea as test_query_sort_order_string, but with a bytes sort key;
    # we arbitrarily use random_bytes of a fixed length.
    partition = random_string()
    items = [{'p': partition, 'c': random_bytes(10)} for _ in range(128)]
    with test_table_sb.batch_writer() as batch:
        for item in items:
            batch.put_item(item)
    returned = full_query(test_table_sb, KeyConditions={'p': {'AttributeValueList': [partition], 'ComparisonOperator': 'EQ'}})
    assert len(returned) == len(items)
    expected_keys = [it['c'] for it in items]
    returned_keys = [it['c'] for it in returned]
    # Boto3's "Binary" objects sort as if bytes were signed integers, which
    # is not DynamoDB's order (byte 0 sorts first, not byte -128), so sort
    # on the raw ".value" byte arrays instead.
    assert returned_keys == sorted(returned_keys, key=lambda b: b.value)
    assert returned_keys == sorted(expected_keys)
|
||||
def test_query_sort_order_number(test_table_sn):
    # A list of numbers already in correct sorted order, each suitable for
    # accurate representation by Alternator's number type.
    numbers = [
        Decimal("-2e10"),
        Decimal("-7.1e2"),
        Decimal("-4.1"),
        Decimal("-0.1"),
        Decimal("-1e-5"),
        Decimal("0"),
        Decimal("2e-5"),
        Decimal("0.15"),
        Decimal("1"),
        Decimal("1.00000000000000000000000001"),
        Decimal("3.14159"),
        Decimal("3.1415926535897932384626433832795028841"),
        Decimal("31.4"),
        Decimal("1.4e10"),
    ]
    # Write them into one fresh partition in shuffled order:
    partition = random_string()
    shuffled = random.sample(numbers, len(numbers))
    with test_table_sn.batch_writer() as batch:
        for num in shuffled:
            batch.put_item({'p': partition, 'c': num})
    # Query must return exactly the same numbers (identical precision),
    # back in the original sorted order.
    returned = full_query(test_table_sn, KeyConditions={'p': {'AttributeValueList': [partition], 'ComparisonOperator': 'EQ'}})
    assert [it['c'] for it in returned] == numbers
|
||||
|
||||
def test_query_filtering_attributes_equality(filled_test_table):
    # QueryFilter with EQ on non-key attributes: first one condition, then
    # two conditions which are implicitly ANDed together.
    table, items = filled_test_table
    key_conditions = {'p': {'AttributeValueList': ['long'], 'ComparisonOperator': 'EQ'}}

    single_filter = {
        "attribute": {"AttributeValueList": ["xxxx"], "ComparisonOperator": "EQ"},
    }
    fetched = full_query(table, KeyConditions=key_conditions, QueryFilter=single_filter)
    print(fetched)
    assert multiset([i for i in items if i['p'] == 'long' and i['attribute'] == 'xxxx']) == multiset(fetched)

    double_filter = {
        "attribute": {"AttributeValueList": ["xxxx"], "ComparisonOperator": "EQ"},
        "another": {"AttributeValueList": ["yy"], "ComparisonOperator": "EQ"},
    }
    fetched = full_query(table, KeyConditions=key_conditions, QueryFilter=double_filter)
    print(fetched)
    assert multiset([i for i in items if i['p'] == 'long' and i['attribute'] == 'xxxx' and i['another'] == 'yy']) == multiset(fetched)
|
||||
|
||||
# Test that FilterExpression works as expected
@pytest.mark.xfail(reason="FilterExpression not supported yet")
def test_query_filter_expression(filled_test_table):
    # Same checks as test_query_filtering_attributes_equality, but spelled
    # with the modern FilterExpression syntax instead of QueryFilter.
    table, items = filled_test_table
    key_conditions = {'p': {'AttributeValueList': ['long'], 'ComparisonOperator': 'EQ'}}

    fetched = full_query(table, KeyConditions=key_conditions, FilterExpression=Attr("attribute").eq("xxxx"))
    print(fetched)
    assert multiset([i for i in items if i['p'] == 'long' and i['attribute'] == 'xxxx']) == multiset(fetched)

    fetched = full_query(table, KeyConditions=key_conditions, FilterExpression=Attr("attribute").eq("xxxx") & Attr("another").eq("yy"))
    print(fetched)
    assert multiset([i for i in items if i['p'] == 'long' and i['attribute'] == 'xxxx' and i['another'] == 'yy']) == multiset(fetched)
|
||||
|
||||
# QueryFilter can only contain non-key attributes in order to be compatible
def test_query_filtering_key_equality(filled_test_table):
    table, items = filled_test_table
    key_conditions = {'p': {'AttributeValueList': ['long'], 'ComparisonOperator': 'EQ'}}

    # Filtering on the sort key "c" must be rejected:
    with pytest.raises(ClientError, match='ValidationException'):
        sort_key_filter = {
            "c": {"AttributeValueList": ["5"], "ComparisonOperator": "EQ"},
        }
        print(full_query(table, KeyConditions=key_conditions, QueryFilter=sort_key_filter))

    # Filtering on the hash key "p" must be rejected too, even when a
    # legitimate non-key attribute appears alongside it:
    with pytest.raises(ClientError, match='ValidationException'):
        hash_key_filter = {
            "attribute": {"AttributeValueList": ["x"], "ComparisonOperator": "EQ"},
            "p": {"AttributeValueList": ["5"], "ComparisonOperator": "EQ"},
        }
        print(full_query(table, KeyConditions=key_conditions, QueryFilter=hash_key_filter))
|
||||
|
||||
# Test Query with the AttributesToGet parameter. Result should include the
|
||||
# selected attributes only - if one wants the key attributes as well, one
|
||||
# needs to select them explicitly. When no key attributes are selected,
|
||||
# some items may have *none* of the selected attributes. Those items are
|
||||
# returned too, as empty items - they are not outright missing.
|
||||
def test_query_attributes_to_get(dynamodb, test_table):
    partition = random_string()
    items = [{'p': partition, 'c': str(i), 'a': str(i * 10), 'b': str(i * 100)} for i in range(10)]
    with test_table.batch_writer() as batch:
        for item in items:
            batch.put_item(item)
    selections = [
        ['a'],            # only non-key attributes
        ['c', 'a'],       # a key attribute (sort key) and non-key
        ['p', 'c'],       # entire key
        ['nonexistent'],  # none of the items have this attribute!
    ]
    for wanted in selections:
        fetched = full_query(test_table, KeyConditions={'p': {'AttributeValueList': [partition], 'ComparisonOperator': 'EQ'}}, AttributesToGet=wanted)
        # Each expected item keeps only the requested attributes it has;
        # an item with none of them becomes an empty dict, not missing.
        expected = [{k: item[k] for k in wanted if k in item} for item in items]
        assert multiset(expected) == multiset(fetched)
|
||||
|
||||
# Test that in a table with both hash key and sort key, which keys we can
# Query by: We can Query by the hash key, by a combination of both hash and
# sort keys, but *cannot* query by just the sort key, and obviously not
# by any non-key column.
def test_query_which_key(test_table):
    p1, p2 = random_string(), random_string()
    c1, c2 = random_string(), random_string()
    item1 = {'p': p1, 'c': c1}
    item2 = {'p': p1, 'c': c2}
    item3 = {'p': p2, 'c': c1}
    for item in [item1, item2, item3]:
        test_table.put_item(Item=item)

    # Small helper to build an EQ KeyCondition for one value:
    def eq(value):
        return {'AttributeValueList': [value], 'ComparisonOperator': 'EQ'}

    # Query by hash key only:
    fetched = full_query(test_table, KeyConditions={'p': eq(p1)})
    assert multiset([item1, item2]) == multiset(fetched)
    # Query by hash key *and* sort key (this is basically a GetItem):
    fetched = full_query(test_table, KeyConditions={'p': eq(p1), 'c': eq(c1)})
    assert multiset([item1]) == multiset(fetched)
    # Query by sort key alone is not allowed. DynamoDB reports:
    # "Query condition missed key schema element: p".
    with pytest.raises(ClientError, match='ValidationException'):
        full_query(test_table, KeyConditions={'c': eq(c1)})
    # Query by a non-key isn't allowed, for the same reason - that the
    # actual hash key (p) is missing in the query:
    with pytest.raises(ClientError, match='ValidationException'):
        full_query(test_table, KeyConditions={'z': eq(c1)})
    # If we try both p and a non-key we get a complaint that the sort
    # key is missing: "Query condition missed key schema element: c"
    with pytest.raises(ClientError, match='ValidationException'):
        full_query(test_table, KeyConditions={'p': eq(p1), 'z': eq(c1)})
    # If we try p, c and another key, we get an error that
    # "Conditions can be of length 1 or 2 only".
    with pytest.raises(ClientError, match='ValidationException'):
        full_query(test_table, KeyConditions={'p': eq(p1), 'c': eq(c1), 'z': eq(c1)})
|
||||
191
alternator-test/test_scan.py
Normal file
191
alternator-test/test_scan.py
Normal file
@@ -0,0 +1,191 @@
|
||||
# Copyright 2019 ScyllaDB
|
||||
#
|
||||
# This file is part of Scylla.
|
||||
#
|
||||
# Scylla is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Scylla is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Tests for the Scan operation
|
||||
|
||||
import pytest
|
||||
from botocore.exceptions import ClientError
|
||||
from util import random_string, full_scan, multiset
|
||||
from boto3.dynamodb.conditions import Attr
|
||||
|
||||
# Test that scanning works fine with/without pagination
|
||||
def test_scan_basic(filled_test_table):
    # Scan the whole table without a Limit and with assorted Limits,
    # following LastEvaluatedKey pagination to exhaustion. In every case
    # the concatenated pages must equal the full item set.
    table, items = filled_test_table
    for limit in [None, 1, 2, 4, 33, 50, 100, 9007, 16 * 1024 * 1024]:
        fetched = []
        pos = None
        while True:
            kwargs = {}
            if pos:
                kwargs['ExclusiveStartKey'] = pos
            if limit:
                kwargs['Limit'] = limit
            response = table.scan(**kwargs)
            if limit:
                # A page may never exceed the requested Limit:
                assert len(response['Items']) <= limit
            fetched += response['Items']
            pos = response.get('LastEvaluatedKey', None)
            if not pos:
                break

        assert len(fetched) == len(items)
        assert multiset(fetched) == multiset(items)
|
||||
|
||||
def test_scan_with_paginator(dynamodb, filled_test_table):
    # Scan via boto3's paginator - first with the default page size, then
    # with several explicit page sizes. Every run must return the table's
    # complete item set.
    table, items = filled_test_table
    scan_paginator = dynamodb.meta.client.get_paginator('scan')

    def paginate_all(**kwargs):
        # Concatenate all pages of one paginated Scan:
        fetched = []
        for page in scan_paginator.paginate(TableName=table.name, **kwargs):
            fetched += page['Items']
        return fetched

    fetched = paginate_all()
    assert len(fetched) == len(items)
    assert multiset(fetched) == multiset(items)

    for page_size in [1, 17, 1234]:
        fetched = paginate_all(PaginationConfig={'PageSize': page_size})
        assert len(fetched) == len(items)
        assert multiset(fetched) == multiset(items)
|
||||
|
||||
# Although partitions are scanned in seemingly-random order, inside a
|
||||
# partition items must be returned by Scan sorted in sort-key order.
|
||||
# This test verifies this, for string sort key. We'll need separate
|
||||
# tests for the other sort-key types (number and binary)
|
||||
def test_scan_sort_order_string(filled_test_table):
    table, items = filled_test_table
    fetched = full_scan(table)
    assert len(fetched) == len(items)
    # Look at just the sort keys ("c") of the partition "long":
    expected_keys = [it['c'] for it in items if it['p'] == 'long']
    fetched_keys = [it['c'] for it in fetched if it['p'] == 'long']
    # Within the partition they must come back already sorted (string
    # order), and be exactly the expected keys in sorted order:
    assert fetched_keys == sorted(fetched_keys)
    assert fetched_keys == sorted(expected_keys)
|
||||
|
||||
# Test Scan with the AttributesToGet parameter. Result should include the
|
||||
# selected attributes only - if one wants the key attributes as well, one
|
||||
# needs to select them explicitly. When no key attributes are selected,
|
||||
# some items may have *none* of the selected attributes. Those items are
|
||||
# returned too, as empty items - they are not outright missing.
|
||||
def test_scan_attributes_to_get(dynamodb, filled_test_table):
    table, items = filled_test_table
    selections = [
        ['another'],       # only non-key attributes (one item doesn't have it!)
        ['c', 'another'],  # a key attribute (sort key) and non-key
        ['p', 'c'],        # entire key
        ['nonexistent'],   # none of the items have this attribute!
    ]
    for wanted in selections:
        print(wanted)
        fetched = full_scan(table, AttributesToGet=wanted)
        # Each expected item keeps only the requested attributes it has;
        # an item with none of them becomes an empty dict, not missing.
        expected = [{k: item[k] for k in wanted if k in item} for item in items]
        assert multiset(expected) == multiset(fetched)
|
||||
|
||||
def test_scan_with_attribute_equality_filtering(dynamodb, filled_test_table):
    # ScanFilter with EQ on regular (non-key) attributes: first a single
    # condition, then two conditions which are implicitly ANDed together.
    table, items = filled_test_table

    single_filter = {
        "attribute": {"AttributeValueList": ["xxxxx"], "ComparisonOperator": "EQ"},
    }
    fetched = full_scan(table, ScanFilter=single_filter)
    expected = [it for it in items if "attribute" in it.keys() and it["attribute"] == "xxxxx"]
    assert multiset(expected) == multiset(fetched)

    double_filter = {
        "another": {"AttributeValueList": ["y"], "ComparisonOperator": "EQ"},
        "attribute": {"AttributeValueList": ["xxxxx"], "ComparisonOperator": "EQ"},
    }
    fetched = full_scan(table, ScanFilter=double_filter)
    expected = [it for it in items if "attribute" in it.keys() and it["attribute"] == "xxxxx" and it["another"] == "y"]
    assert multiset(expected) == multiset(fetched)
|
||||
|
||||
# Test that FilterExpression works as expected
@pytest.mark.xfail(reason="FilterExpression not supported yet")
def test_scan_filter_expression(filled_test_table):
    # Same checks as test_scan_with_attribute_equality_filtering, but
    # spelled with the modern FilterExpression syntax instead of ScanFilter.
    table, items = filled_test_table

    fetched = full_scan(table, FilterExpression=Attr("attribute").eq("xxxx"))
    print(fetched)
    assert multiset([it for it in items if 'attribute' in it.keys() and it['attribute'] == 'xxxx']) == multiset(fetched)

    fetched = full_scan(table, FilterExpression=Attr("attribute").eq("xxxx") & Attr("another").eq("yy"))
    print(fetched)
    assert multiset([it for it in items if 'attribute' in it.keys() and 'another' in it.keys() and it['attribute'] == 'xxxx' and it['another'] == 'yy']) == multiset(fetched)
|
||||
|
||||
def test_scan_with_key_equality_filtering(dynamodb, filled_test_table):
    """ScanFilter (unlike QueryFilter) may name key attributes.

    Verify EQ filtering on the hash key alone, the sort key alone, and each
    key attribute combined with a regular attribute.
    """
    table, items = filled_test_table
    scan_filter_p = {
        "p": {"AttributeValueList": ["7"], "ComparisonOperator": "EQ"}
    }
    scan_filter_c = {
        "c": {"AttributeValueList": ["9"], "ComparisonOperator": "EQ"}
    }
    scan_filter_p_and_attribute = {
        "p": {"AttributeValueList": ["7"], "ComparisonOperator": "EQ"},
        "attribute": {"AttributeValueList": ["x" * 7], "ComparisonOperator": "EQ"}
    }
    scan_filter_c_and_another = {
        "c": {"AttributeValueList": ["9"], "ComparisonOperator": "EQ"},
        "another": {"AttributeValueList": ["y" * 16], "ComparisonOperator": "EQ"}
    }

    # Filtering on the hash key
    got_items = full_scan(table, ScanFilter=scan_filter_p)
    expected_items = [item for item in items if "p" in item.keys() and item["p"] == "7"]
    assert multiset(expected_items) == multiset(got_items)

    # Filtering on the sort key
    got_items = full_scan(table, ScanFilter=scan_filter_c)
    expected_items = [item for item in items if "c" in item.keys() and item["c"] == "9"]
    assert multiset(expected_items) == multiset(got_items)

    # Filtering on the hash key and an attribute.
    # Fix: the expected set previously checked item["another"] == "y"*16,
    # but the filter actually applied was attribute == "x"*7 - the expected
    # set must mirror the filter for the assertion to be meaningful.
    got_items = full_scan(table, ScanFilter=scan_filter_p_and_attribute)
    expected_items = [item for item in items if "p" in item.keys() and "attribute" in item.keys() and item["p"] == "7" and item["attribute"] == "x" * 7]
    assert multiset(expected_items) == multiset(got_items)

    # Filtering on the sort key and an attribute
    got_items = full_scan(table, ScanFilter=scan_filter_c_and_another)
    expected_items = [item for item in items if "c" in item.keys() and "another" in item.keys() and item["c"] == "9" and item["another"] == "y" * 16]
    assert multiset(expected_items) == multiset(got_items)
|
||||
@@ -74,11 +74,6 @@ def create_and_delete_table(dynamodb, name, **kwargs):
|
||||
def test_create_and_delete_table(dynamodb):
|
||||
create_and_delete_table(dynamodb, 'alternator_test')
|
||||
|
||||
# Test that recreating a table right after deleting it works without issues
|
||||
def test_recreate_table(dynamodb):
|
||||
create_and_delete_table(dynamodb, 'alternator_recr_test')
|
||||
create_and_delete_table(dynamodb, 'alternator_recr_test')
|
||||
|
||||
# DynamoDB documentation specifies that table names must be 3-255 characters,
|
||||
# and match the regex [a-zA-Z0-9._-]+. Names not matching these rules should
|
||||
# be rejected, and no table be created.
|
||||
@@ -232,35 +227,6 @@ def test_create_table_billing_mode_errors(dynamodb, test_table):
|
||||
KeySchema=[{ 'AttributeName': 'p', 'KeyType': 'HASH' }],
|
||||
AttributeDefinitions=[{ 'AttributeName': 'p', 'AttributeType': 'S' }])
|
||||
|
||||
# Even before Alternator gains full support for the DynamoDB stream API
|
||||
# and CreateTable's StreamSpecification option, we should support the
|
||||
# options which mean it is turned *off*.
|
||||
def test_table_streams_off(dynamodb):
|
||||
# If StreamSpecification is given, but has StreamEnabled=false, it's as
|
||||
# if StreamSpecification was missing. StreamViewType isn't needed.
|
||||
table = create_test_table(dynamodb, StreamSpecification={'StreamEnabled': False},
|
||||
KeySchema=[{ 'AttributeName': 'p', 'KeyType': 'HASH' }],
|
||||
AttributeDefinitions=[{ 'AttributeName': 'p', 'AttributeType': 'S' }]);
|
||||
table.delete();
|
||||
# DynamoDB doesn't allow StreamSpecification to be empty map - if it
|
||||
# exists, it must have a StreamEnabled
|
||||
# Unfortunately, new versions of boto3 doesn't let us pass this...
|
||||
#with pytest.raises(ClientError, match='ValidationException'):
|
||||
# table = create_test_table(dynamodb, StreamSpecification={},
|
||||
# KeySchema=[{ 'AttributeName': 'p', 'KeyType': 'HASH' }],
|
||||
# AttributeDefinitions=[{ 'AttributeName': 'p', 'AttributeType': 'S' }]);
|
||||
# table.delete();
|
||||
# Unfortunately, boto3 doesn't allow us to pass StreamSpecification=None.
|
||||
# This is what we had in issue #5796.
|
||||
|
||||
@pytest.mark.xfail(reason="streams not yet implemented")
|
||||
def test_table_streams_on(dynamodb):
|
||||
table = create_test_table(dynamodb,
|
||||
StreamSpecification={'StreamEnabled': True, 'StreamViewType': 'OLD_IMAGE'},
|
||||
KeySchema=[{ 'AttributeName': 'p', 'KeyType': 'HASH' }],
|
||||
AttributeDefinitions=[{ 'AttributeName': 'p', 'AttributeType': 'S' }]);
|
||||
table.delete();
|
||||
|
||||
# Our first implementation had a special column name called "attrs" where
|
||||
# we stored a map for all non-key columns. If the user tried to name one
|
||||
# of the key columns with this same name, the result was a disaster - Scylla
|
||||
@@ -370,7 +370,7 @@ def test_update_expression_cannot_modify_key(test_table):
|
||||
|
||||
# Test that trying to start an expression with some nonsense like HELLO
|
||||
# instead of SET, REMOVE, ADD or DELETE, fails.
|
||||
def test_update_expression_non_existent_clause(test_table_s):
|
||||
def test_update_expression_non_existant_clause(test_table_s):
|
||||
p = random_string()
|
||||
with pytest.raises(ClientError, match='ValidationException'):
|
||||
test_table_s.update_item(Key={'p': p},
|
||||
@@ -584,7 +584,7 @@ def test_update_expression_if_not_exists(test_table_s):
|
||||
# value may itself be a function call - ad infinitum. So expressions like
|
||||
# list_append(if_not_exists(a, :val1), :val2) are legal and so is deeper
|
||||
# nesting.
|
||||
@pytest.mark.xfail(reason="for unknown reason, DynamoDB does not allow nesting list_append")
|
||||
@pytest.mark.xfail(reason="SET functions not yet implemented")
|
||||
def test_update_expression_function_nesting(test_table_s):
|
||||
p = random_string()
|
||||
test_table_s.update_item(Key={'p': p},
|
||||
@@ -717,28 +717,10 @@ def test_update_expression_delete_sets(test_table_s):
|
||||
UpdateExpression='DELETE a :val1',
|
||||
ExpressionAttributeValues={':val1': set(['pig'])})
|
||||
assert test_table_s.get_item(Key={'p': p}, ConsistentRead=True)['Item']['a'] == set(['dog'])
|
||||
# Deleting all the elements cannot leave an empty set (which isn't
|
||||
# supported). Rather, it deletes the attribute altogether:
|
||||
test_table_s.update_item(Key={'p': p},
|
||||
UpdateExpression='DELETE a :val1',
|
||||
ExpressionAttributeValues={':val1': set(['dog'])})
|
||||
assert test_table_s.get_item(Key={'p': p}, ConsistentRead=True)['Item'] == {'p': p, 'b': 'hi'}
|
||||
# Deleting elements from a non-existent attribute is allowed, and
|
||||
# simply does nothing:
|
||||
test_table_s.update_item(Key={'p': p},
|
||||
UpdateExpression='DELETE a :val1',
|
||||
ExpressionAttributeValues={':val1': set(['dog'])})
|
||||
assert test_table_s.get_item(Key={'p': p}, ConsistentRead=True)['Item'] == {'p': p, 'b': 'hi'}
|
||||
# An empty set parameter is not allowed
|
||||
with pytest.raises(ClientError, match='ValidationException.*empty'):
|
||||
test_table_s.update_item(Key={'p': p},
|
||||
UpdateExpression='DELETE a :val1',
|
||||
ExpressionAttributeValues={':val1': set([])})
|
||||
# The value to be deleted must be a set of the same type - it can't
|
||||
# be a single element or anything else. If the value has the wrong type,
|
||||
# we get an error like "Invalid UpdateExpression: Incorrect operand type
|
||||
# for operator or function; operator: DELETE, operand type: STRING".
|
||||
test_table_s.put_item(Item={'p': p, 'a': set(['dog', 'cat', 'mouse']), 'b': 'hi'})
|
||||
with pytest.raises(ClientError, match='ValidationException.*type'):
|
||||
test_table_s.update_item(Key={'p': p},
|
||||
UpdateExpression='DELETE a :val1',
|
||||
@@ -870,25 +852,3 @@ def test_nested_attribute_update_bad_path_array(test_table_s):
|
||||
with pytest.raises(ClientError, match='ValidationException.*path'):
|
||||
test_table_s.update_item(Key={'p': p}, UpdateExpression='SET a[0] = :val1',
|
||||
ExpressionAttributeValues={':val1': 7})
|
||||
|
||||
# DynamoDB Does not allow empty strings, empty byte arrays, or empty sets.
|
||||
# Trying to ask UpdateItem to put one of these in an attribute should be
|
||||
# forbidden. Empty lists and maps *are* allowed.
|
||||
# Note that in test_item.py::test_update_item_empty_attribute we checked
|
||||
# this with the AttributeUpdates syntax. Here we check the same with the
|
||||
# UpdateExpression syntax.
|
||||
def test_update_expression_empty_attribute(test_table_s):
|
||||
p = random_string()
|
||||
# Empty string, byte array and set are *not* allowed
|
||||
for v in ['', bytearray('', 'utf-8'), set()]:
|
||||
with pytest.raises(ClientError, match='ValidationException.*empty'):
|
||||
test_table_s.update_item(Key={'p': p},
|
||||
UpdateExpression='SET a = :v',
|
||||
ExpressionAttributeValues={':v': v})
|
||||
assert not 'Item' in test_table_s.get_item(Key={'p': p}, ConsistentRead=True)
|
||||
# But empty lists and maps *are* allowed:
|
||||
test_table_s.update_item(Key={'p': p},
|
||||
UpdateExpression='SET d = :v1, e = :v2',
|
||||
ExpressionAttributeValues={':v1': [], ':v2': {}})
|
||||
assert test_table_s.get_item(Key={'p': p}, ConsistentRead=True)['Item'] == {'p': p, 'd': [], 'e': {}}
|
||||
#
|
||||
121
alternator-test/util.py
Normal file
121
alternator-test/util.py
Normal file
@@ -0,0 +1,121 @@
|
||||
# Copyright 2019 ScyllaDB
|
||||
#
|
||||
# This file is part of Scylla.
|
||||
#
|
||||
# Scylla is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Scylla is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Various utility functions which are useful for multiple tests
|
||||
|
||||
import string
|
||||
import random
|
||||
import collections
|
||||
import time
|
||||
|
||||
def random_string(length=10, chars=string.ascii_uppercase + string.digits):
    """Return a random string of `length` characters drawn from `chars`."""
    picked = [random.choice(chars) for _ in range(length)]
    return ''.join(picked)
|
||||
|
||||
def random_bytes(length=10):
    """Return a bytearray of `length` uniformly random bytes."""
    out = bytearray()
    for _ in range(length):
        out.append(random.getrandbits(8))
    return out
|
||||
|
||||
# Utility functions for scan and query into an array of items:
# TODO: add to full_scan and full_query by default ConsistentRead=True, as
# it's not useful for tests without it!
def full_scan(table, **kwargs):
    """Scan `table` to exhaustion, following LastEvaluatedKey paging.

    Any extra keyword arguments (Limit, ScanFilter, AttributesToGet, ...)
    are forwarded to every Scan call. Returns the full list of items.
    """
    items = []
    response = table.scan(**kwargs)
    items.extend(response['Items'])
    while 'LastEvaluatedKey' in response:
        # Override (rather than re-pass alongside) any ExclusiveStartKey the
        # caller supplied; the old code passed it as both an explicit keyword
        # and inside **kwargs, raising TypeError on resumed scans.
        kwargs['ExclusiveStartKey'] = response['LastEvaluatedKey']
        response = table.scan(**kwargs)
        items.extend(response['Items'])
    return items
|
||||
|
||||
# Utility function for fetching the entire results of a query into an array of items
def full_query(table, **kwargs):
    """Query `table` to exhaustion, following LastEvaluatedKey paging.

    Any extra keyword arguments (KeyConditions, QueryFilter, Limit, ...)
    are forwarded to every Query call. Returns the full list of items.
    """
    items = []
    response = table.query(**kwargs)
    items.extend(response['Items'])
    while 'LastEvaluatedKey' in response:
        # Override (rather than re-pass alongside) any ExclusiveStartKey the
        # caller supplied; the old code passed it as both an explicit keyword
        # and inside **kwargs, raising TypeError on resumed queries.
        kwargs['ExclusiveStartKey'] = response['LastEvaluatedKey']
        response = table.query(**kwargs)
        items.extend(response['Items'])
    return items
|
||||
|
||||
# To compare two lists of items (each is a dict) without regard for order,
|
||||
# "==" is not good enough because it will fail if the order is different.
|
||||
# The following function, multiset() converts the list into a multiset
|
||||
# (set with duplicates) where order doesn't matter, so the multisets can
|
||||
# be compared.
|
||||
|
||||
def freeze(item):
    """Recursively turn `item` into a hashable equivalent.

    Dicts become frozensets of (key, frozen-value) pairs, lists become
    tuples, and everything else is returned unchanged.
    """
    if isinstance(item, list):
        return tuple(freeze(element) for element in item)
    if isinstance(item, dict):
        return frozenset((key, freeze(value)) for key, value in item.items())
    return item
|
||||
|
||||
def multiset(items):
    """Build an order-insensitive multiset of `items`.

    Each item is frozen into a hashable form and counted, so two item lists
    can be compared ignoring order but not multiplicity.
    """
    return collections.Counter(freeze(item) for item in items)
|
||||
|
||||
|
||||
test_table_prefix = 'alternator_test_'
def test_table_name():
    """Return a unique table name based on the current time in milliseconds.

    If called twice within the same millisecond (or if the clock moves
    backwards), the previous value is bumped by one to keep names unique.
    """
    now_ms = int(round(time.time() * 1000))
    if test_table_name.last_ms >= now_ms:
        now_ms = test_table_name.last_ms + 1
    test_table_name.last_ms = now_ms
    return test_table_prefix + str(now_ms)
# Per-function state remembering the last timestamp handed out:
test_table_name.last_ms = 0
|
||||
|
||||
def create_test_table(dynamodb, **kwargs):
    """Create a new table with a unique name and on-demand
    (PAY_PER_REQUEST) billing, and block until it actually exists.

    kwargs are forwarded to create_table (e.g., KeySchema,
    AttributeDefinitions). Returns the created table resource.
    """
    name = test_table_name()
    print("fixture creating new table {}".format(name))
    table = dynamodb.create_table(TableName=name,
        BillingMode='PAY_PER_REQUEST', **kwargs)
    waiter = table.meta.client.get_waiter('table_exists')
    # recheck every second instead of the default, lower, frequency. This can
    # save a few seconds on AWS with its very slow table creation, and even
    # more on tests on Scylla with its faster table creation turnaround.
    waiter.config.delay = 1
    waiter.config.max_attempts = 200
    waiter.wait(TableName=name)
    return table
|
||||
|
||||
# DynamoDB's ListTables request returns up to a single page of table names
|
||||
# (e.g., up to 100) and it is up to the caller to call it again and again
|
||||
# to get the next page. This is a utility function which calls it repeatedly
|
||||
# as much as necessary to get the entire list.
|
||||
# We deliberately return a list and not a set, because we want the caller
|
||||
# to be able to recognize bugs in ListTables which causes the same table
|
||||
# to be returned twice.
|
||||
def list_tables(dynamodb, limit=100):
    """Return the full list of table names, calling ListTables repeatedly
    to follow its pagination (each call returns at most one page, e.g.
    up to 100 names).

    We deliberately return a list and not a set, because we want the
    caller to be able to recognize bugs in ListTables which cause the
    same table to be returned twice.
    """
    ret = []
    pos = None
    while True:
        if pos:
            page = dynamodb.meta.client.list_tables(Limit=limit, ExclusiveStartTableName=pos)
        else:
            page = dynamodb.meta.client.list_tables(Limit=limit)
        results = page.get('TableNames', None)
        # The TableNames key must always be present in the response, even
        # when the account has no tables at all (an empty list is valid).
        assert results is not None
        ret += results
        newpos = page.get('LastEvaluatedTableName', None)
        if not newpos:
            break
        # It doesn't make sense for Dynamo to tell us we need more pages, but
        # not send anything in *this* page!
        assert len(results) > 0
        assert newpos != pos
        # Note that we only checked that we got back tables, not that we got
        # any new tables not already in ret. So a buggy implementation might
        # still cause an endless loop getting the same tables again and again.
        pos = newpos
    return ret
|
||||
@@ -1,147 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019 ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "alternator/error.hh"
|
||||
#include "log.hh"
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
#include <gnutls/crypto.h>
|
||||
#include <seastar/util/defer.hh>
|
||||
#include "hashers.hh"
|
||||
#include "bytes.hh"
|
||||
#include "alternator/auth.hh"
|
||||
#include <fmt/format.h>
|
||||
#include "auth/common.hh"
|
||||
#include "auth/password_authenticator.hh"
|
||||
#include "auth/roles-metadata.hh"
|
||||
#include "cql3/query_processor.hh"
|
||||
#include "cql3/untyped_result_set.hh"
|
||||
|
||||
namespace alternator {
|
||||
|
||||
static logging::logger alogger("alternator-auth");
|
||||
|
||||
// Compute the HMAC-SHA256 of msg keyed by key, using GnuTLS.
// Throws std::runtime_error if the underlying GnuTLS call fails.
static hmac_sha256_digest hmac_sha256(std::string_view key, std::string_view msg) {
    hmac_sha256_digest digest;
    int ret = gnutls_hmac_fast(GNUTLS_MAC_SHA256, key.data(), key.size(), msg.data(), msg.size(), digest.data());
    if (ret) {
        throw std::runtime_error(fmt::format("Computing HMAC failed ({}): {}", ret, gnutls_strerror(ret)));
    }
    return digest;
}
|
||||
|
||||
// Derive the AWS Signature V4 signing key: a chain of HMACs over the
// date stamp, region name, service name and the fixed "aws4_request"
// string, starting from "AWS4" + the secret key.
static hmac_sha256_digest get_signature_key(std::string_view key, std::string_view date_stamp, std::string_view region_name, std::string_view service_name) {
    auto date = hmac_sha256("AWS4" + std::string(key), date_stamp);
    auto region = hmac_sha256(std::string_view(date.data(), date.size()), region_name);
    auto service = hmac_sha256(std::string_view(region.data(), region.size()), service_name);
    auto signing = hmac_sha256(std::string_view(service.data(), service.size()), "aws4_request");
    return signing;
}
|
||||
|
||||
// Return the hex-encoded SHA-256 digest of msg.
static std::string apply_sha256(std::string_view msg) {
    sha256_hasher hasher;
    hasher.update(msg.data(), msg.size());
    return to_hex(hasher.finalize());
}
|
||||
|
||||
// Format a time point in the fixed-width ISO8601 basic format used by
// AWS Signature V4 timestamps, e.g. "20190906T123456Z" (UTC, 16 chars).
static std::string format_time_point(db_clock::time_point tp) {
    time_t time_point_repr = db_clock::to_time_t(tp);
    std::string time_point_str;
    // Reserve room for the 16 format characters plus strftime's
    // terminating null.
    time_point_str.resize(17);
    ::tm time_buf;
    // strftime prints the terminating null character as well
    std::strftime(time_point_str.data(), time_point_str.size(), "%Y%m%dT%H%M%SZ", ::gmtime_r(&time_point_repr, &time_buf));
    // Drop the trailing null byte written by strftime.
    time_point_str.resize(16);
    return time_point_str;
}
|
||||
|
||||
// Verify that a signature's timestamp is within the accepted window:
// no older than 15 minutes and no more than 15 minutes in the future.
// Throws api_error (InvalidSignatureException) otherwise.
// Note: plain string comparison is valid here because format_time_point()
// produces fixed-width timestamps whose lexicographic order matches
// chronological order.
void check_expiry(std::string_view signature_date) {
    //FIXME: The default 15min can be changed with X-Amz-Expires header - we should honor it
    std::string expiration_str = format_time_point(db_clock::now() - 15min);
    std::string validity_str = format_time_point(db_clock::now() + 15min);
    if (signature_date < expiration_str) {
        throw api_error("InvalidSignatureException",
                fmt::format("Signature expired: {} is now earlier than {} (current time - 15 min.)",
                        signature_date, expiration_str));
    }
    if (signature_date > validity_str) {
        throw api_error("InvalidSignatureException",
                fmt::format("Signature not yet current: {} is still later than {} (current time + 15 min.)",
                        signature_date, validity_str));
    }
}
|
||||
|
||||
// Compute the expected AWS Signature V4 signature (hex string) for a
// request, so it can be compared against the signature the client sent.
// Validates the X-Amz-Date signed header (presence, freshness via
// check_expiry(), and agreement with the credential's datestamp), builds
// the canonical request and string-to-sign, derives the signing key, and
// returns the hex-encoded HMAC of the string-to-sign.
// Throws api_error (InvalidSignatureException) on validation failures.
std::string get_signature(std::string_view access_key_id, std::string_view secret_access_key, std::string_view host, std::string_view method,
        std::string_view orig_datestamp, std::string_view signed_headers_str, const std::map<std::string_view, std::string_view>& signed_headers_map,
        std::string_view body_content, std::string_view region, std::string_view service, std::string_view query_string) {
    auto amz_date_it = signed_headers_map.find("x-amz-date");
    if (amz_date_it == signed_headers_map.end()) {
        throw api_error("InvalidSignatureException", "X-Amz-Date header is mandatory for signature verification");
    }
    std::string_view amz_date = amz_date_it->second;
    check_expiry(amz_date);
    // The first 8 characters of the timestamp are the YYYYMMDD datestamp.
    std::string_view datestamp = amz_date.substr(0, 8);
    if (datestamp != orig_datestamp) {
        throw api_error("InvalidSignatureException",
                format("X-Amz-Date date does not match the provided datestamp. Expected {}, got {}",
                        orig_datestamp, datestamp));
    }
    std::string_view canonical_uri = "/";

    // Canonical headers: "name:value\n" for each signed header. The map is
    // ordered, giving the sorted order the signature algorithm requires.
    std::stringstream canonical_headers;
    for (const auto& header : signed_headers_map) {
        canonical_headers << fmt::format("{}:{}", header.first, header.second) << '\n';
    }

    std::string payload_hash = apply_sha256(body_content);
    std::string canonical_request = fmt::format("{}\n{}\n{}\n{}\n{}\n{}", method, canonical_uri, query_string, canonical_headers.str(), signed_headers_str, payload_hash);

    std::string_view algorithm = "AWS4-HMAC-SHA256";
    std::string credential_scope = fmt::format("{}/{}/{}/aws4_request", datestamp, region, service);
    std::string string_to_sign = fmt::format("{}\n{}\n{}\n{}", algorithm, amz_date, credential_scope, apply_sha256(canonical_request));

    hmac_sha256_digest signing_key = get_signature_key(secret_access_key, datestamp, region, service);
    hmac_sha256_digest signature = hmac_sha256(std::string_view(signing_key.data(), signing_key.size()), string_to_sign);

    return to_hex(bytes_view(reinterpret_cast<const int8_t*>(signature.data()), signature.size()));
}
|
||||
|
||||
// Look up the stored password hash ("salted_hash") for the given user in
// the system roles table; this serves as the secret key for signature
// verification. Resolves to the hash string, or fails the future with
// api_error (UnrecognizedClientException) if the user does not exist or
// has no password set.
future<std::string> get_key_from_roles(cql3::query_processor& qp, std::string username) {
    static const sstring query = format("SELECT salted_hash FROM {} WHERE {} = ?",
            auth::meta::roles_table::qualified_name(), auth::meta::roles_table::role_col_name);

    // Use the same consistency level the password authenticator uses for
    // this user (e.g. stronger consistency for privileged accounts).
    auto cl = auth::password_authenticator::consistency_for_user(username);
    auto& timeout = auth::internal_distributed_timeout_config();
    return qp.execute_internal(query, cl, timeout, {sstring(username)}, true).then_wrapped([username = std::move(username)] (future<::shared_ptr<cql3::untyped_result_set>> f) {
        auto res = f.get0();
        auto salted_hash = std::optional<sstring>();
        if (res->empty()) {
            throw api_error("UnrecognizedClientException", fmt::format("User not found: {}", username));
        }
        salted_hash = res->one().get_opt<sstring>("salted_hash");
        if (!salted_hash) {
            throw api_error("UnrecognizedClientException", fmt::format("No password found for user: {}", username));
        }
        return make_ready_future<std::string>(*salted_hash);
    });
}
|
||||
|
||||
}
|
||||
@@ -1,46 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019 ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
#include <array>
|
||||
#include "gc_clock.hh"
|
||||
#include "utils/loading_cache.hh"
|
||||
|
||||
namespace cql3 {
|
||||
class query_processor;
|
||||
}
|
||||
|
||||
namespace alternator {
|
||||
|
||||
using hmac_sha256_digest = std::array<char, 32>;
|
||||
|
||||
using key_cache = utils::loading_cache<std::string, std::string>;
|
||||
|
||||
std::string get_signature(std::string_view access_key_id, std::string_view secret_access_key, std::string_view host, std::string_view method,
|
||||
std::string_view orig_datestamp, std::string_view signed_headers_str, const std::map<std::string_view, std::string_view>& signed_headers_map,
|
||||
std::string_view body_content, std::string_view region, std::string_view service, std::string_view query_string);
|
||||
|
||||
future<std::string> get_key_from_roles(cql3::query_processor& qp, std::string username);
|
||||
|
||||
}
|
||||
@@ -21,14 +21,8 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <string_view>
|
||||
#include <string>
|
||||
#include "bytes.hh"
|
||||
#include "rjson.hh"
|
||||
|
||||
std::string base64_encode(bytes_view);
|
||||
|
||||
bytes base64_decode(std::string_view);
|
||||
|
||||
// Convenience overload: decode a base64-encoded JSON string value
// directly into raw bytes, without copying the string first.
inline bytes base64_decode(const rjson::value& v) {
    return base64_decode(std::string_view(v.GetString(), v.GetStringLength()));
}
|
||||
|
||||
@@ -27,14 +27,6 @@
|
||||
#include "cql3/constants.hh"
|
||||
#include <unordered_map>
|
||||
#include "rjson.hh"
|
||||
#include "serialization.hh"
|
||||
#include "base64.hh"
|
||||
#include <stdexcept>
|
||||
#include <boost/algorithm/cxx11/all_of.hpp>
|
||||
#include <boost/algorithm/cxx11/any_of.hpp>
|
||||
#include "utils/overloaded_functor.hh"
|
||||
|
||||
#include "expressions_eval.hh"
|
||||
|
||||
namespace alternator {
|
||||
|
||||
@@ -43,19 +35,13 @@ static logging::logger clogger("alternator-conditions");
|
||||
comparison_operator_type get_comparison_operator(const rjson::value& comparison_operator) {
|
||||
static std::unordered_map<std::string, comparison_operator_type> ops = {
|
||||
{"EQ", comparison_operator_type::EQ},
|
||||
{"NE", comparison_operator_type::NE},
|
||||
{"LE", comparison_operator_type::LE},
|
||||
{"LT", comparison_operator_type::LT},
|
||||
{"GE", comparison_operator_type::GE},
|
||||
{"GT", comparison_operator_type::GT},
|
||||
{"IN", comparison_operator_type::IN},
|
||||
{"NULL", comparison_operator_type::IS_NULL},
|
||||
{"NOT_NULL", comparison_operator_type::NOT_NULL},
|
||||
{"BETWEEN", comparison_operator_type::BETWEEN},
|
||||
{"BEGINS_WITH", comparison_operator_type::BEGINS_WITH},
|
||||
{"CONTAINS", comparison_operator_type::CONTAINS},
|
||||
{"NOT_CONTAINS", comparison_operator_type::NOT_CONTAINS},
|
||||
};
|
||||
}; //TODO(sarna): NE, IN, CONTAINS, NULL, NOT_NULL
|
||||
if (!comparison_operator.IsString()) {
|
||||
throw api_error("ValidationException", format("Invalid comparison operator definition {}", rjson::print(comparison_operator)));
|
||||
}
|
||||
@@ -76,7 +62,7 @@ static ::shared_ptr<cql3::restrictions::single_column_restriction::contains> mak
|
||||
}
|
||||
|
||||
static ::shared_ptr<cql3::restrictions::single_column_restriction::EQ> make_key_eq_restriction(const column_definition& cdef, const rjson::value& value) {
|
||||
bytes raw_value = get_key_from_typed_value(value, cdef);
|
||||
bytes raw_value = get_key_from_typed_value(value, cdef, type_to_string(cdef.type));
|
||||
auto restriction_value = ::make_shared<cql3::constants::value>(cql3::raw_value::make_value(std::move(raw_value)));
|
||||
return make_shared<cql3::restrictions::single_column_restriction::EQ>(cdef, std::move(restriction_value));
|
||||
}
|
||||
@@ -110,342 +96,30 @@ static ::shared_ptr<cql3::restrictions::single_column_restriction::EQ> make_key_
|
||||
return filtering_restrictions;
|
||||
}
|
||||
|
||||
namespace {

// Interface describing an acceptable AttributeValueList size for an
// operator. Implementations also describe themselves for error messages.
struct size_check {
    // True iff size passes this check.
    virtual bool operator()(rapidjson::SizeType size) const = 0;
    // Check description, such that format("expected array {}", check.what()) is human-readable.
    virtual sstring what() const = 0;
};

// Requires the list to contain exactly the given number of elements.
class exact_size : public size_check {
    rapidjson::SizeType _expected;
public:
    explicit exact_size(rapidjson::SizeType expected) : _expected(expected) {}
    bool operator()(rapidjson::SizeType size) const override { return size == _expected; }
    sstring what() const override { return format("of size {}", _expected); }
};

// Requires the list to be empty.
struct empty : public size_check {
    bool operator()(rapidjson::SizeType size) const override { return size < 1; }
    sstring what() const override { return "to be empty"; }
};

// Requires the list to contain at least one element.
struct nonempty : public size_check {
    bool operator()(rapidjson::SizeType size) const override { return size > 0; }
    sstring what() const override { return "to be non-empty"; }
};

} // anonymous namespace
|
||||
|
||||
// Check that array has the expected number of elements, throwing a
// ValidationException otherwise. array may be null (missing from the
// request); that is accepted only when the check allows size 0.
// op is the operator name, used only in the error message.
static void verify_operand_count(const rjson::value* array, const size_check& expected, const rjson::value& op) {
    if (!array && expected(0)) {
        // If expected() allows an empty AttributeValueList, it is also fine
        // that it is missing.
        return;
    }
    if (!array || !array->IsArray()) {
        throw api_error("ValidationException", "With ComparisonOperator, AttributeValueList must be given and an array");
    }
    if (!expected(array->Size())) {
        throw api_error("ValidationException",
                format("{} operator requires AttributeValueList {}, instead found list size {}",
                        op, expected.what(), array->Size()));
    }
}
|
||||
|
||||
// Comparator allowing an ordered std::set of rjson::value pointers to be
// ordered by the pointed-to values rather than by pointer addresses.
struct rjson_engaged_ptr_comp {
    bool operator()(const rjson::value* p1, const rjson::value* p2) const {
        return rjson::single_value_comp()(*p1, *p2);
    }
};
|
||||
|
||||
// It's not enough to compare underlying JSON objects when comparing sets,
|
||||
// as internally they're stored in an array, and the order of elements is
|
||||
// not important in set equality. See issue #5021
|
||||
static bool check_EQ_for_sets(const rjson::value& set1, const rjson::value& set2) {
|
||||
if (set1.Size() != set2.Size()) {
|
||||
return false;
|
||||
}
|
||||
std::set<const rjson::value*, rjson_engaged_ptr_comp> set1_raw;
|
||||
for (auto it = set1.Begin(); it != set1.End(); ++it) {
|
||||
set1_raw.insert(&*it);
|
||||
}
|
||||
for (const auto& a : set2.GetArray()) {
|
||||
if (set1_raw.count(&a) == 0) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check if two JSON-encoded values match with the EQ relation.
// v1 may be null (attribute missing from the item), which never matches.
// Set types (SS/NS/BS) are compared as sets, ignoring element order;
// everything else falls back to plain JSON equality.
static bool check_EQ(const rjson::value* v1, const rjson::value& v2) {
    if (!v1) {
        return false;
    }
    if (v1->IsObject() && v1->MemberCount() == 1 && v2.IsObject() && v2.MemberCount() == 1) {
        auto it1 = v1->MemberBegin();
        auto it2 = v2.MemberBegin();
        if ((it1->name == "SS" && it2->name == "SS") || (it1->name == "NS" && it2->name == "NS") || (it1->name == "BS" && it2->name == "BS")) {
            return check_EQ_for_sets(it1->value, it2->value);
        }
    }
    return *v1 == v2;
}
|
||||
|
||||
// Check if two JSON-encoded values match with the NE relation
|
||||
static bool check_NE(const rjson::value* v1, const rjson::value& v2) {
|
||||
return !v1 || *v1 != v2; // null is unequal to anything.
|
||||
static bool check_EQ(const rjson::value& v1, const rjson::value& v2) {
|
||||
return v1 == v2;
|
||||
}
|
||||
|
||||
// Check if two JSON-encoded values match with the BEGINS_WITH relation
|
||||
static bool check_BEGINS_WITH(const rjson::value* v1, const rjson::value& v2) {
|
||||
// BEGINS_WITH requires that its single operand (v2) be a string or
|
||||
// binary - otherwise it's a validation error. However, problems with
|
||||
// the stored attribute (v1) will just return false (no match).
|
||||
if (!v2.IsObject() || v2.MemberCount() != 1) {
|
||||
throw api_error("ValidationException", format("BEGINS_WITH operator encountered malformed AttributeValue: {}", v2));
|
||||
}
|
||||
auto it2 = v2.MemberBegin();
|
||||
if (it2->name != "S" && it2->name != "B") {
|
||||
throw api_error("ValidationException", format("BEGINS_WITH operator requires String or Binary in AttributeValue, got {}", it2->name));
|
||||
}
|
||||
|
||||
|
||||
if (!v1 || !v1->IsObject() || v1->MemberCount() != 1) {
|
||||
static bool check_BEGINS_WITH(const rjson::value& v1, const rjson::value& v2) {
|
||||
// BEGINS_WITH only supports comparing two strings or two binaries -
|
||||
// any other combinations of types, or other malformed values, return
|
||||
// false (no match).
|
||||
if (!v1.IsObject() || v1.MemberCount() != 1 || !v2.IsObject() || v2.MemberCount() != 1) {
|
||||
return false;
|
||||
}
|
||||
auto it1 = v1->MemberBegin();
|
||||
auto it1 = v1.MemberBegin();
|
||||
auto it2 = v2.MemberBegin();
|
||||
if (it1->name != it2->name) {
|
||||
return false;
|
||||
}
|
||||
if (it2->name == "S") {
|
||||
std::string_view val1(it1->value.GetString(), it1->value.GetStringLength());
|
||||
std::string_view val2(it2->value.GetString(), it2->value.GetStringLength());
|
||||
return val1.substr(0, val2.size()) == val2;
|
||||
} else /* it2->name == "B" */ {
|
||||
// TODO (optimization): Check the begins_with condition directly on
|
||||
// the base64-encoded string, without making a decoded copy.
|
||||
bytes val1 = base64_decode(it1->value);
|
||||
bytes val2 = base64_decode(it2->value);
|
||||
return val1.substr(0, val2.size()) == val2;
|
||||
}
|
||||
}
|
||||
|
||||
static bool is_set_of(const rjson::value& type1, const rjson::value& type2) {
|
||||
return (type2 == "S" && type1 == "SS") || (type2 == "N" && type1 == "NS") || (type2 == "B" && type1 == "BS");
|
||||
}
|
||||
|
||||
// Check if two JSON-encoded values match with the CONTAINS relation
|
||||
bool check_CONTAINS(const rjson::value* v1, const rjson::value& v2) {
|
||||
if (!v1) {
|
||||
if (it1->name != "S" && it1->name != "B") {
|
||||
return false;
|
||||
}
|
||||
const auto& kv1 = *v1->MemberBegin();
|
||||
const auto& kv2 = *v2.MemberBegin();
|
||||
if (kv2.name != "S" && kv2.name != "N" && kv2.name != "B") {
|
||||
throw api_error("ValidationException",
|
||||
format("CONTAINS operator requires a single AttributeValue of type String, Number, or Binary, "
|
||||
"got {} instead", kv2.name));
|
||||
}
|
||||
if (kv1.name == "S" && kv2.name == "S") {
|
||||
return rjson::to_string_view(kv1.value).find(rjson::to_string_view(kv2.value)) != std::string_view::npos;
|
||||
} else if (kv1.name == "B" && kv2.name == "B") {
|
||||
return base64_decode(kv1.value).find(base64_decode(kv2.value)) != bytes::npos;
|
||||
} else if (is_set_of(kv1.name, kv2.name)) {
|
||||
for (auto i = kv1.value.Begin(); i != kv1.value.End(); ++i) {
|
||||
if (*i == kv2.value) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
} else if (kv1.name == "L") {
|
||||
for (auto i = kv1.value.Begin(); i != kv1.value.End(); ++i) {
|
||||
if (!i->IsObject() || i->MemberCount() != 1) {
|
||||
clogger.error("check_CONTAINS received a list whose element is malformed");
|
||||
return false;
|
||||
}
|
||||
const auto& el = *i->MemberBegin();
|
||||
if (el.name == kv2.name && el.value == kv2.value) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Check if two JSON-encoded values match with the NOT_CONTAINS relation.
// Note that a missing attribute (v1 == nullptr) matches neither CONTAINS
// nor NOT_CONTAINS.
static bool check_NOT_CONTAINS(const rjson::value* v1, const rjson::value& v2) {
    if (!v1) {
        return false;
    }
    return !check_CONTAINS(v1, v2);
}
|
||||
|
||||
// Check if a JSON-encoded value equals any element of an array, which must
// have at least one element. All elements must be of type String, Number
// or Binary and share the same type, otherwise a ValidationException is
// thrown. A missing attribute (val == nullptr) never matches.
static bool check_IN(const rjson::value* val, const rjson::value& array) {
    if (!array[0].IsObject() || array[0].MemberCount() != 1) {
        throw api_error("ValidationException",
                format("IN operator encountered malformed AttributeValue: {}", array[0]));
    }
    const auto& type = array[0].MemberBegin()->name;
    if (type != "S" && type != "N" && type != "B") {
        throw api_error("ValidationException",
                "IN operator requires AttributeValueList elements to be of type String, Number, or Binary ");
    }
    if (!val) {
        return false;
    }
    bool have_match = false;
    for (const auto& elem : array.GetArray()) {
        if (!elem.IsObject() || elem.MemberCount() != 1 || elem.MemberBegin()->name != type) {
            throw api_error("ValidationException",
                    "IN operator requires all AttributeValueList elements to have the same type ");
        }
        if (!have_match && *val == elem) {
            // Can't return yet, must check types of all array elements. <sigh>
            have_match = true;
        }
    }
    return have_match;
}
|
||||
|
||||
// Another variant of check_IN, this one for ConditionExpression. It needs to
// check whether the first element in the given vector is equal to any of the
// others. Assumes the vector has at least one element.
static bool check_IN(const std::vector<rjson::value>& array) {
    const rjson::value* first = &array[0];
    for (unsigned i = 1; i < array.size(); i++) {
        if (check_EQ(first, array[i])) {
            return true;
        }
    }
    return false;
}
|
||||
|
||||
// NULL operator: matches iff the attribute is absent from the item.
static bool check_NULL(const rjson::value* val) {
    return val == nullptr;
}

// NOT_NULL operator: matches iff the attribute is present in the item.
static bool check_NOT_NULL(const rjson::value* val) {
    return val != nullptr;
}
|
||||
|
||||
// Check if two JSON-encoded values match with cmp.
// The request operand v2 must be a single String, Number or Binary
// AttributeValue, otherwise a ValidationException is thrown. A missing or
// malformed stored attribute (v1), or a type mismatch between v1 and v2,
// simply yields false (no match).
template <typename Comparator>
bool check_compare(const rjson::value* v1, const rjson::value& v2, const Comparator& cmp) {
    if (!v2.IsObject() || v2.MemberCount() != 1) {
        throw api_error("ValidationException",
                format("{} requires a single AttributeValue of type String, Number, or Binary",
                        cmp.diagnostic));
    }
    const auto& kv2 = *v2.MemberBegin();
    if (kv2.name != "S" && kv2.name != "N" && kv2.name != "B") {
        throw api_error("ValidationException",
                format("{} requires a single AttributeValue of type String, Number, or Binary",
                        cmp.diagnostic));
    }
    if (!v1 || !v1->IsObject() || v1->MemberCount() != 1) {
        return false;
    }
    const auto& kv1 = *v1->MemberBegin();
    if (kv1.name != kv2.name) {
        return false;
    }
    // Dispatch on the (shared) type tag: numbers compare numerically,
    // strings lexicographically, binaries as unsigned bytes.
    if (kv1.name == "N") {
        return cmp(unwrap_number(*v1, cmp.diagnostic), unwrap_number(v2, cmp.diagnostic));
    }
    if (kv1.name == "S") {
        return cmp(std::string_view(kv1.value.GetString(), kv1.value.GetStringLength()),
                std::string_view(kv2.value.GetString(), kv2.value.GetStringLength()));
    }
    if (kv1.name == "B") {
        return cmp(base64_decode(kv1.value), base64_decode(kv2.value));
    }
    clogger.error("check_compare panic: LHS type equals RHS type, but one is in {N,S,B} while the other isn't");
    return false;
}
|
||||
|
||||
// Comparator functors used by check_compare() to implement the LT/LE/GE/GT
// operators. Each provides a generic operator(), a bytes overload (see the
// comment in cmp_lt), and a diagnostic name used in error messages.
struct cmp_lt {
    template <typename T> bool operator()(const T& lhs, const T& rhs) const { return lhs < rhs; }
    // We cannot use the normal comparison operators like "<" on the bytes
    // type, because they treat individual bytes as signed but we need to
    // compare them as *unsigned*. So we need a specialization for bytes.
    bool operator()(const bytes& lhs, const bytes& rhs) const { return compare_unsigned(lhs, rhs) < 0; }
    static constexpr const char* diagnostic = "LT operator";
};

struct cmp_le {
    template <typename T> bool operator()(const T& lhs, const T& rhs) const { return lhs <= rhs; }
    bool operator()(const bytes& lhs, const bytes& rhs) const { return compare_unsigned(lhs, rhs) <= 0; }
    static constexpr const char* diagnostic = "LE operator";
};

struct cmp_ge {
    template <typename T> bool operator()(const T& lhs, const T& rhs) const { return lhs >= rhs; }
    bool operator()(const bytes& lhs, const bytes& rhs) const { return compare_unsigned(lhs, rhs) >= 0; }
    static constexpr const char* diagnostic = "GE operator";
};

struct cmp_gt {
    template <typename T> bool operator()(const T& lhs, const T& rhs) const { return lhs > rhs; }
    bool operator()(const bytes& lhs, const bytes& rhs) const { return compare_unsigned(lhs, rhs) > 0; }
    static constexpr const char* diagnostic = "GT operator";
};
|
||||
|
||||
// True if v is between lb and ub, inclusive. Throws a ValidationException
// if lb > ub, as DynamoDB requires lower_bound <= upper_bound.
template <typename T>
bool check_BETWEEN(const T& v, const T& lb, const T& ub) {
    if (cmp_lt()(ub, lb)) {
        throw api_error("ValidationException",
                format("BETWEEN operator requires lower_bound <= upper_bound, but {} > {}", lb, ub));
    }
    return cmp_ge()(v, lb) && cmp_le()(v, ub);
}
|
||||
|
||||
static bool check_BETWEEN(const rjson::value* v, const rjson::value& lb, const rjson::value& ub) {
|
||||
if (!v) {
|
||||
return false;
|
||||
}
|
||||
if (!v->IsObject() || v->MemberCount() != 1) {
|
||||
throw api_error("ValidationException", format("BETWEEN operator encountered malformed AttributeValue: {}", *v));
|
||||
}
|
||||
if (!lb.IsObject() || lb.MemberCount() != 1) {
|
||||
throw api_error("ValidationException", format("BETWEEN operator encountered malformed AttributeValue: {}", lb));
|
||||
}
|
||||
if (!ub.IsObject() || ub.MemberCount() != 1) {
|
||||
throw api_error("ValidationException", format("BETWEEN operator encountered malformed AttributeValue: {}", ub));
|
||||
}
|
||||
|
||||
const auto& kv_v = *v->MemberBegin();
|
||||
const auto& kv_lb = *lb.MemberBegin();
|
||||
const auto& kv_ub = *ub.MemberBegin();
|
||||
if (kv_lb.name != kv_ub.name) {
|
||||
throw api_error(
|
||||
"ValidationException",
|
||||
format("BETWEEN operator requires the same type for lower and upper bound; instead got {} and {}",
|
||||
kv_lb.name, kv_ub.name));
|
||||
}
|
||||
if (kv_v.name != kv_lb.name) { // Cannot compare different types, so v is NOT between lb and ub.
|
||||
return false;
|
||||
}
|
||||
if (kv_v.name == "N") {
|
||||
const char* diag = "BETWEEN operator";
|
||||
return check_BETWEEN(unwrap_number(*v, diag), unwrap_number(lb, diag), unwrap_number(ub, diag));
|
||||
}
|
||||
if (kv_v.name == "S") {
|
||||
return check_BETWEEN(std::string_view(kv_v.value.GetString(), kv_v.value.GetStringLength()),
|
||||
std::string_view(kv_lb.value.GetString(), kv_lb.value.GetStringLength()),
|
||||
std::string_view(kv_ub.value.GetString(), kv_ub.value.GetStringLength()));
|
||||
}
|
||||
if (kv_v.name == "B") {
|
||||
return check_BETWEEN(base64_decode(kv_v.value), base64_decode(kv_lb.value), base64_decode(kv_ub.value));
|
||||
}
|
||||
throw api_error("ValidationException",
|
||||
format("BETWEEN operator requires AttributeValueList elements to be of type String, Number, or Binary; instead got {}",
|
||||
kv_lb.name));
|
||||
std::string_view val1(it1->value.GetString(), it1->value.GetStringLength());
|
||||
std::string_view val2(it2->value.GetString(), it2->value.GetStringLength());
|
||||
return val1.substr(0, val2.size()) == val2;
|
||||
}
|
||||
|
||||
// Verify one Expect condition on one attribute (whose content is "got")
|
||||
@@ -468,7 +142,7 @@ static bool verify_expected_one(const rjson::value& condition, const rjson::valu
|
||||
if (comparison_operator) {
|
||||
throw api_error("ValidationException", "Cannot combine Value with ComparisonOperator");
|
||||
}
|
||||
return check_EQ(got, *value);
|
||||
return got && check_EQ(*got, *value);
|
||||
} else if (exists) {
|
||||
if (comparison_operator) {
|
||||
throw api_error("ValidationException", "Cannot combine Exists with ComparisonOperator");
|
||||
@@ -482,61 +156,46 @@ static bool verify_expected_one(const rjson::value& condition, const rjson::valu
|
||||
if (!comparison_operator) {
|
||||
throw api_error("ValidationException", "Missing ComparisonOperator, Value or Exists");
|
||||
}
|
||||
if (!attribute_value_list || !attribute_value_list->IsArray()) {
|
||||
throw api_error("ValidationException", "With ComparisonOperator, AttributeValueList must be given and an array");
|
||||
}
|
||||
comparison_operator_type op = get_comparison_operator(*comparison_operator);
|
||||
switch (op) {
|
||||
case comparison_operator_type::EQ:
|
||||
verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
|
||||
return check_EQ(got, (*attribute_value_list)[0]);
|
||||
case comparison_operator_type::NE:
|
||||
verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
|
||||
return check_NE(got, (*attribute_value_list)[0]);
|
||||
case comparison_operator_type::LT:
|
||||
verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
|
||||
return check_compare(got, (*attribute_value_list)[0], cmp_lt{});
|
||||
case comparison_operator_type::LE:
|
||||
verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
|
||||
return check_compare(got, (*attribute_value_list)[0], cmp_le{});
|
||||
case comparison_operator_type::GT:
|
||||
verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
|
||||
return check_compare(got, (*attribute_value_list)[0], cmp_gt{});
|
||||
case comparison_operator_type::GE:
|
||||
verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
|
||||
return check_compare(got, (*attribute_value_list)[0], cmp_ge{});
|
||||
if (attribute_value_list->Size() != 1) {
|
||||
throw api_error("ValidationException", "EQ operator requires one element in AttributeValueList");
|
||||
}
|
||||
if (got) {
|
||||
const rjson::value& expected = (*attribute_value_list)[0];
|
||||
return check_EQ(*got, expected);
|
||||
}
|
||||
return false;
|
||||
case comparison_operator_type::BEGINS_WITH:
|
||||
verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
|
||||
return check_BEGINS_WITH(got, (*attribute_value_list)[0]);
|
||||
case comparison_operator_type::IN:
|
||||
verify_operand_count(attribute_value_list, nonempty(), *comparison_operator);
|
||||
return check_IN(got, *attribute_value_list);
|
||||
case comparison_operator_type::IS_NULL:
|
||||
verify_operand_count(attribute_value_list, empty(), *comparison_operator);
|
||||
return check_NULL(got);
|
||||
case comparison_operator_type::NOT_NULL:
|
||||
verify_operand_count(attribute_value_list, empty(), *comparison_operator);
|
||||
return check_NOT_NULL(got);
|
||||
case comparison_operator_type::BETWEEN:
|
||||
verify_operand_count(attribute_value_list, exact_size(2), *comparison_operator);
|
||||
return check_BETWEEN(got, (*attribute_value_list)[0], (*attribute_value_list)[1]);
|
||||
case comparison_operator_type::CONTAINS:
|
||||
verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
|
||||
return check_CONTAINS(got, (*attribute_value_list)[0]);
|
||||
case comparison_operator_type::NOT_CONTAINS:
|
||||
verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
|
||||
return check_NOT_CONTAINS(got, (*attribute_value_list)[0]);
|
||||
if (attribute_value_list->Size() != 1) {
|
||||
throw api_error("ValidationException", "BEGINS_WITH operator requires one element in AttributeValueList");
|
||||
}
|
||||
if (got) {
|
||||
const rjson::value& expected = (*attribute_value_list)[0];
|
||||
return check_BEGINS_WITH(*got, expected);
|
||||
}
|
||||
return false;
|
||||
default:
|
||||
// FIXME: implement all the missing types, so there will be no default here.
|
||||
throw api_error("ValidationException", format("ComparisonOperator {} is not yet supported", *comparison_operator));
|
||||
}
|
||||
throw std::logic_error(format("Internal error: corrupted operator enum: {}", int(op)));
|
||||
}
|
||||
}
|
||||
|
||||
// Check if the existing values of the item (previous_item) match the
|
||||
// Verify that the existing values of the item (previous_item) match the
|
||||
// conditions given by the Expected and ConditionalOperator parameters
|
||||
// (if they exist) in the request (an UpdateItem, PutItem or DeleteItem).
|
||||
// This function can throw an ValidationException API error if there
|
||||
// This function will throw a ConditionalCheckFailedException API error
|
||||
// if the values do not match the condition, or ValidationException if there
|
||||
// are errors in the format of the condition itself.
|
||||
bool verify_expected(const rjson::value& req, const std::unique_ptr<rjson::value>& previous_item) {
|
||||
void verify_expected(const rjson::value& req, const std::unique_ptr<rjson::value>& previous_item) {
|
||||
const rjson::value* expected = rjson::find(req, "Expected");
|
||||
if (!expected) {
|
||||
return true;
|
||||
return;
|
||||
}
|
||||
if (!expected->IsObject()) {
|
||||
throw api_error("ValidationException", "'Expected' parameter, if given, must be an object");
|
||||
@@ -565,123 +224,22 @@ bool verify_expected(const rjson::value& req, const std::unique_ptr<rjson::value
|
||||
for (auto it = expected->MemberBegin(); it != expected->MemberEnd(); ++it) {
|
||||
const rjson::value* got = nullptr;
|
||||
if (previous_item && previous_item->IsObject() && previous_item->HasMember("Item")) {
|
||||
got = rjson::find((*previous_item)["Item"], rjson::to_string_view(it->name));
|
||||
got = rjson::find((*previous_item)["Item"], rjson::string_ref_type(it->name.GetString()));
|
||||
}
|
||||
bool success = verify_expected_one(it->value, got);
|
||||
if (success && !require_all) {
|
||||
// When !require_all, one success is enough!
|
||||
return true;
|
||||
return;
|
||||
} else if (!success && require_all) {
|
||||
// When require_all, one failure is enough!
|
||||
return false;
|
||||
throw api_error("ConditionalCheckFailedException", "Failed condition.");
|
||||
}
|
||||
}
|
||||
// If we got here and require_all, none of the checks failed, so succeed.
|
||||
// If we got here and !require_all, all of the checks failed, so fail.
|
||||
return require_all;
|
||||
}
|
||||
|
||||
bool calculate_primitive_condition(const parsed::primitive_condition& cond,
|
||||
std::unordered_set<std::string>& used_attribute_values,
|
||||
std::unordered_set<std::string>& used_attribute_names,
|
||||
const rjson::value& req,
|
||||
schema_ptr schema,
|
||||
const std::unique_ptr<rjson::value>& previous_item) {
|
||||
std::vector<rjson::value> calculated_values;
|
||||
calculated_values.reserve(cond._values.size());
|
||||
for (const parsed::value& v : cond._values) {
|
||||
calculated_values.push_back(calculate_value(v,
|
||||
cond._op == parsed::primitive_condition::type::VALUE ?
|
||||
calculate_value_caller::ConditionExpressionAlone :
|
||||
calculate_value_caller::ConditionExpression,
|
||||
rjson::find(req, "ExpressionAttributeValues"),
|
||||
used_attribute_names, used_attribute_values,
|
||||
req, schema, previous_item));
|
||||
}
|
||||
switch (cond._op) {
|
||||
case parsed::primitive_condition::type::BETWEEN:
|
||||
if (calculated_values.size() != 3) {
|
||||
// Shouldn't happen unless we have a bug in the parser
|
||||
throw std::logic_error(format("Wrong number of values {} in BETWEEN primitive_condition", cond._values.size()));
|
||||
}
|
||||
return check_BETWEEN(&calculated_values[0], calculated_values[1], calculated_values[2]);
|
||||
case parsed::primitive_condition::type::IN:
|
||||
return check_IN(calculated_values);
|
||||
case parsed::primitive_condition::type::VALUE:
|
||||
if (calculated_values.size() != 1) {
|
||||
// Shouldn't happen unless we have a bug in the parser
|
||||
throw std::logic_error(format("Unexpected values in primitive_condition", cond._values.size()));
|
||||
}
|
||||
// Unwrap the boolean wrapped as the value (if it is a boolean)
|
||||
if (calculated_values[0].IsObject() && calculated_values[0].MemberCount() == 1) {
|
||||
auto it = calculated_values[0].MemberBegin();
|
||||
if (it->name == "BOOL" && it->value.IsBool()) {
|
||||
return it->value.GetBool();
|
||||
}
|
||||
}
|
||||
throw api_error("ValidationException",
|
||||
format("ConditionExpression: condition results in a non-boolean value: {}",
|
||||
calculated_values[0]));
|
||||
default:
|
||||
// All the rest of the operators have exactly two parameters (and unless
|
||||
// we have a bug in the parser, that's what we have in the parsed object:
|
||||
if (calculated_values.size() != 2) {
|
||||
throw std::logic_error(format("Wrong number of values {} in primitive_condition object", cond._values.size()));
|
||||
}
|
||||
}
|
||||
switch (cond._op) {
|
||||
case parsed::primitive_condition::type::EQ:
|
||||
return check_EQ(&calculated_values[0], calculated_values[1]);
|
||||
case parsed::primitive_condition::type::NE:
|
||||
return check_NE(&calculated_values[0], calculated_values[1]);
|
||||
case parsed::primitive_condition::type::GT:
|
||||
return check_compare(&calculated_values[0], calculated_values[1], cmp_gt{});
|
||||
case parsed::primitive_condition::type::GE:
|
||||
return check_compare(&calculated_values[0], calculated_values[1], cmp_ge{});
|
||||
case parsed::primitive_condition::type::LT:
|
||||
return check_compare(&calculated_values[0], calculated_values[1], cmp_lt{});
|
||||
case parsed::primitive_condition::type::LE:
|
||||
return check_compare(&calculated_values[0], calculated_values[1], cmp_le{});
|
||||
default:
|
||||
// Shouldn't happen unless we have a bug in the parser
|
||||
throw std::logic_error(format("Unknown type {} in primitive_condition object", (int)(cond._op)));
|
||||
if (!require_all) {
|
||||
throw api_error("ConditionalCheckFailedException", "None of ORed Expect conditions were successful.");
|
||||
}
|
||||
}
|
||||
|
||||
// Check if the existing values of the item (previous_item) match the
|
||||
// conditions given by the given parsed ConditionExpression.
|
||||
bool verify_condition_expression(
|
||||
const parsed::condition_expression& condition_expression,
|
||||
std::unordered_set<std::string>& used_attribute_values,
|
||||
std::unordered_set<std::string>& used_attribute_names,
|
||||
const rjson::value& req,
|
||||
schema_ptr schema,
|
||||
const std::unique_ptr<rjson::value>& previous_item) {
|
||||
if (condition_expression.empty()) {
|
||||
return true;
|
||||
}
|
||||
bool ret = std::visit(overloaded_functor {
|
||||
[&] (const parsed::primitive_condition& cond) -> bool {
|
||||
return calculate_primitive_condition(cond, used_attribute_values,
|
||||
used_attribute_names, req, schema, previous_item);
|
||||
},
|
||||
[&] (const parsed::condition_expression::condition_list& list) -> bool {
|
||||
auto verify_condition = [&] (const parsed::condition_expression& e) {
|
||||
return verify_condition_expression(e, used_attribute_values,
|
||||
used_attribute_names, req, schema, previous_item);
|
||||
};
|
||||
switch (list.op) {
|
||||
case '&':
|
||||
return boost::algorithm::all_of(list.conditions, verify_condition);
|
||||
case '|':
|
||||
return boost::algorithm::any_of(list.conditions, verify_condition);
|
||||
default:
|
||||
// Shouldn't happen unless we have a bug in the parser
|
||||
throw std::logic_error("bad operator in condition_list");
|
||||
}
|
||||
}
|
||||
}, condition_expression._expression);
|
||||
return condition_expression._negated ? !ret : ret;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -37,13 +37,13 @@
|
||||
namespace alternator {
|
||||
|
||||
enum class comparison_operator_type {
|
||||
EQ, NE, LE, LT, GE, GT, IN, BETWEEN, CONTAINS, NOT_CONTAINS, IS_NULL, NOT_NULL, BEGINS_WITH
|
||||
EQ, NE, LE, LT, GE, GT, IN, BETWEEN, CONTAINS, IS_NULL, NOT_NULL, BEGINS_WITH
|
||||
};
|
||||
|
||||
comparison_operator_type get_comparison_operator(const rjson::value& comparison_operator);
|
||||
|
||||
::shared_ptr<cql3::restrictions::statement_restrictions> get_filtering_restrictions(schema_ptr schema, const column_definition& attrs_col, const rjson::value& query_filter);
|
||||
|
||||
bool verify_expected(const rjson::value& req, const std::unique_ptr<rjson::value>& previous_item);
|
||||
void verify_expected(const rjson::value& req, const std::unique_ptr<rjson::value>& previous_item);
|
||||
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -25,59 +25,47 @@
|
||||
#include <seastar/http/httpd.hh>
|
||||
#include "seastarx.hh"
|
||||
#include <seastar/json/json_elements.hh>
|
||||
#include <seastar/core/sharded.hh>
|
||||
|
||||
#include "service/storage_proxy.hh"
|
||||
#include "service/migration_manager.hh"
|
||||
#include "service/client_state.hh"
|
||||
|
||||
#include "alternator/error.hh"
|
||||
#include "stats.hh"
|
||||
#include "rjson.hh"
|
||||
|
||||
namespace alternator {
|
||||
|
||||
class executor : public peering_sharded_service<executor> {
|
||||
class executor {
|
||||
service::storage_proxy& _proxy;
|
||||
service::migration_manager& _mm;
|
||||
// An smp_service_group to be used for limiting the concurrency when
|
||||
// forwarding Alternator request between shards - if necessary for LWT.
|
||||
smp_service_group _ssg;
|
||||
|
||||
public:
|
||||
using client_state = service::client_state;
|
||||
using request_return_type = std::variant<json::json_return_type, api_error>;
|
||||
stats _stats;
|
||||
static constexpr auto ATTRS_COLUMN_NAME = ":attrs";
|
||||
static constexpr auto KEYSPACE_NAME_PREFIX = "alternator_";
|
||||
static constexpr std::string_view INTERNAL_TABLE_PREFIX = ".scylla.alternator.";
|
||||
static constexpr auto KEYSPACE_NAME = "alternator";
|
||||
|
||||
executor(service::storage_proxy& proxy, service::migration_manager& mm, smp_service_group ssg)
|
||||
: _proxy(proxy), _mm(mm), _ssg(ssg) {}
|
||||
executor(service::storage_proxy& proxy, service::migration_manager& mm) : _proxy(proxy), _mm(mm) {}
|
||||
|
||||
future<request_return_type> create_table(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> describe_table(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> delete_table(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> put_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> get_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> delete_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> update_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> list_tables(client_state& client_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> scan(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> describe_endpoints(client_state& client_state, service_permit permit, rjson::value request, std::string host_header);
|
||||
future<request_return_type> batch_write_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> batch_get_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> query(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> tag_resource(client_state& client_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> untag_resource(client_state& client_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> list_tags_of_resource(client_state& client_state, service_permit permit, rjson::value request);
|
||||
future<json::json_return_type> create_table(client_state& client_state, std::string content);
|
||||
future<json::json_return_type> describe_table(client_state& client_state, std::string content);
|
||||
future<json::json_return_type> delete_table(client_state& client_state, std::string content);
|
||||
future<json::json_return_type> put_item(client_state& client_state, std::string content);
|
||||
future<json::json_return_type> get_item(client_state& client_state, std::string content);
|
||||
future<json::json_return_type> delete_item(client_state& client_state, std::string content);
|
||||
future<json::json_return_type> update_item(client_state& client_state, std::string content);
|
||||
future<json::json_return_type> list_tables(client_state& client_state, std::string content);
|
||||
future<json::json_return_type> scan(client_state& client_state, std::string content);
|
||||
future<json::json_return_type> describe_endpoints(client_state& client_state, std::string content, std::string host_header);
|
||||
future<json::json_return_type> batch_write_item(client_state& client_state, std::string content);
|
||||
future<json::json_return_type> batch_get_item(client_state& client_state, std::string content);
|
||||
future<json::json_return_type> query(client_state& client_state, std::string content);
|
||||
|
||||
future<> start();
|
||||
future<> stop() { return make_ready_future<>(); }
|
||||
|
||||
future<> create_keyspace(std::string_view keyspace_name);
|
||||
future<> maybe_create_keyspace();
|
||||
|
||||
static tracing::trace_state_ptr maybe_trace_query(client_state& client_state, sstring_view op, sstring_view query);
|
||||
static void maybe_trace_query(client_state& client_state, sstring_view op, sstring_view query);
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
@@ -22,7 +22,6 @@
|
||||
#include "expressions.hh"
|
||||
#include "alternator/expressionsLexer.hpp"
|
||||
#include "alternator/expressionsParser.hpp"
|
||||
#include "utils/overloaded_functor.hh"
|
||||
|
||||
#include <seastarx.hh>
|
||||
|
||||
@@ -66,19 +65,13 @@ parse_projection_expression(std::string query) {
|
||||
}
|
||||
}
|
||||
|
||||
parsed::condition_expression
|
||||
parse_condition_expression(std::string query) {
|
||||
try {
|
||||
return do_with_parser(query, std::mem_fn(&expressionsParser::condition_expression));
|
||||
} catch (...) {
|
||||
throw expressions_syntax_error(format("Failed parsing ConditionExpression '{}': {}", query, std::current_exception()));
|
||||
}
|
||||
}
|
||||
template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
|
||||
template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;
|
||||
|
||||
namespace parsed {
|
||||
|
||||
void update_expression::add(update_expression::action a) {
|
||||
std::visit(overloaded_functor {
|
||||
std::visit(overloaded {
|
||||
[&] (action::set&) { seen_set = true; },
|
||||
[&] (action::remove&) { seen_remove = true; },
|
||||
[&] (action::add&) { seen_add = true; },
|
||||
@@ -101,27 +94,5 @@ void update_expression::append(update_expression other) {
|
||||
seen_del |= other.seen_del;
|
||||
}
|
||||
|
||||
void condition_expression::append(condition_expression&& a, char op) {
|
||||
std::visit(overloaded_functor {
|
||||
[&] (condition_list& x) {
|
||||
// If 'a' has a single condition, we could, instead of inserting
|
||||
// it insert its single condition (possibly negated if a._negated)
|
||||
// But considering it we don't evaluate these expressions many
|
||||
// times, this optimization is not worth extra code complexity.
|
||||
if (!x.conditions.empty() && x.op != op) {
|
||||
// Shouldn't happen unless we have a bug in the parser
|
||||
throw std::logic_error("condition_expression::append called with mixed operators");
|
||||
}
|
||||
x.conditions.push_back(std::move(a));
|
||||
x.op = op;
|
||||
},
|
||||
[&] (primitive_condition& x) {
|
||||
// Shouldn't happen unless we have a bug in the parser
|
||||
throw std::logic_error("condition_expression::append called on primitive_condition");
|
||||
}
|
||||
}, _expression);
|
||||
}
|
||||
|
||||
|
||||
} // namespace parsed
|
||||
} // namespace alternator
|
||||
|
||||
@@ -145,12 +145,6 @@ REMOVE: R E M O V E;
|
||||
ADD: A D D;
|
||||
DELETE: D E L E T E;
|
||||
|
||||
AND: A N D;
|
||||
OR: O R;
|
||||
NOT: N O T;
|
||||
BETWEEN: B E T W E E N;
|
||||
IN: I N;
|
||||
|
||||
fragment ALPHA: 'A'..'Z' | 'a'..'z';
|
||||
fragment DIGIT: '0'..'9';
|
||||
fragment ALNUM: ALPHA | DIGIT | '_';
|
||||
@@ -171,19 +165,19 @@ path returns [parsed::path p]:
|
||||
| '[' INTEGER ']' { $p.add_index(std::stoi($INTEGER.text)); }
|
||||
)*;
|
||||
|
||||
value returns [parsed::value v]:
|
||||
VALREF { $v.set_valref($VALREF.text); }
|
||||
| path { $v.set_path($path.p); }
|
||||
| NAME { $v.set_func_name($NAME.text); }
|
||||
'(' x=value { $v.add_func_parameter($x.v); }
|
||||
(',' x=value { $v.add_func_parameter($x.v); })*
|
||||
update_expression_set_value returns [parsed::value v]:
|
||||
VALREF { $v.set_valref($VALREF.text); }
|
||||
| path { $v.set_path($path.p); }
|
||||
| NAME { $v.set_func_name($NAME.text); }
|
||||
'(' x=update_expression_set_value { $v.add_func_parameter($x.v); }
|
||||
(',' x=update_expression_set_value { $v.add_func_parameter($x.v); })*
|
||||
')'
|
||||
;
|
||||
|
||||
update_expression_set_rhs returns [parsed::set_rhs rhs]:
|
||||
v=value { $rhs.set_value(std::move($v.v)); }
|
||||
( '+' v=value { $rhs.set_plus(std::move($v.v)); }
|
||||
| '-' v=value { $rhs.set_minus(std::move($v.v)); }
|
||||
v=update_expression_set_value { $rhs.set_value(std::move($v.v)); }
|
||||
( '+' v=update_expression_set_value { $rhs.set_plus(std::move($v.v)); }
|
||||
| '-' v=update_expression_set_value { $rhs.set_minus(std::move($v.v)); }
|
||||
)?
|
||||
;
|
||||
|
||||
@@ -218,48 +212,3 @@ update_expression returns [parsed::update_expression e]:
|
||||
projection_expression returns [std::vector<parsed::path> v]:
|
||||
p=path { $v.push_back(std::move($p.p)); }
|
||||
(',' p=path { $v.push_back(std::move($p.p)); } )* EOF;
|
||||
|
||||
|
||||
primitive_condition returns [parsed::primitive_condition c]:
|
||||
v=value { $c.add_value(std::move($v.v));
|
||||
$c.set_operator(parsed::primitive_condition::type::VALUE); }
|
||||
( ( '=' { $c.set_operator(parsed::primitive_condition::type::EQ); }
|
||||
| '<' '>' { $c.set_operator(parsed::primitive_condition::type::NE); }
|
||||
| '<' { $c.set_operator(parsed::primitive_condition::type::LT); }
|
||||
| '<' '=' { $c.set_operator(parsed::primitive_condition::type::LE); }
|
||||
| '>' { $c.set_operator(parsed::primitive_condition::type::GT); }
|
||||
| '>' '=' { $c.set_operator(parsed::primitive_condition::type::GE); }
|
||||
)
|
||||
v=value { $c.add_value(std::move($v.v)); }
|
||||
| BETWEEN { $c.set_operator(parsed::primitive_condition::type::BETWEEN); }
|
||||
v=value { $c.add_value(std::move($v.v)); }
|
||||
AND
|
||||
v=value { $c.add_value(std::move($v.v)); }
|
||||
| IN '(' { $c.set_operator(parsed::primitive_condition::type::IN); }
|
||||
v=value { $c.add_value(std::move($v.v)); }
|
||||
(',' v=value { $c.add_value(std::move($v.v)); })*
|
||||
')'
|
||||
)?
|
||||
;
|
||||
|
||||
// The following rules for parsing boolean expressions are verbose and
|
||||
// somewhat strange because of Antlr 3's limitations on recursive rules,
|
||||
// common rule prefixes, and (lack of) support for operator precedence.
|
||||
// These rules could have been written more clearly using a more powerful
|
||||
// parser generator - such as Yacc.
|
||||
boolean_expression returns [parsed::condition_expression e]:
|
||||
b=boolean_expression_1 { $e.append(std::move($b.e), '|'); }
|
||||
(OR b=boolean_expression_1 { $e.append(std::move($b.e), '|'); } )*
|
||||
;
|
||||
boolean_expression_1 returns [parsed::condition_expression e]:
|
||||
b=boolean_expression_2 { $e.append(std::move($b.e), '&'); }
|
||||
(AND b=boolean_expression_2 { $e.append(std::move($b.e), '&'); } )*
|
||||
;
|
||||
boolean_expression_2 returns [parsed::condition_expression e]:
|
||||
p=primitive_condition { $e.set_primitive(std::move($p.c)); }
|
||||
| NOT b=boolean_expression_2 { $e = std::move($b.e); $e.apply_not(); }
|
||||
| '(' b=boolean_expression ')' { $e = std::move($b.e); }
|
||||
;
|
||||
|
||||
condition_expression returns [parsed::condition_expression e]:
|
||||
boolean_expression { e=std::move($boolean_expression.e); } EOF;
|
||||
|
||||
@@ -36,6 +36,6 @@ public:
|
||||
|
||||
parsed::update_expression parse_update_expression(std::string query);
|
||||
std::vector<parsed::path> parse_projection_expression(std::string query);
|
||||
parsed::condition_expression parse_condition_expression(std::string query);
|
||||
|
||||
|
||||
} /* namespace alternator */
|
||||
|
||||
@@ -1,78 +0,0 @@
|
||||
/*
|
||||
* Copyright 2020 ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <string>
|
||||
#include <unordered_set>
|
||||
|
||||
#include "rjson.hh"
|
||||
#include "schema_fwd.hh"
|
||||
|
||||
#include "expressions_types.hh"
|
||||
|
||||
namespace alternator {
|
||||
|
||||
// calculate_value() behaves slightly different (especially, different
|
||||
// functions supported) when used in different types of expressions, as
|
||||
// enumerated in this enum:
|
||||
enum class calculate_value_caller {
|
||||
UpdateExpression, ConditionExpression, ConditionExpressionAlone
|
||||
};
|
||||
|
||||
inline std::ostream& operator<<(std::ostream& out, calculate_value_caller caller) {
|
||||
switch (caller) {
|
||||
case calculate_value_caller::UpdateExpression:
|
||||
out << "UpdateExpression";
|
||||
break;
|
||||
case calculate_value_caller::ConditionExpression:
|
||||
out << "ConditionExpression";
|
||||
break;
|
||||
case calculate_value_caller::ConditionExpressionAlone:
|
||||
out << "ConditionExpression";
|
||||
break;
|
||||
default:
|
||||
out << "unknown type of expression";
|
||||
break;
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
bool check_CONTAINS(const rjson::value* v1, const rjson::value& v2);
|
||||
|
||||
rjson::value calculate_value(const parsed::value& v,
|
||||
calculate_value_caller caller,
|
||||
const rjson::value* expression_attribute_values,
|
||||
std::unordered_set<std::string>& used_attribute_names,
|
||||
std::unordered_set<std::string>& used_attribute_values,
|
||||
const rjson::value& update_info,
|
||||
schema_ptr schema,
|
||||
const std::unique_ptr<rjson::value>& previous_item);
|
||||
|
||||
bool verify_condition_expression(
|
||||
const parsed::condition_expression& condition_expression,
|
||||
std::unordered_set<std::string>& used_attribute_values,
|
||||
std::unordered_set<std::string>& used_attribute_names,
|
||||
const rjson::value& req,
|
||||
schema_ptr schema,
|
||||
const std::unique_ptr<rjson::value>& previous_item);
|
||||
|
||||
} /* namespace alternator */
|
||||
@@ -88,15 +88,6 @@ struct value {
|
||||
void add_func_parameter(value v) {
|
||||
std::get<function_call>(_value)._parameters.emplace_back(std::move(v));
|
||||
}
|
||||
bool is_valref() const {
|
||||
return std::holds_alternative<std::string>(_value);
|
||||
}
|
||||
bool is_path() const {
|
||||
return std::holds_alternative<path>(_value);
|
||||
}
|
||||
bool is_func() const {
|
||||
return std::holds_alternative<function_call>(_value);
|
||||
}
|
||||
};
|
||||
|
||||
// The right-hand-side of a SET in an update expression can be either a
|
||||
@@ -171,58 +162,5 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
// A primitive_condition is a condition expression involving one condition,
|
||||
// while the full condition_expression below adds boolean logic over these
|
||||
// primitive conditions.
|
||||
// The supported primitive conditions are:
|
||||
// 1. Binary operators - v1 OP v2, where OP is =, <>, <, <=, >, or >= and
|
||||
// v1 and v2 are values - from the item (an attribute path), the query
|
||||
// (a ":val" reference), or a function of the the above (only the size()
|
||||
// function is supported).
|
||||
// 2. Ternary operator - v1 BETWEEN v2 and v3 (means v1 >= v2 AND v1 <= v3).
|
||||
// 3. N-ary operator - v1 IN ( v2, v3, ... )
|
||||
// 4. A single function call (attribute_exists etc.). The parser actually
|
||||
// accepts a more general "value" here but later stages reject a value
|
||||
// which is not a function call (because DynamoDB does it too).
|
||||
class primitive_condition {
|
||||
public:
|
||||
enum class type {
|
||||
UNDEFINED, VALUE, EQ, NE, LT, LE, GT, GE, BETWEEN, IN
|
||||
};
|
||||
type _op = type::UNDEFINED;
|
||||
std::vector<value> _values;
|
||||
void set_operator(type op) {
|
||||
_op = op;
|
||||
}
|
||||
void add_value(value&& v) {
|
||||
_values.push_back(std::move(v));
|
||||
}
|
||||
bool empty() const {
|
||||
return _op == type::UNDEFINED;
|
||||
}
|
||||
};
|
||||
|
||||
class condition_expression {
|
||||
public:
|
||||
bool _negated = false; // If true, the entire condition is negated
|
||||
struct condition_list {
|
||||
char op = '|'; // '&' or '|'
|
||||
std::vector<condition_expression> conditions;
|
||||
};
|
||||
std::variant<primitive_condition, condition_list> _expression = condition_list();
|
||||
|
||||
void set_primitive(primitive_condition&& p) {
|
||||
_expression = std::move(p);
|
||||
}
|
||||
void append(condition_expression&& c, char op);
|
||||
void apply_not() {
|
||||
_negated = !_negated;
|
||||
}
|
||||
bool empty() const {
|
||||
return std::holds_alternative<condition_list>(_expression) &&
|
||||
std::get<condition_list>(_expression).conditions.empty();
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace parsed
|
||||
} // namespace alternator
|
||||
|
||||
@@ -22,108 +22,14 @@
|
||||
#include "rjson.hh"
|
||||
#include "error.hh"
|
||||
#include <seastar/core/print.hh>
|
||||
#include <seastar/core/thread.hh>
|
||||
|
||||
namespace rjson {
|
||||
|
||||
static allocator the_allocator;
|
||||
|
||||
/*
|
||||
* This wrapper class adds nested level checks to rapidjson's handlers.
|
||||
* Each rapidjson handler implements functions for accepting JSON values,
|
||||
* which includes strings, numbers, objects, arrays, etc.
|
||||
* Parsing objects and arrays needs to be performed carefully with regard
|
||||
* to stack overflow - each object/array layer adds another stack frame
|
||||
* to parsing, printing and destroying the parent JSON document.
|
||||
* To prevent stack overflow, a rapidjson handler can be wrapped with
|
||||
* guarded_json_handler, which accepts an additional max_nested_level parameter.
|
||||
* After trying to exceed the max nested level, a proper rjson::error will be thrown.
|
||||
*/
|
||||
template<typename Handler, bool EnableYield>
|
||||
struct guarded_yieldable_json_handler : public Handler {
|
||||
size_t _nested_level = 0;
|
||||
size_t _max_nested_level;
|
||||
public:
|
||||
using handler_base = Handler;
|
||||
|
||||
explicit guarded_yieldable_json_handler(size_t max_nested_level) : _max_nested_level(max_nested_level) {}
|
||||
guarded_yieldable_json_handler(string_buffer& buf, size_t max_nested_level)
|
||||
: handler_base(buf), _max_nested_level(max_nested_level) {}
|
||||
|
||||
void Parse(const char* str, size_t length) {
|
||||
rapidjson::MemoryStream ms(static_cast<const char*>(str), length * sizeof(typename encoding::Ch));
|
||||
rapidjson::EncodedInputStream<encoding, rapidjson::MemoryStream> is(ms);
|
||||
rapidjson::GenericReader<encoding, encoding, allocator> reader(&the_allocator);
|
||||
reader.Parse(is, *this);
|
||||
if (reader.HasParseError()) {
|
||||
throw rjson::error(format("Parsing JSON failed: {}", rapidjson::GetParseError_En(reader.GetParseErrorCode())));
|
||||
}
|
||||
//NOTICE: The handler has parsed the string, but in case of rapidjson::GenericDocument
|
||||
// the data now resides in an internal stack_ variable, which is private instead of
|
||||
// protected... which means we cannot simply access its data. Fortunately, another
|
||||
// function for populating documents from SAX events can be abused to extract the data
|
||||
// from the stack via gadget-oriented programming - we use an empty event generator
|
||||
// which does nothing, and use it to call Populate(), which assumes that the generator
|
||||
// will fill the stack with something. It won't, but our stack is already filled with
|
||||
// data we want to steal, so once Populate() ends, our document will be properly parsed.
|
||||
// A proper solution could be programmed once rapidjson declares this stack_ variable
|
||||
// as protected instead of private, so that this class can access it.
|
||||
auto dummy_generator = [](handler_base&){return true;};
|
||||
handler_base::Populate(dummy_generator);
|
||||
}
|
||||
|
||||
bool StartObject() {
|
||||
++_nested_level;
|
||||
check_nested_level();
|
||||
maybe_yield();
|
||||
return handler_base::StartObject();
|
||||
}
|
||||
|
||||
bool EndObject(rapidjson::SizeType elements_count = 0) {
|
||||
--_nested_level;
|
||||
return handler_base::EndObject(elements_count);
|
||||
}
|
||||
|
||||
bool StartArray() {
|
||||
++_nested_level;
|
||||
check_nested_level();
|
||||
maybe_yield();
|
||||
return handler_base::StartArray();
|
||||
}
|
||||
|
||||
bool EndArray(rapidjson::SizeType elements_count = 0) {
|
||||
--_nested_level;
|
||||
return handler_base::EndArray(elements_count);
|
||||
}
|
||||
|
||||
bool Null() { maybe_yield(); return handler_base::Null(); }
|
||||
bool Bool(bool b) { maybe_yield(); return handler_base::Bool(b); }
|
||||
bool Int(int i) { maybe_yield(); return handler_base::Int(i); }
|
||||
bool Uint(unsigned u) { maybe_yield(); return handler_base::Uint(u); }
|
||||
bool Int64(int64_t i64) { maybe_yield(); return handler_base::Int64(i64); }
|
||||
bool Uint64(uint64_t u64) { maybe_yield(); return handler_base::Uint64(u64); }
|
||||
bool Double(double d) { maybe_yield(); return handler_base::Double(d); }
|
||||
bool String(const value::Ch* str, size_t length, bool copy = false) { maybe_yield(); return handler_base::String(str, length, copy); }
|
||||
bool Key(const value::Ch* str, size_t length, bool copy = false) { maybe_yield(); return handler_base::Key(str, length, copy); }
|
||||
|
||||
|
||||
protected:
|
||||
static void maybe_yield() {
|
||||
if constexpr (EnableYield) {
|
||||
thread::maybe_yield();
|
||||
}
|
||||
}
|
||||
|
||||
void check_nested_level() const {
|
||||
if (RAPIDJSON_UNLIKELY(_nested_level > _max_nested_level)) {
|
||||
throw rjson::error(format("Max nested level reached: {}", _max_nested_level));
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
std::string print(const rjson::value& value) {
|
||||
string_buffer buffer;
|
||||
guarded_yieldable_json_handler<writer, false> writer(buffer, 78);
|
||||
writer writer(buffer);
|
||||
value.Accept(writer);
|
||||
return std::string(buffer.GetString());
|
||||
}
|
||||
@@ -132,9 +38,13 @@ rjson::value copy(const rjson::value& value) {
|
||||
return rjson::value(value, the_allocator);
|
||||
}
|
||||
|
||||
rjson::value parse(std::string_view str) {
|
||||
guarded_yieldable_json_handler<document, false> d(78);
|
||||
d.Parse(str.data(), str.size());
|
||||
rjson::value parse(const std::string& str) {
|
||||
return parse_raw(str.c_str(), str.size());
|
||||
}
|
||||
|
||||
rjson::value parse_raw(const char* c_str, size_t size) {
|
||||
rjson::document d;
|
||||
d.Parse(c_str, size);
|
||||
if (d.HasParseError()) {
|
||||
throw rjson::error(format("Parsing JSON failed: {}", GetParseError_En(d.GetParseError())));
|
||||
}
|
||||
@@ -142,22 +52,8 @@ rjson::value parse(std::string_view str) {
|
||||
return std::move(v);
|
||||
}
|
||||
|
||||
rjson::value parse_yieldable(std::string_view str) {
|
||||
guarded_yieldable_json_handler<document, true> d(78);
|
||||
d.Parse(str.data(), str.size());
|
||||
if (d.HasParseError()) {
|
||||
throw rjson::error(format("Parsing JSON failed: {}", GetParseError_En(d.GetParseError())));
|
||||
}
|
||||
rjson::value& v = d;
|
||||
return std::move(v);
|
||||
}
|
||||
|
||||
rjson::value& get(rjson::value& value, std::string_view name) {
|
||||
// Although FindMember() has a variant taking a StringRef, it ignores the
|
||||
// given length (see https://github.com/Tencent/rapidjson/issues/1649).
|
||||
// Luckily, the variant taking a GenericValue doesn't share this bug,
|
||||
// and we can create a string GenericValue without copying the string.
|
||||
auto member_it = value.FindMember(rjson::value(name.data(), name.size()));
|
||||
rjson::value& get(rjson::value& value, rjson::string_ref_type name) {
|
||||
auto member_it = value.FindMember(name);
|
||||
if (member_it != value.MemberEnd())
|
||||
return member_it->value;
|
||||
else {
|
||||
@@ -165,8 +61,8 @@ rjson::value& get(rjson::value& value, std::string_view name) {
|
||||
}
|
||||
}
|
||||
|
||||
const rjson::value& get(const rjson::value& value, std::string_view name) {
|
||||
auto member_it = value.FindMember(rjson::value(name.data(), name.size()));
|
||||
const rjson::value& get(const rjson::value& value, rjson::string_ref_type name) {
|
||||
auto member_it = value.FindMember(name);
|
||||
if (member_it != value.MemberEnd())
|
||||
return member_it->value;
|
||||
else {
|
||||
@@ -186,48 +82,24 @@ rjson::value from_string(const char* str, size_t size) {
|
||||
return rjson::value(str, size, the_allocator);
|
||||
}
|
||||
|
||||
rjson::value from_string(std::string_view view) {
|
||||
return rjson::value(view.data(), view.size(), the_allocator);
|
||||
}
|
||||
|
||||
const rjson::value* find(const rjson::value& value, std::string_view name) {
|
||||
// Although FindMember() has a variant taking a StringRef, it ignores the
|
||||
// given length (see https://github.com/Tencent/rapidjson/issues/1649).
|
||||
// Luckily, the variant taking a GenericValue doesn't share this bug,
|
||||
// and we can create a string GenericValue without copying the string.
|
||||
auto member_it = value.FindMember(rjson::value(name.data(), name.size()));
|
||||
const rjson::value* find(const rjson::value& value, string_ref_type name) {
|
||||
auto member_it = value.FindMember(name);
|
||||
return member_it != value.MemberEnd() ? &member_it->value : nullptr;
|
||||
}
|
||||
|
||||
rjson::value* find(rjson::value& value, std::string_view name) {
|
||||
auto member_it = value.FindMember(rjson::value(name.data(), name.size()));
|
||||
rjson::value* find(rjson::value& value, string_ref_type name) {
|
||||
auto member_it = value.FindMember(name);
|
||||
return member_it != value.MemberEnd() ? &member_it->value : nullptr;
|
||||
}
|
||||
|
||||
bool remove_member(rjson::value& value, std::string_view name) {
|
||||
// Although RemoveMember() has a variant taking a StringRef, it ignores
|
||||
// given length (see https://github.com/Tencent/rapidjson/issues/1649).
|
||||
// Luckily, the variant taking a GenericValue doesn't share this bug,
|
||||
// and we can create a string GenericValue without copying the string.
|
||||
return value.RemoveMember(rjson::value(name.data(), name.size()));
|
||||
}
|
||||
|
||||
void set_with_string_name(rjson::value& base, const std::string& name, rjson::value&& member) {
|
||||
base.AddMember(rjson::value(name.c_str(), name.size(), the_allocator), std::move(member), the_allocator);
|
||||
}
|
||||
|
||||
void set_with_string_name(rjson::value& base, std::string_view name, rjson::value&& member) {
|
||||
base.AddMember(rjson::value(name.data(), name.size(), the_allocator), std::move(member), the_allocator);
|
||||
}
|
||||
|
||||
void set_with_string_name(rjson::value& base, const std::string& name, rjson::string_ref_type member) {
|
||||
base.AddMember(rjson::value(name.c_str(), name.size(), the_allocator), rjson::value(member), the_allocator);
|
||||
}
|
||||
|
||||
void set_with_string_name(rjson::value& base, std::string_view name, rjson::string_ref_type member) {
|
||||
base.AddMember(rjson::value(name.data(), name.size(), the_allocator), rjson::value(member), the_allocator);
|
||||
}
|
||||
|
||||
void set(rjson::value& base, rjson::string_ref_type name, rjson::value&& member) {
|
||||
base.AddMember(name, std::move(member), the_allocator);
|
||||
}
|
||||
@@ -241,58 +113,6 @@ void push_back(rjson::value& base_array, rjson::value&& item) {
|
||||
|
||||
}
|
||||
|
||||
bool single_value_comp::operator()(const rjson::value& r1, const rjson::value& r2) const {
|
||||
auto r1_type = r1.GetType();
|
||||
auto r2_type = r2.GetType();
|
||||
|
||||
// null is the smallest type and compares with every other type, nothing is lesser than null
|
||||
if (r1_type == rjson::type::kNullType || r2_type == rjson::type::kNullType) {
|
||||
return r1_type < r2_type;
|
||||
}
|
||||
// only null, true, and false are comparable with each other, other types are not compatible
|
||||
if (r1_type != r2_type) {
|
||||
if (r1_type > rjson::type::kTrueType || r2_type > rjson::type::kTrueType) {
|
||||
throw rjson::error(format("Types are not comparable: {} {}", r1, r2));
|
||||
}
|
||||
}
|
||||
|
||||
switch (r1_type) {
|
||||
case rjson::type::kNullType:
|
||||
// fall-through
|
||||
case rjson::type::kFalseType:
|
||||
// fall-through
|
||||
case rjson::type::kTrueType:
|
||||
return r1_type < r2_type;
|
||||
case rjson::type::kObjectType:
|
||||
throw rjson::error("Object type comparison is not supported");
|
||||
case rjson::type::kArrayType:
|
||||
throw rjson::error("Array type comparison is not supported");
|
||||
case rjson::type::kStringType: {
|
||||
const size_t r1_len = r1.GetStringLength();
|
||||
const size_t r2_len = r2.GetStringLength();
|
||||
size_t len = std::min(r1_len, r2_len);
|
||||
int result = std::strncmp(r1.GetString(), r2.GetString(), len);
|
||||
return result < 0 || (result == 0 && r1_len < r2_len);
|
||||
}
|
||||
case rjson::type::kNumberType: {
|
||||
if (r1.IsInt() && r2.IsInt()) {
|
||||
return r1.GetInt() < r2.GetInt();
|
||||
} else if (r1.IsUint() && r2.IsUint()) {
|
||||
return r1.GetUint() < r2.GetUint();
|
||||
} else if (r1.IsInt64() && r2.IsInt64()) {
|
||||
return r1.GetInt64() < r2.GetInt64();
|
||||
} else if (r1.IsUint64() && r2.IsUint64()) {
|
||||
return r1.GetUint64() < r2.GetUint64();
|
||||
} else {
|
||||
// it's safe to call GetDouble() on any number type
|
||||
return r1.GetDouble() < r2.GetDouble();
|
||||
}
|
||||
}
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
} // end namespace rjson
|
||||
|
||||
std::ostream& std::operator<<(std::ostream& os, const rjson::value& v) {
|
||||
|
||||
@@ -104,49 +104,38 @@ inline rjson::value empty_string() {
|
||||
// The representation is dense - without any redundant indentation.
|
||||
std::string print(const rjson::value& value);
|
||||
|
||||
// Returns a string_view to the string held in a JSON value (which is
|
||||
// assumed to hold a string, i.e., v.IsString() == true). This is a view
|
||||
// to the existing data - no copying is done.
|
||||
inline std::string_view to_string_view(const rjson::value& v) {
|
||||
return std::string_view(v.GetString(), v.GetStringLength());
|
||||
}
|
||||
|
||||
// Copies given JSON value - involves allocation
|
||||
rjson::value copy(const rjson::value& value);
|
||||
|
||||
// Parses a JSON value from given string or raw character array.
|
||||
// The string/char array liveness does not need to be persisted,
|
||||
// as parse() will allocate member names and values.
|
||||
// as both parse() and parse_raw() will allocate member names and values.
|
||||
// Throws rjson::error if parsing failed.
|
||||
rjson::value parse(std::string_view str);
|
||||
// Needs to be run in thread context
|
||||
rjson::value parse_yieldable(std::string_view str);
|
||||
rjson::value parse(const std::string& str);
|
||||
rjson::value parse_raw(const char* c_str, size_t size);
|
||||
|
||||
// Creates a JSON value (of JSON string type) out of internal string representations.
|
||||
// The string value is copied, so str's liveness does not need to be persisted.
|
||||
rjson::value from_string(const std::string& str);
|
||||
rjson::value from_string(const sstring& str);
|
||||
rjson::value from_string(const char* str, size_t size);
|
||||
rjson::value from_string(std::string_view view);
|
||||
|
||||
// Returns a pointer to JSON member if it exists, nullptr otherwise
|
||||
rjson::value* find(rjson::value& value, std::string_view name);
|
||||
const rjson::value* find(const rjson::value& value, std::string_view name);
|
||||
rjson::value* find(rjson::value& value, rjson::string_ref_type name);
|
||||
const rjson::value* find(const rjson::value& value, rjson::string_ref_type name);
|
||||
|
||||
// Returns a reference to JSON member if it exists, throws otherwise
|
||||
rjson::value& get(rjson::value& value, std::string_view name);
|
||||
const rjson::value& get(const rjson::value& value, std::string_view name);
|
||||
rjson::value& get(rjson::value& value, rjson::string_ref_type name);
|
||||
const rjson::value& get(const rjson::value& value, rjson::string_ref_type name);
|
||||
|
||||
// Sets a member in given JSON object by moving the member - allocates the name.
|
||||
// Throws if base is not a JSON object.
|
||||
void set_with_string_name(rjson::value& base, const std::string& name, rjson::value&& member);
|
||||
void set_with_string_name(rjson::value& base, std::string_view name, rjson::value&& member);
|
||||
|
||||
// Sets a string member in given JSON object by assigning its reference - allocates the name.
|
||||
// NOTICE: member string liveness must be ensured to be at least as long as base's.
|
||||
// Throws if base is not a JSON object.
|
||||
void set_with_string_name(rjson::value& base, const std::string& name, rjson::string_ref_type member);
|
||||
void set_with_string_name(rjson::value& base, std::string_view name, rjson::string_ref_type member);
|
||||
|
||||
// Sets a member in given JSON object by moving the member.
|
||||
// NOTICE: name liveness must be ensured to be at least as long as base's.
|
||||
@@ -163,13 +152,6 @@ void set(rjson::value& base, rjson::string_ref_type name, rjson::string_ref_type
|
||||
// Throws if base_array is not a JSON array.
|
||||
void push_back(rjson::value& base_array, rjson::value&& item);
|
||||
|
||||
// Remove a member from a JSON object. Throws if value isn't an object.
|
||||
bool remove_member(rjson::value& value, std::string_view name);
|
||||
|
||||
struct single_value_comp {
|
||||
bool operator()(const rjson::value& r1, const rjson::value& r2) const;
|
||||
};
|
||||
|
||||
} // end namespace rjson
|
||||
|
||||
namespace std {
|
||||
|
||||
@@ -1,128 +0,0 @@
|
||||
/*
|
||||
* Copyright 2020 ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <seastarx.hh>
|
||||
#include <service/storage_proxy.hh>
|
||||
#include <service/storage_proxy.hh>
|
||||
#include "rjson.hh"
|
||||
#include "executor.hh"
|
||||
|
||||
namespace alternator {
|
||||
|
||||
// An rmw_operation encapsulates the common logic of all the item update
|
||||
// operations which may involve a read of the item before the write
|
||||
// (so-called Read-Modify-Write operations). These operations include PutItem,
|
||||
// UpdateItem and DeleteItem: All of these may be conditional operations (the
|
||||
// "Expected" parameter) which requir a read before the write, and UpdateItem
|
||||
// may also have an update expression which refers to the item's old value.
|
||||
//
|
||||
// The code below supports running the read and the write together as one
|
||||
// transaction using LWT (this is why rmw_operation is a subclass of
|
||||
// cas_request, as required by storage_proxy::cas()), but also has optional
|
||||
// modes not using LWT.
|
||||
class rmw_operation : public service::cas_request, public enable_shared_from_this<rmw_operation> {
|
||||
public:
|
||||
// The following options choose which mechanism to use for isolating
|
||||
// parallel write operations:
|
||||
// * The FORBID_RMW option forbids RMW (read-modify-write) operations
|
||||
// such as conditional updates. For the remaining write-only
|
||||
// operations, ordinary quorum writes are isolated enough.
|
||||
// * The LWT_ALWAYS option always uses LWT (lightweight transactions)
|
||||
// for any write operation - whether or not it also has a read.
|
||||
// * The LWT_RMW_ONLY option uses LWT only for RMW operations, and uses
|
||||
// ordinary quorum writes for write-only operations.
|
||||
// This option is not safe if the user may send both RMW and write-only
|
||||
// operations on the same item.
|
||||
// * The UNSAFE_RMW option does read-modify-write operations as separate
|
||||
// read and write. It is unsafe - concurrent RMW operations are not
|
||||
// isolated at all. This option will likely be removed in the future.
|
||||
enum class write_isolation {
|
||||
FORBID_RMW, LWT_ALWAYS, LWT_RMW_ONLY, UNSAFE_RMW
|
||||
};
|
||||
static constexpr auto WRITE_ISOLATION_TAG_KEY = "system:write_isolation";
|
||||
|
||||
static write_isolation get_write_isolation_for_schema(schema_ptr schema);
|
||||
|
||||
static write_isolation default_write_isolation;
|
||||
public:
|
||||
static void set_default_write_isolation(std::string_view mode);
|
||||
|
||||
protected:
|
||||
// The full request JSON
|
||||
rjson::value _request;
|
||||
// All RMW operations involve a single item with a specific partition
|
||||
// and optional clustering key, in a single table, so the following
|
||||
// information is common to all of them:
|
||||
schema_ptr _schema;
|
||||
partition_key _pk = partition_key::make_empty();
|
||||
clustering_key _ck = clustering_key::make_empty();
|
||||
write_isolation _write_isolation;
|
||||
|
||||
// All RMW operations can have a ReturnValues parameter from the following
|
||||
// choices. But note that only UpdateItem actually supports all of them:
|
||||
enum class returnvalues {
|
||||
NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW
|
||||
} _returnvalues;
|
||||
static returnvalues parse_returnvalues(const rjson::value& request);
|
||||
// When _returnvalues != NONE, apply() should store here, in JSON form,
|
||||
// the values which are to be returned in the "Attributes" field.
|
||||
// The default null JSON means do not return an Attributes field at all.
|
||||
// This field is marked "mutable" so that the const apply() can modify
|
||||
// it (see explanation below), but note that because apply() may be
|
||||
// called more than once, if apply() will sometimes set this field it
|
||||
// must set it (even if just to the default empty value) every time.
|
||||
mutable rjson::value _return_attributes;
|
||||
public:
|
||||
// The constructor of a rmw_operation subclass should parse the request
|
||||
// and try to discover as many input errors as it can before really
|
||||
// attempting the read or write operations.
|
||||
rmw_operation(service::storage_proxy& proxy, rjson::value&& request);
|
||||
// rmw_operation subclasses (update_item_operation, put_item_operation
|
||||
// and delete_item_operation) shall implement an apply() function which
|
||||
// takes the previous value of the item (if it was read) and creates the
|
||||
// write mutation. If the previous value of item does not pass the needed
|
||||
// conditional expression, apply() should return an empty optional.
|
||||
// apply() may throw if it encounters input errors not discovered during
|
||||
// the constructor.
|
||||
// apply() may be called more than once in case of contention, so it must
|
||||
// not change the state saved in the object (issue #7218 was caused by
|
||||
// violating this). We mark apply() "const" to let the compiler validate
|
||||
// this for us. The output-only field _return_attributes is marked
|
||||
// "mutable" above so that apply() can still write to it.
|
||||
virtual std::optional<mutation> apply(std::unique_ptr<rjson::value> previous_item, api::timestamp_type ts) const = 0;
|
||||
// Convert the above apply() into the signature needed by cas_request:
|
||||
virtual std::optional<mutation> apply(foreign_ptr<lw_shared_ptr<query::result>> qr, const query::partition_slice& slice, api::timestamp_type ts) override;
|
||||
virtual ~rmw_operation() = default;
|
||||
schema_ptr schema() const { return _schema; }
|
||||
const rjson::value& request() const { return _request; }
|
||||
rjson::value&& move_request() && { return std::move(_request); }
|
||||
future<executor::request_return_type> execute(service::storage_proxy& proxy,
|
||||
service::client_state& client_state,
|
||||
tracing::trace_state_ptr trace_state,
|
||||
service_permit permit,
|
||||
bool needs_read_before_write,
|
||||
stats& stats);
|
||||
std::optional<shard_id> shard_for_execute(bool needs_read_before_write);
|
||||
};
|
||||
|
||||
} // namespace alternator
|
||||
@@ -25,7 +25,6 @@
|
||||
#include "error.hh"
|
||||
#include "rapidjson/writer.h"
|
||||
#include "concrete_types.hh"
|
||||
#include "cql3/type_json.hh"
|
||||
|
||||
static logging::logger slogger("alternator-serialization");
|
||||
|
||||
@@ -68,7 +67,7 @@ struct from_json_visitor {
|
||||
bo.write(t.from_string(sstring_view(v.GetString(), v.GetStringLength())));
|
||||
}
|
||||
void operator()(const bytes_type_impl& t) const {
|
||||
bo.write(base64_decode(v));
|
||||
bo.write(base64_decode(std::string_view(v.GetString(), v.GetStringLength())));
|
||||
}
|
||||
void operator()(const boolean_type_impl& t) const {
|
||||
bo.write(boolean_type->decompose(v.GetBool()));
|
||||
@@ -78,7 +77,7 @@ struct from_json_visitor {
|
||||
}
|
||||
// default
|
||||
void operator()(const abstract_type& t) const {
|
||||
bo.write(from_json_object(t, Json::Value(rjson::print(v)), cql_serialization_format::internal()));
|
||||
bo.write(t.from_json_object(Json::Value(rjson::print(v)), cql_serialization_format::internal()));
|
||||
}
|
||||
};
|
||||
|
||||
@@ -108,7 +107,7 @@ struct to_json_visitor {
|
||||
|
||||
void operator()(const reversed_type_impl& t) const { visit(*t.underlying_type(), to_json_visitor{deserialized, type_ident, bv}); };
|
||||
void operator()(const decimal_type_impl& t) const {
|
||||
auto s = to_json_string(*decimal_type, bytes(bv));
|
||||
auto s = decimal_type->to_json_string(bytes(bv));
|
||||
//FIXME(sarna): unnecessary copy
|
||||
rjson::set_with_string_name(deserialized, type_ident, rjson::from_string(s));
|
||||
}
|
||||
@@ -121,7 +120,7 @@ struct to_json_visitor {
|
||||
}
|
||||
// default
|
||||
void operator()(const abstract_type& t) const {
|
||||
rjson::set_with_string_name(deserialized, type_ident, rjson::parse(to_json_string(t, bytes(bv))));
|
||||
rjson::set_with_string_name(deserialized, type_ident, rjson::parse(t.to_string(bytes(bv))));
|
||||
}
|
||||
};
|
||||
|
||||
@@ -136,7 +135,7 @@ rjson::value deserialize_item(bytes_view bv) {
|
||||
|
||||
if (atype == alternator_type::NOT_SUPPORTED_YET) {
|
||||
slogger.trace("Non-optimal deserialization of alternator type {}", int8_t(atype));
|
||||
return rjson::parse(std::string_view(reinterpret_cast<const char *>(bv.data()), bv.size()));
|
||||
return rjson::parse_raw(reinterpret_cast<const char *>(bv.data()), bv.size());
|
||||
}
|
||||
type_representation type_representation = represent_type(atype);
|
||||
visit(*type_representation.dtype, to_json_visitor{deserialized, type_representation.ident, bv});
|
||||
@@ -153,43 +152,34 @@ std::string type_to_string(data_type type) {
|
||||
};
|
||||
auto it = types.find(type);
|
||||
if (it == types.end()) {
|
||||
// fall back to string, in order to be able to present
|
||||
// internal Scylla types in a human-readable way
|
||||
return "S";
|
||||
throw std::runtime_error(format("Unknown type {}", type->name()));
|
||||
}
|
||||
return it->second;
|
||||
}
|
||||
|
||||
bytes get_key_column_value(const rjson::value& item, const column_definition& column) {
|
||||
std::string column_name = column.name_as_text();
|
||||
const rjson::value* key_typed_value = rjson::find(item, column_name);
|
||||
if (!key_typed_value) {
|
||||
throw api_error("ValidationException", format("Key column {} not found", column_name));
|
||||
std::string expected_type = type_to_string(column.type);
|
||||
|
||||
const rjson::value& key_typed_value = rjson::get(item, rjson::value::StringRefType(column_name.c_str()));
|
||||
if (!key_typed_value.IsObject() || key_typed_value.MemberCount() != 1) {
|
||||
throw api_error("ValidationException",
|
||||
format("Missing or invalid value object for key column {}: {}", column_name, item));
|
||||
}
|
||||
return get_key_from_typed_value(*key_typed_value, column);
|
||||
return get_key_from_typed_value(key_typed_value, column, expected_type);
|
||||
}
|
||||
|
||||
// Parses the JSON encoding for a key value, which is a map with a single
|
||||
// entry, whose key is the type (expected to match the key column's type)
|
||||
// and the value is the encoded value.
|
||||
bytes get_key_from_typed_value(const rjson::value& key_typed_value, const column_definition& column) {
|
||||
if (!key_typed_value.IsObject() || key_typed_value.MemberCount() != 1 ||
|
||||
!key_typed_value.MemberBegin()->value.IsString()) {
|
||||
throw api_error("ValidationException",
|
||||
format("Malformed value object for key column {}: {}",
|
||||
column.name_as_text(), key_typed_value));
|
||||
}
|
||||
|
||||
bytes get_key_from_typed_value(const rjson::value& key_typed_value, const column_definition& column, const std::string& expected_type) {
|
||||
auto it = key_typed_value.MemberBegin();
|
||||
if (it->name != type_to_string(column.type)) {
|
||||
if (it->name.GetString() != expected_type) {
|
||||
throw api_error("ValidationException",
|
||||
format("Type mismatch: expected type {} for key column {}, got type {}",
|
||||
type_to_string(column.type), column.name_as_text(), it->name.GetString()));
|
||||
expected_type, column.name_as_text(), it->name.GetString()));
|
||||
}
|
||||
if (column.type == bytes_type) {
|
||||
return base64_decode(it->value);
|
||||
return base64_decode(it->value.GetString());
|
||||
} else {
|
||||
return column.type->from_string(rjson::to_string_view(it->value));
|
||||
return column.type->from_string(it->value.GetString());
|
||||
}
|
||||
|
||||
}
|
||||
@@ -204,14 +194,11 @@ rjson::value json_key_column_value(bytes_view cell, const column_definition& col
|
||||
// FIXME: use specialized Alternator number type, not the more
|
||||
// general "decimal_type". A dedicated type can be more efficient
|
||||
// in storage space and in parsing speed.
|
||||
auto s = to_json_string(*decimal_type, bytes(cell));
|
||||
auto s = decimal_type->to_json_string(bytes(cell));
|
||||
return rjson::from_string(s);
|
||||
} else {
|
||||
// Support for arbitrary key types is useful for parsing values of virtual tables,
|
||||
// which can involve any type supported by Scylla.
|
||||
// In order to guarantee that the returned type is parsable by alternator clients,
|
||||
// they are represented simply as strings.
|
||||
return rjson::from_string(column.type->to_string(bytes(cell)));
|
||||
// We shouldn't get here, we shouldn't see such key columns.
|
||||
throw std::runtime_error(format("Unexpected key type: {}", column.type->name()));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -240,34 +227,4 @@ clustering_key ck_from_json(const rjson::value& item, schema_ptr schema) {
|
||||
return clustering_key::from_exploded(raw_ck);
|
||||
}
|
||||
|
||||
big_decimal unwrap_number(const rjson::value& v, std::string_view diagnostic) {
|
||||
if (!v.IsObject() || v.MemberCount() != 1) {
|
||||
throw api_error("ValidationException", format("{}: invalid number object", diagnostic));
|
||||
}
|
||||
auto it = v.MemberBegin();
|
||||
if (it->name != "N") {
|
||||
throw api_error("ValidationException", format("{}: expected number, found type '{}'", diagnostic, it->name));
|
||||
}
|
||||
if (it->value.IsNumber()) {
|
||||
// FIXME(sarna): should use big_decimal constructor with numeric values directly:
|
||||
return big_decimal(rjson::print(it->value));
|
||||
}
|
||||
if (!it->value.IsString()) {
|
||||
throw api_error("ValidationException", format("{}: improperly formatted number constant", diagnostic));
|
||||
}
|
||||
return big_decimal(it->value.GetString());
|
||||
}
|
||||
|
||||
const std::pair<std::string, const rjson::value*> unwrap_set(const rjson::value& v) {
|
||||
if (!v.IsObject() || v.MemberCount() != 1) {
|
||||
return {"", nullptr};
|
||||
}
|
||||
auto it = v.MemberBegin();
|
||||
const std::string it_key = it->name.GetString();
|
||||
if (it_key != "SS" && it_key != "BS" && it_key != "NS") {
|
||||
return {"", nullptr};
|
||||
}
|
||||
return std::make_pair(it_key, &(it->value));
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -22,12 +22,10 @@
|
||||
#pragma once
|
||||
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
#include "types.hh"
|
||||
#include "schema_fwd.hh"
|
||||
#include "schema.hh"
|
||||
#include "keys.hh"
|
||||
#include "rjson.hh"
|
||||
#include "utils/big_decimal.hh"
|
||||
|
||||
namespace alternator {
|
||||
|
||||
@@ -54,19 +52,10 @@ rjson::value deserialize_item(bytes_view bv);
|
||||
std::string type_to_string(data_type type);
|
||||
|
||||
bytes get_key_column_value(const rjson::value& item, const column_definition& column);
|
||||
bytes get_key_from_typed_value(const rjson::value& key_typed_value, const column_definition& column);
|
||||
bytes get_key_from_typed_value(const rjson::value& key_typed_value, const column_definition& column, const std::string& expected_type);
|
||||
rjson::value json_key_column_value(bytes_view cell, const column_definition& column);
|
||||
|
||||
partition_key pk_from_json(const rjson::value& item, schema_ptr schema);
|
||||
clustering_key ck_from_json(const rjson::value& item, schema_ptr schema);
|
||||
|
||||
// If v encodes a number (i.e., it is a {"N": [...]}, returns an object representing it. Otherwise,
|
||||
// raises ValidationException with diagnostic.
|
||||
big_decimal unwrap_number(const rjson::value& v, std::string_view diagnostic);
|
||||
|
||||
// Check if a given JSON object encodes a set (i.e., it is a {"SS": [...]}, or "NS", "BS"
|
||||
// and returns set's type and a pointer to that set. If the object does not encode a set,
|
||||
// returned value is {"", nullptr}
|
||||
const std::pair<std::string, const rjson::value*> unwrap_set(const rjson::value& v);
|
||||
|
||||
}
|
||||
|
||||
@@ -24,13 +24,10 @@
|
||||
#include <seastar/http/function_handlers.hh>
|
||||
#include <seastar/json/json_elements.hh>
|
||||
#include <seastarx.hh>
|
||||
#include <boost/algorithm/string/split.hpp>
|
||||
#include <boost/algorithm/string/classification.hpp>
|
||||
#include "error.hh"
|
||||
#include "rjson.hh"
|
||||
#include "auth.hh"
|
||||
#include <cctype>
|
||||
#include "cql3/query_processor.hh"
|
||||
#include "service/storage_service.hh"
|
||||
#include "utils/overloaded_functor.hh"
|
||||
|
||||
static logging::logger slogger("alternator-server");
|
||||
|
||||
@@ -40,23 +37,12 @@ namespace alternator {
|
||||
|
||||
static constexpr auto TARGET = "X-Amz-Target";
|
||||
|
||||
inline std::vector<std::string_view> split(std::string_view text, char separator) {
|
||||
std::vector<std::string_view> tokens;
|
||||
inline std::vector<sstring> split(const sstring& text, const char* separator) {
|
||||
if (text == "") {
|
||||
return tokens;
|
||||
return std::vector<sstring>();
|
||||
}
|
||||
|
||||
while (true) {
|
||||
auto pos = text.find_first_of(separator);
|
||||
if (pos != std::string_view::npos) {
|
||||
tokens.emplace_back(text.data(), pos);
|
||||
text.remove_prefix(pos + 1);
|
||||
} else {
|
||||
tokens.emplace_back(text);
|
||||
break;
|
||||
}
|
||||
}
|
||||
return tokens;
|
||||
std::vector<sstring> tokens;
|
||||
return boost::split(tokens, text, boost::is_any_of(separator));
|
||||
}
|
||||
|
||||
// DynamoDB HTTP error responses are structured as follows
|
||||
@@ -67,9 +53,9 @@ inline std::vector<std::string_view> split(std::string_view text, char separator
|
||||
// Internal Server Error.
|
||||
class api_handler : public handler_base {
|
||||
public:
|
||||
api_handler(const std::function<future<executor::request_return_type>(std::unique_ptr<request> req)>& _handle) : _f_handle(
|
||||
[this, _handle](std::unique_ptr<request> req, std::unique_ptr<reply> rep) {
|
||||
return seastar::futurize_invoke(_handle, std::move(req)).then_wrapped([this, rep = std::move(rep)](future<executor::request_return_type> resf) mutable {
|
||||
api_handler(const future_json_function& _handle) : _f_handle(
|
||||
[_handle](std::unique_ptr<request> req, std::unique_ptr<reply> rep) {
|
||||
return seastar::futurize_apply(_handle, std::move(req)).then_wrapped([rep = std::move(rep)](future<json::json_return_type> resf) mutable {
|
||||
if (resf.failed()) {
|
||||
// Exceptions of type api_error are wrapped as JSON and
|
||||
// returned to the client as expected. Other types of
|
||||
@@ -88,24 +74,20 @@ public:
|
||||
format("Internal server error: {}", std::current_exception()),
|
||||
reply::status_type::internal_server_error);
|
||||
}
|
||||
generate_error_reply(*rep, ret);
|
||||
// FIXME: what is this version number?
|
||||
rep->_content += "{\"__type\":\"com.amazonaws.dynamodb.v20120810#" + ret._type + "\"," +
|
||||
"\"message\":\"" + ret._msg + "\"}";
|
||||
rep->_status = ret._http_code;
|
||||
slogger.trace("api_handler error case: {}", rep->_content);
|
||||
return make_ready_future<std::unique_ptr<reply>>(std::move(rep));
|
||||
}
|
||||
slogger.trace("api_handler success case");
|
||||
auto res = resf.get0();
|
||||
std::visit(overloaded_functor {
|
||||
[&] (const json::json_return_type& json_return_value) {
|
||||
slogger.trace("api_handler success case");
|
||||
if (json_return_value._body_writer) {
|
||||
rep->write_body("json", std::move(json_return_value._body_writer));
|
||||
} else {
|
||||
rep->_content += json_return_value._res;
|
||||
}
|
||||
},
|
||||
[&] (const api_error& err) {
|
||||
generate_error_reply(*rep, err);
|
||||
}
|
||||
}, res);
|
||||
|
||||
if (res._body_writer) {
|
||||
rep->write_body("json", std::move(res._body_writer));
|
||||
} else {
|
||||
rep->_content += res._res;
|
||||
}
|
||||
return make_ready_future<std::unique_ptr<reply>>(std::move(rep));
|
||||
});
|
||||
}), _type("json") { }
|
||||
@@ -121,367 +103,70 @@ public:
|
||||
}
|
||||
|
||||
protected:
|
||||
void generate_error_reply(reply& rep, const api_error& err) {
|
||||
rep._content += "{\"__type\":\"com.amazonaws.dynamodb.v20120810#" + err._type + "\"," +
|
||||
"\"message\":\"" + err._msg + "\"}";
|
||||
rep._status = err._http_code;
|
||||
slogger.trace("api_handler error case: {}", rep._content);
|
||||
}
|
||||
|
||||
future_handler_function _f_handle;
|
||||
sstring _type;
|
||||
};
|
||||
|
||||
class gated_handler : public handler_base {
|
||||
seastar::gate& _gate;
|
||||
public:
|
||||
gated_handler(seastar::gate& gate) : _gate(gate) {}
|
||||
virtual future<std::unique_ptr<reply>> do_handle(const sstring& path, std::unique_ptr<request> req, std::unique_ptr<reply> rep) = 0;
|
||||
virtual future<std::unique_ptr<reply>> handle(const sstring& path, std::unique_ptr<request> req, std::unique_ptr<reply> rep) final override {
|
||||
return with_gate(_gate, [this, &path, req = std::move(req), rep = std::move(rep)] () mutable {
|
||||
return do_handle(path, std::move(req), std::move(rep));
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
class health_handler : public gated_handler {
|
||||
public:
|
||||
health_handler(seastar::gate& pending_requests) : gated_handler(pending_requests) {}
|
||||
protected:
|
||||
virtual future<std::unique_ptr<reply>> do_handle(const sstring& path, std::unique_ptr<request> req, std::unique_ptr<reply> rep) override {
|
||||
rep->set_status(reply::status_type::ok);
|
||||
rep->write_body("txt", format("healthy: {}", req->get_header("Host")));
|
||||
return make_ready_future<std::unique_ptr<reply>>(std::move(rep));
|
||||
}
|
||||
};
|
||||
|
||||
class local_nodelist_handler : public gated_handler {
|
||||
public:
|
||||
local_nodelist_handler(seastar::gate& pending_requests) : gated_handler(pending_requests) {}
|
||||
protected:
|
||||
virtual future<std::unique_ptr<reply>> do_handle(const sstring& path, std::unique_ptr<request> req, std::unique_ptr<reply> rep) override {
|
||||
rjson::value results = rjson::empty_array();
|
||||
// It's very easy to get a list of all live nodes on the cluster,
|
||||
// using gms::get_local_gossiper().get_live_members(). But getting
|
||||
// just the list of live nodes in this DC needs more elaborate code:
|
||||
sstring local_dc = locator::i_endpoint_snitch::get_local_snitch_ptr()->get_datacenter(
|
||||
utils::fb_utilities::get_broadcast_address());
|
||||
std::unordered_set<gms::inet_address> local_dc_nodes =
|
||||
service::get_local_storage_service().get_token_metadata().
|
||||
get_topology().get_datacenter_endpoints().at(local_dc);
|
||||
for (auto& ip : local_dc_nodes) {
|
||||
if (gms::get_local_gossiper().is_alive(ip)) {
|
||||
rjson::push_back(results, rjson::from_string(ip.to_sstring()));
|
||||
}
|
||||
}
|
||||
rep->set_status(reply::status_type::ok);
|
||||
rep->set_content_type("json");
|
||||
rep->_content = rjson::print(results);
|
||||
return make_ready_future<std::unique_ptr<reply>>(std::move(rep));
|
||||
}
|
||||
};
|
||||
|
||||
future<> server::verify_signature(const request& req) {
|
||||
if (!_enforce_authorization) {
|
||||
slogger.debug("Skipping authorization");
|
||||
return make_ready_future<>();
|
||||
}
|
||||
auto host_it = req._headers.find("Host");
|
||||
if (host_it == req._headers.end()) {
|
||||
throw api_error("InvalidSignatureException", "Host header is mandatory for signature verification");
|
||||
}
|
||||
auto authorization_it = req._headers.find("Authorization");
|
||||
if (authorization_it == req._headers.end()) {
|
||||
throw api_error("InvalidSignatureException", "Authorization header is mandatory for signature verification");
|
||||
}
|
||||
std::string host = host_it->second;
|
||||
std::vector<std::string_view> credentials_raw = split(authorization_it->second, ' ');
|
||||
std::string credential;
|
||||
std::string user_signature;
|
||||
std::string signed_headers_str;
|
||||
std::vector<std::string_view> signed_headers;
|
||||
for (std::string_view entry : credentials_raw) {
|
||||
std::vector<std::string_view> entry_split = split(entry, '=');
|
||||
if (entry_split.size() != 2) {
|
||||
if (entry != "AWS4-HMAC-SHA256") {
|
||||
throw api_error("InvalidSignatureException", format("Only AWS4-HMAC-SHA256 algorithm is supported. Found: {}", entry));
|
||||
}
|
||||
continue;
|
||||
}
|
||||
std::string_view auth_value = entry_split[1];
|
||||
// Commas appear as an additional (quite redundant) delimiter
|
||||
if (auth_value.back() == ',') {
|
||||
auth_value.remove_suffix(1);
|
||||
}
|
||||
if (entry_split[0] == "Credential") {
|
||||
credential = std::string(auth_value);
|
||||
} else if (entry_split[0] == "Signature") {
|
||||
user_signature = std::string(auth_value);
|
||||
} else if (entry_split[0] == "SignedHeaders") {
|
||||
signed_headers_str = std::string(auth_value);
|
||||
signed_headers = split(auth_value, ';');
|
||||
std::sort(signed_headers.begin(), signed_headers.end());
|
||||
}
|
||||
}
|
||||
std::vector<std::string_view> credential_split = split(credential, '/');
|
||||
if (credential_split.size() != 5) {
|
||||
throw api_error("ValidationException", format("Incorrect credential information format: {}", credential));
|
||||
}
|
||||
std::string user(credential_split[0]);
|
||||
std::string datestamp(credential_split[1]);
|
||||
std::string region(credential_split[2]);
|
||||
std::string service(credential_split[3]);
|
||||
|
||||
std::map<std::string_view, std::string_view> signed_headers_map;
|
||||
for (const auto& header : signed_headers) {
|
||||
signed_headers_map.emplace(header, std::string_view());
|
||||
}
|
||||
for (auto& header : req._headers) {
|
||||
std::string header_str;
|
||||
header_str.resize(header.first.size());
|
||||
std::transform(header.first.begin(), header.first.end(), header_str.begin(), ::tolower);
|
||||
auto it = signed_headers_map.find(header_str);
|
||||
if (it != signed_headers_map.end()) {
|
||||
it->second = std::string_view(header.second);
|
||||
}
|
||||
}
|
||||
|
||||
auto cache_getter = [] (std::string username) {
|
||||
return get_key_from_roles(cql3::get_query_processor().local(), std::move(username));
|
||||
void server::set_routes(routes& r) {
|
||||
using alternator_callback = std::function<future<json::json_return_type>(executor&, executor::client_state&, std::unique_ptr<request>)>;
|
||||
std::unordered_map<std::string, alternator_callback> routes{
|
||||
{"CreateTable", [] (executor& e, executor::client_state& client_state, std::unique_ptr<request> req) {
|
||||
return e.maybe_create_keyspace().then([&e, &client_state, req = std::move(req)] { return e.create_table(client_state, req->content); }); }
|
||||
},
|
||||
{"DescribeTable", [] (executor& e, executor::client_state& client_state, std::unique_ptr<request> req) { return e.describe_table(client_state, req->content); }},
|
||||
{"DeleteTable", [] (executor& e, executor::client_state& client_state, std::unique_ptr<request> req) { return e.delete_table(client_state, req->content); }},
|
||||
{"PutItem", [] (executor& e, executor::client_state& client_state, std::unique_ptr<request> req) { return e.put_item(client_state, req->content); }},
|
||||
{"UpdateItem", [] (executor& e, executor::client_state& client_state, std::unique_ptr<request> req) { return e.update_item(client_state, req->content); }},
|
||||
{"GetItem", [] (executor& e, executor::client_state& client_state, std::unique_ptr<request> req) { return e.get_item(client_state, req->content); }},
|
||||
{"DeleteItem", [] (executor& e, executor::client_state& client_state, std::unique_ptr<request> req) { return e.delete_item(client_state, req->content); }},
|
||||
{"ListTables", [] (executor& e, executor::client_state& client_state, std::unique_ptr<request> req) { return e.list_tables(client_state, req->content); }},
|
||||
{"Scan", [] (executor& e, executor::client_state& client_state, std::unique_ptr<request> req) { return e.scan(client_state, req->content); }},
|
||||
{"DescribeEndpoints", [] (executor& e, executor::client_state& client_state, std::unique_ptr<request> req) { return e.describe_endpoints(client_state, req->content, req->get_header("Host")); }},
|
||||
{"BatchWriteItem", [] (executor& e, executor::client_state& client_state, std::unique_ptr<request> req) { return e.batch_write_item(client_state, req->content); }},
|
||||
{"BatchGetItem", [] (executor& e, executor::client_state& client_state, std::unique_ptr<request> req) { return e.batch_get_item(client_state, req->content); }},
|
||||
{"Query", [] (executor& e, executor::client_state& client_state, std::unique_ptr<request> req) { return e.query(client_state, req->content); }},
|
||||
};
|
||||
return _key_cache.get_ptr(user, cache_getter).then([this, &req,
|
||||
user = std::move(user),
|
||||
host = std::move(host),
|
||||
datestamp = std::move(datestamp),
|
||||
signed_headers_str = std::move(signed_headers_str),
|
||||
signed_headers_map = std::move(signed_headers_map),
|
||||
region = std::move(region),
|
||||
service = std::move(service),
|
||||
user_signature = std::move(user_signature)] (key_cache::value_ptr key_ptr) {
|
||||
std::string signature = get_signature(user, *key_ptr, std::string_view(host), req._method,
|
||||
datestamp, signed_headers_str, signed_headers_map, req.content, region, service, "");
|
||||
|
||||
if (signature != std::string_view(user_signature)) {
|
||||
_key_cache.remove(user);
|
||||
throw api_error("UnrecognizedClientException", "The security token included in the request is invalid.");
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
future<executor::request_return_type> server::handle_api_request(std::unique_ptr<request>&& req) {
|
||||
_executor._stats.total_operations++;
|
||||
sstring target = req->get_header(TARGET);
|
||||
std::vector<std::string_view> split_target = split(target, '.');
|
||||
//NOTICE(sarna): Target consists of Dynamo API version followed by a dot '.' and operation type (e.g. CreateTable)
|
||||
std::string op = split_target.empty() ? std::string() : std::string(split_target.back());
|
||||
slogger.trace("Request: {} {}", op, req->content);
|
||||
return verify_signature(*req).then([this, op, req = std::move(req)] () mutable {
|
||||
auto callback_it = _callbacks.find(op);
|
||||
if (callback_it == _callbacks.end()) {
|
||||
_executor._stats.unsupported_operations++;
|
||||
api_handler* handler = new api_handler([this, routes = std::move(routes)](std::unique_ptr<request> req) -> future<json::json_return_type> {
|
||||
_executor.local()._stats.total_operations++;
|
||||
sstring target = req->get_header(TARGET);
|
||||
std::vector<sstring> split_target = split(target, ".");
|
||||
//NOTICE(sarna): Target consists of Dynamo API version folllowed by a dot '.' and operation type (e.g. CreateTable)
|
||||
sstring op = split_target.empty() ? sstring() : split_target.back();
|
||||
slogger.trace("Request: {} {}", op, req->content);
|
||||
auto callback_it = routes.find(op);
|
||||
if (callback_it == routes.end()) {
|
||||
_executor.local()._stats.unsupported_operations++;
|
||||
throw api_error("UnknownOperationException",
|
||||
format("Unsupported operation {}", op));
|
||||
}
|
||||
return with_gate(_pending_requests, [this, callback_it = std::move(callback_it), op = std::move(op), req = std::move(req)] () mutable {
|
||||
//FIXME: Client state can provide more context, e.g. client's endpoint address
|
||||
// We use unique_ptr because client_state cannot be moved or copied
|
||||
return do_with(std::make_unique<executor::client_state>(executor::client_state::internal_tag()),
|
||||
[this, callback_it = std::move(callback_it), op = std::move(op), req = std::move(req)] (std::unique_ptr<executor::client_state>& client_state) mutable {
|
||||
tracing::trace_state_ptr trace_state = executor::maybe_trace_query(*client_state, op, req->content);
|
||||
tracing::trace(trace_state, op);
|
||||
// JSON parsing can allocate up to roughly 2x the size of the raw document, + a couple of bytes for maintenance.
|
||||
// FIXME: by this time, the whole HTTP request was already read, so some memory is already occupied.
|
||||
// Once HTTP allows working on streams, we should grab the permit *before* reading the HTTP payload.
|
||||
size_t mem_estimate = req->content.size() * 3 + 8000;
|
||||
auto units_fut = get_units(*_memory_limiter, mem_estimate);
|
||||
if (_memory_limiter->waiters()) {
|
||||
++_executor._stats.requests_blocked_memory;
|
||||
}
|
||||
return units_fut.then([this, callback_it = std::move(callback_it), &client_state, trace_state, req = std::move(req)] (semaphore_units<> units) mutable {
|
||||
return _json_parser.parse(req->content).then([this, callback_it = std::move(callback_it), &client_state, trace_state,
|
||||
units = std::move(units), req = std::move(req)] (rjson::value json_request) mutable {
|
||||
return callback_it->second(_executor, *client_state, trace_state, make_service_permit(std::move(units)), std::move(json_request), std::move(req)).finally([trace_state] {});
|
||||
});
|
||||
});
|
||||
});
|
||||
//FIXME: Client state can provide more context, e.g. client's endpoint address
|
||||
return do_with(executor::client_state::for_internal_calls(), [this, callback_it = std::move(callback_it), op = std::move(op), req = std::move(req)] (executor::client_state& client_state) mutable {
|
||||
client_state.set_raw_keyspace(executor::KEYSPACE_NAME);
|
||||
executor::maybe_trace_query(client_state, op, req->content);
|
||||
tracing::trace(client_state.get_trace_state(), op);
|
||||
return callback_it->second(_executor.local(), client_state, std::move(req));
|
||||
});
|
||||
});
|
||||
|
||||
r.add(operation_type::POST, url("/"), handler);
|
||||
}
|
||||
|
||||
void server::set_routes(routes& r) {
|
||||
api_handler* req_handler = new api_handler([this] (std::unique_ptr<request> req) mutable {
|
||||
return handle_api_request(std::move(req));
|
||||
});
|
||||
|
||||
r.put(operation_type::POST, "/", req_handler);
|
||||
r.put(operation_type::GET, "/", new health_handler(_pending_requests));
|
||||
// The "/localnodes" request is a new Alternator feature, not supported by
|
||||
// DynamoDB and not required for DynamoDB compatibility. It allows a
|
||||
// client to enquire - using a trivial HTTP request without requiring
|
||||
// authentication - the list of all live nodes in the same data center of
|
||||
// the Alternator cluster. The client can use this list to balance its
|
||||
// request load to all the nodes in the same geographical region.
|
||||
// Note that this API exposes - openly without authentication - the
|
||||
// information on the cluster's members inside one data center. We do not
|
||||
// consider this to be a security risk, because an attacker can already
|
||||
// scan an entire subnet for nodes responding to the health request,
|
||||
// or even just scan for open ports.
|
||||
r.put(operation_type::GET, "/localnodes", new local_nodelist_handler(_pending_requests));
|
||||
}
|
||||
|
||||
//FIXME: A way to immediately invalidate the cache should be considered,
|
||||
// e.g. when the system table which stores the keys is changed.
|
||||
// For now, this propagation may take up to 1 minute.
|
||||
server::server(executor& exec)
|
||||
: _http_server("http-alternator")
|
||||
, _https_server("https-alternator")
|
||||
, _executor(exec)
|
||||
, _key_cache(1024, 1min, slogger)
|
||||
, _enforce_authorization(false)
|
||||
, _enabled_servers{}
|
||||
, _pending_requests{}
|
||||
, _callbacks{
|
||||
{"CreateTable", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.create_table(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"DescribeTable", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.describe_table(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"DeleteTable", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.delete_table(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"PutItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.put_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"UpdateItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.update_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"GetItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.get_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"DeleteItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.delete_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"ListTables", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.list_tables(client_state, std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"Scan", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.scan(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"DescribeEndpoints", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.describe_endpoints(client_state, std::move(permit), std::move(json_request), req->get_header("Host"));
|
||||
}},
|
||||
{"BatchWriteItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.batch_write_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"BatchGetItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.batch_get_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"Query", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.query(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"TagResource", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.tag_resource(client_state, std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"UntagResource", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.untag_resource(client_state, std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"ListTagsOfResource", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.list_tags_of_resource(client_state, std::move(permit), std::move(json_request));
|
||||
}},
|
||||
} {
|
||||
}
|
||||
|
||||
future<> server::init(net::inet_address addr, std::optional<uint16_t> port, std::optional<uint16_t> https_port, std::optional<tls::credentials_builder> creds,
|
||||
bool enforce_authorization, semaphore* memory_limiter) {
|
||||
_memory_limiter = memory_limiter;
|
||||
_enforce_authorization = enforce_authorization;
|
||||
if (!port && !https_port) {
|
||||
return make_exception_future<>(std::runtime_error("Either regular port or TLS port"
|
||||
" must be specified in order to init an alternator HTTP server instance"));
|
||||
}
|
||||
return seastar::async([this, addr, port, https_port, creds] {
|
||||
try {
|
||||
_executor.start().get();
|
||||
|
||||
if (port) {
|
||||
set_routes(_http_server._routes);
|
||||
_http_server.set_content_length_limit(server::content_length_limit);
|
||||
_http_server.listen(socket_address{addr, *port}).get();
|
||||
_enabled_servers.push_back(std::ref(_http_server));
|
||||
}
|
||||
if (https_port) {
|
||||
set_routes(_https_server._routes);
|
||||
_https_server.set_content_length_limit(server::content_length_limit);
|
||||
_https_server.set_tls_credentials(creds->build_reloadable_server_credentials([](const std::unordered_set<sstring>& files, std::exception_ptr ep) {
|
||||
if (ep) {
|
||||
slogger.warn("Exception loading {}: {}", files, ep);
|
||||
} else {
|
||||
slogger.info("Reloaded {}", files);
|
||||
}
|
||||
}).get0());
|
||||
_https_server.listen(socket_address{addr, *https_port}).get();
|
||||
_enabled_servers.push_back(std::ref(_https_server));
|
||||
}
|
||||
} catch (...) {
|
||||
slogger.error("Failed to set up Alternator HTTP server on {} port {}, TLS port {}: {}",
|
||||
addr, port ? std::to_string(*port) : "OFF", https_port ? std::to_string(*https_port) : "OFF", std::current_exception());
|
||||
std::throw_with_nested(std::runtime_error(
|
||||
format("Failed to set up Alternator HTTP server on {} port {}, TLS port {}",
|
||||
addr, port ? std::to_string(*port) : "OFF", https_port ? std::to_string(*https_port) : "OFF")));
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
future<> server::stop() {
|
||||
return parallel_for_each(_enabled_servers, [] (http_server& server) {
|
||||
return server.stop();
|
||||
future<> server::init(net::inet_address addr, uint16_t port) {
|
||||
return _executor.invoke_on_all([] (executor& e) {
|
||||
return e.start();
|
||||
}).then([this] {
|
||||
return _pending_requests.close();
|
||||
return _control.start();
|
||||
}).then([this] {
|
||||
return _json_parser.stop();
|
||||
return _control.set_routes(std::bind(&server::set_routes, this, std::placeholders::_1));
|
||||
}).then([this, addr, port] {
|
||||
return _control.listen(socket_address{addr, port});
|
||||
}).then([addr, port] {
|
||||
slogger.info("Alternator HTTP server listening on {} port {}", addr, port);
|
||||
}).handle_exception([addr, port] (std::exception_ptr e) {
|
||||
slogger.warn("Failed to set up Alternator HTTP server on {} port {}: {}", addr, port, e);
|
||||
});
|
||||
}
|
||||
|
||||
server::json_parser::json_parser() : _run_parse_json_thread(async([this] {
|
||||
while (true) {
|
||||
_document_waiting.wait().get();
|
||||
if (_as.abort_requested()) {
|
||||
return;
|
||||
}
|
||||
try {
|
||||
_parsed_document = rjson::parse_yieldable(_raw_document);
|
||||
_current_exception = nullptr;
|
||||
} catch (...) {
|
||||
_current_exception = std::current_exception();
|
||||
}
|
||||
_document_parsed.signal();
|
||||
}
|
||||
})) {
|
||||
}
|
||||
|
||||
future<rjson::value> server::json_parser::parse(std::string_view content) {
|
||||
if (content.size() < yieldable_parsing_threshold) {
|
||||
return make_ready_future<rjson::value>(rjson::parse(content));
|
||||
}
|
||||
return with_semaphore(_parsing_sem, 1, [this, content] {
|
||||
_raw_document = content;
|
||||
_document_waiting.signal();
|
||||
return _document_parsed.wait().then([this] {
|
||||
if (_current_exception) {
|
||||
return make_exception_future<rjson::value>(_current_exception);
|
||||
}
|
||||
return make_ready_future<rjson::value>(std::move(_parsed_document));
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
future<> server::json_parser::stop() {
|
||||
_as.request_abort();
|
||||
_document_waiting.signal();
|
||||
_document_parsed.broken();
|
||||
return std::move(_run_parse_json_thread);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
@@ -24,59 +24,18 @@
|
||||
#include "alternator/executor.hh"
|
||||
#include <seastar/core/future.hh>
|
||||
#include <seastar/http/httpd.hh>
|
||||
#include <seastar/net/tls.hh>
|
||||
#include <optional>
|
||||
#include <alternator/auth.hh>
|
||||
#include <utils/small_vector.hh>
|
||||
#include <seastar/core/units.hh>
|
||||
|
||||
namespace alternator {
|
||||
|
||||
class server {
|
||||
static constexpr size_t content_length_limit = 16*MB;
|
||||
using alternator_callback = std::function<future<executor::request_return_type>(executor&, executor::client_state&,
|
||||
tracing::trace_state_ptr, service_permit, rjson::value, std::unique_ptr<request>)>;
|
||||
using alternator_callbacks_map = std::unordered_map<std::string_view, alternator_callback>;
|
||||
|
||||
http_server _http_server;
|
||||
http_server _https_server;
|
||||
executor& _executor;
|
||||
|
||||
key_cache _key_cache;
|
||||
bool _enforce_authorization;
|
||||
utils::small_vector<std::reference_wrapper<seastar::httpd::http_server>, 2> _enabled_servers;
|
||||
gate _pending_requests;
|
||||
alternator_callbacks_map _callbacks;
|
||||
|
||||
semaphore* _memory_limiter;
|
||||
|
||||
class json_parser {
|
||||
static constexpr size_t yieldable_parsing_threshold = 16*KB;
|
||||
std::string_view _raw_document;
|
||||
rjson::value _parsed_document;
|
||||
std::exception_ptr _current_exception;
|
||||
semaphore _parsing_sem{1};
|
||||
condition_variable _document_waiting;
|
||||
condition_variable _document_parsed;
|
||||
abort_source _as;
|
||||
future<> _run_parse_json_thread;
|
||||
public:
|
||||
json_parser();
|
||||
future<rjson::value> parse(std::string_view content);
|
||||
future<> stop();
|
||||
};
|
||||
json_parser _json_parser;
|
||||
|
||||
seastar::httpd::http_server_control _control;
|
||||
seastar::sharded<executor>& _executor;
|
||||
public:
|
||||
server(executor& executor);
|
||||
server(seastar::sharded<executor>& executor) : _executor(executor) {}
|
||||
|
||||
future<> init(net::inet_address addr, std::optional<uint16_t> port, std::optional<uint16_t> https_port, std::optional<tls::credentials_builder> creds,
|
||||
bool enforce_authorization, semaphore* memory_limiter);
|
||||
future<> stop();
|
||||
seastar::future<> init(net::inet_address addr, uint16_t port);
|
||||
private:
|
||||
void set_routes(seastar::httpd::routes& r);
|
||||
future<> verify_signature(const seastar::httpd::request& r);
|
||||
future<executor::request_return_type> handle_api_request(std::unique_ptr<request>&& req);
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
@@ -85,12 +85,6 @@ stats::stats() : api_operations{} {
|
||||
seastar::metrics::description("number of total operations via Alternator API")),
|
||||
seastar::metrics::make_total_operations("reads_before_write", reads_before_write,
|
||||
seastar::metrics::description("number of performed read-before-write operations")),
|
||||
seastar::metrics::make_total_operations("write_using_lwt", write_using_lwt,
|
||||
seastar::metrics::description("number of writes that used LWT")),
|
||||
seastar::metrics::make_total_operations("shard_bounce_for_lwt", shard_bounce_for_lwt,
|
||||
seastar::metrics::description("number writes that had to be bounced from this shard because of LWT requirements")),
|
||||
seastar::metrics::make_total_operations("requests_blocked_memory", requests_blocked_memory,
|
||||
seastar::metrics::description("Counts a number of requests blocked due to memory pressure.")),
|
||||
seastar::metrics::make_total_operations("filtered_rows_read_total", cql_stats.filtered_rows_read_total,
|
||||
seastar::metrics::description("number of rows read during filtering operations")),
|
||||
seastar::metrics::make_total_operations("filtered_rows_matched_total", cql_stats.filtered_rows_matched_total,
|
||||
|
||||
@@ -84,9 +84,6 @@ public:
|
||||
uint64_t total_operations = 0;
|
||||
uint64_t unsupported_operations = 0;
|
||||
uint64_t reads_before_write = 0;
|
||||
uint64_t write_using_lwt = 0;
|
||||
uint64_t shard_bounce_for_lwt = 0;
|
||||
uint64_t requests_blocked_memory = 0;
|
||||
// CQL-derived stats
|
||||
cql3::cql_stats cql_stats;
|
||||
private:
|
||||
|
||||
@@ -1,53 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019 ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "serializer.hh"
|
||||
#include "schema.hh"
|
||||
#include "db/extensions.hh"
|
||||
|
||||
namespace alternator {
|
||||
|
||||
class tags_extension : public schema_extension {
|
||||
public:
|
||||
static constexpr auto NAME = "scylla_tags";
|
||||
|
||||
tags_extension() = default;
|
||||
explicit tags_extension(const std::map<sstring, sstring>& tags) : _tags(std::move(tags)) {}
|
||||
explicit tags_extension(bytes b) : _tags(tags_extension::deserialize(b)) {}
|
||||
explicit tags_extension(const sstring& s) {
|
||||
throw std::logic_error("Cannot create tags from string");
|
||||
}
|
||||
bytes serialize() const override {
|
||||
return ser::serialize_to_buffer<bytes>(_tags);
|
||||
}
|
||||
static std::map<sstring, sstring> deserialize(bytes_view buffer) {
|
||||
return ser::deserialize_from_buffer(buffer, boost::type<std::map<sstring, sstring>>());
|
||||
}
|
||||
const std::map<sstring, sstring>& tags() const {
|
||||
return _tags;
|
||||
}
|
||||
private:
|
||||
std::map<sstring, sstring> _tags;
|
||||
};
|
||||
|
||||
}
|
||||
@@ -13,7 +13,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"get row cache save period in seconds",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_row_cache_save_period_in_seconds",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -35,7 +35,7 @@
|
||||
"description":"row cache save period in seconds",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
@@ -48,7 +48,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"get key cache save period in seconds",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_key_cache_save_period_in_seconds",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -70,7 +70,7 @@
|
||||
"description":"key cache save period in seconds",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
@@ -83,7 +83,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"get counter cache save period in seconds",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_counter_cache_save_period_in_seconds",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -105,7 +105,7 @@
|
||||
"description":"counter cache save period in seconds",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
@@ -118,7 +118,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"get row cache keys to save",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_row_cache_keys_to_save",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -140,7 +140,7 @@
|
||||
"description":"row cache keys to save",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
@@ -153,7 +153,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"get key cache keys to save",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_key_cache_keys_to_save",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -175,7 +175,7 @@
|
||||
"description":"key cache keys to save",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
@@ -188,7 +188,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"get counter cache keys to save",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_counter_cache_keys_to_save",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -210,7 +210,7 @@
|
||||
"description":"counter cache keys to save",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
@@ -448,7 +448,7 @@
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get key entries",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_key_entries",
|
||||
"produces": [
|
||||
"application/json"
|
||||
@@ -568,7 +568,7 @@
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get row entries",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_row_entries",
|
||||
"produces": [
|
||||
"application/json"
|
||||
@@ -688,7 +688,7 @@
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get counter entries",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_counter_entries",
|
||||
"produces": [
|
||||
"application/json"
|
||||
|
||||
@@ -70,7 +70,7 @@
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Force a major compaction of this column family",
|
||||
"type":"void",
|
||||
"type":"string",
|
||||
"nickname":"force_major_compaction",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -121,7 +121,7 @@
|
||||
"description":"The minimum number of sstables in queue before compaction kicks off",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
@@ -172,7 +172,7 @@
|
||||
"description":"The maximum number of sstables in queue before compaction kicks off",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
@@ -223,7 +223,7 @@
|
||||
"description":"The maximum number of sstables in queue before compaction kicks off",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
@@ -231,7 +231,7 @@
|
||||
"description":"The minimum number of sstables in queue before compaction kicks off",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
@@ -380,54 +380,16 @@
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"check if the auto_compaction property is enabled for a given table",
|
||||
"summary":"check if the auto compaction disabled",
|
||||
"type":"boolean",
|
||||
"nickname":"get_auto_compaction",
|
||||
"nickname":"is_auto_compaction_disabled",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"name",
|
||||
"description":"The table name in keyspace:name format",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Enable table auto compaction",
|
||||
"type":"void",
|
||||
"nickname":"enable_auto_compaction",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"name",
|
||||
"description":"The table name in keyspace:name format",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"method":"DELETE",
|
||||
"summary":"Disable table auto compaction",
|
||||
"type":"void",
|
||||
"nickname":"disable_auto_compaction",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"name",
|
||||
"description":"The table name in keyspace:name format",
|
||||
"description":"The column family name in keyspace:name format",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -582,7 +544,7 @@
|
||||
"summary":"sstable count for each level. empty unless leveled compaction is used",
|
||||
"type":"array",
|
||||
"items":{
|
||||
"type": "long"
|
||||
"type":"int"
|
||||
},
|
||||
"nickname":"get_sstable_count_per_level",
|
||||
"produces":[
|
||||
@@ -674,7 +636,7 @@
|
||||
"description":"Duration (in milliseconds) of monitoring operation",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
@@ -682,7 +644,7 @@
|
||||
"description":"number of the top partitions to list",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
@@ -690,7 +652,7 @@
|
||||
"description":"capacity of stream summary: determines amount of resources used in query processing",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
@@ -959,7 +921,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get memtable switch count",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_memtable_switch_count",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -983,7 +945,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get all memtable switch count",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_all_memtable_switch_count",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -1120,7 +1082,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get read latency",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_read_latency",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -1273,7 +1235,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get all read latency",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_all_read_latency",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -1289,7 +1251,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get range latency",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_range_latency",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -1313,7 +1275,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get all range latency",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_all_range_latency",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -1329,7 +1291,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get write latency",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_write_latency",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -1482,7 +1444,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get all write latency",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_all_write_latency",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -1498,7 +1460,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get pending flushes",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_pending_flushes",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -1522,7 +1484,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get all pending flushes",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_all_pending_flushes",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -1538,7 +1500,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get pending compactions",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_pending_compactions",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -1562,7 +1524,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get all pending compactions",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_all_pending_compactions",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -1578,7 +1540,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get live ss table count",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_live_ss_table_count",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -1602,7 +1564,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get all live ss table count",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_all_live_ss_table_count",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -1618,7 +1580,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get live disk space used",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_live_disk_space_used",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -1642,7 +1604,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get all live disk space used",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_all_live_disk_space_used",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -1658,7 +1620,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get total disk space used",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_total_disk_space_used",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -1682,7 +1644,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get all total disk space used",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_all_total_disk_space_used",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -2138,7 +2100,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get speculative retries",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_speculative_retries",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -2162,7 +2124,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get all speculative retries",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_all_speculative_retries",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -2242,7 +2204,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get row cache hit out of range",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_row_cache_hit_out_of_range",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -2266,7 +2228,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get all row cache hit out of range",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_all_row_cache_hit_out_of_range",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -2282,7 +2244,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get row cache hit",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_row_cache_hit",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -2306,7 +2268,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get all row cache hit",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_all_row_cache_hit",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -2322,7 +2284,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get row cache miss",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_row_cache_miss",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -2346,7 +2308,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get all row cache miss",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_all_row_cache_miss",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -2362,7 +2324,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get cas prepare",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_cas_prepare",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -2386,7 +2348,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get cas propose",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_cas_propose",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -2410,7 +2372,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get cas commit",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_cas_commit",
|
||||
"produces":[
|
||||
"application/json"
|
||||
|
||||
@@ -118,7 +118,7 @@
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get pending tasks",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_pending_tasks",
|
||||
"produces": [
|
||||
"application/json"
|
||||
@@ -181,7 +181,7 @@
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get bytes compacted",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_bytes_compacted",
|
||||
"produces": [
|
||||
"application/json"
|
||||
@@ -197,7 +197,7 @@
|
||||
"description":"A row merged information",
|
||||
"properties":{
|
||||
"key":{
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"description":"The number of sstable"
|
||||
},
|
||||
"value":{
|
||||
|
||||
@@ -1,90 +0,0 @@
|
||||
{
|
||||
"apiVersion":"0.0.1",
|
||||
"swaggerVersion":"1.2",
|
||||
"basePath":"{{Protocol}}://{{Host}}",
|
||||
"resourcePath":"/error_injection",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"apis":[
|
||||
{
|
||||
"path":"/v2/error_injection/injection/{injection}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Activate an injection that triggers an error in code",
|
||||
"type":"void",
|
||||
"nickname":"enable_injection",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"injection",
|
||||
"description":"injection name, should correspond to an injection added in code",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
},
|
||||
{
|
||||
"name":"one_shot",
|
||||
"description":"boolean flag indicating whether the injection should be enabled to trigger only once",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"boolean",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"method":"DELETE",
|
||||
"summary":"Deactivate an injection previously activated by the API",
|
||||
"type":"void",
|
||||
"nickname":"disable_injection",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"injection",
|
||||
"description":"injection name",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/v2/error_injection/injection",
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"List all enabled injections on all shards, i.e. injections that will trigger an error in the code",
|
||||
"type":"array",
|
||||
"items":{
|
||||
"type":"string"
|
||||
},
|
||||
"nickname":"get_enabled_injections_on_all",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[]
|
||||
},
|
||||
{
|
||||
"method":"DELETE",
|
||||
"summary":"Deactivate all injections previously activated on all shards by the API",
|
||||
"type":"void",
|
||||
"nickname":"disable_on_all",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -110,7 +110,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get count down endpoint",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_down_endpoint_count",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -126,7 +126,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get count up endpoint",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_up_endpoint_count",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -180,11 +180,11 @@
|
||||
"description": "The endpoint address"
|
||||
},
|
||||
"generation": {
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"description": "The heart beat generation"
|
||||
},
|
||||
"version": {
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"description": "The heart beat version"
|
||||
},
|
||||
"update_time": {
|
||||
@@ -209,7 +209,7 @@
|
||||
"description": "Holds a version value for an application state",
|
||||
"properties": {
|
||||
"application_state": {
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"description": "The application state enum index"
|
||||
},
|
||||
"value": {
|
||||
@@ -217,7 +217,7 @@
|
||||
"description": "The version value"
|
||||
},
|
||||
"version": {
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"description": "The application state version"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -75,7 +75,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Returns files which are pending for archival attempt. Does NOT include failed archive attempts",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_current_generation_number",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -99,7 +99,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get heart beat version for a node",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_current_heart_beat_version",
|
||||
"produces":[
|
||||
"application/json"
|
||||
|
||||
@@ -99,7 +99,7 @@
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get create hint count",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_create_hint_count",
|
||||
"produces": [
|
||||
"application/json"
|
||||
@@ -123,7 +123,7 @@
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get not stored hints count",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_not_stored_hints_count",
|
||||
"produces": [
|
||||
"application/json"
|
||||
|
||||
@@ -191,7 +191,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get the version number",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_version",
|
||||
"produces":[
|
||||
"application/json"
|
||||
|
||||
@@ -105,7 +105,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get the max hint window",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_max_hint_window",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -128,7 +128,7 @@
|
||||
"description":"max hint window in ms",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
@@ -141,7 +141,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get max hints in progress",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_max_hints_in_progress",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -164,7 +164,7 @@
|
||||
"description":"max hints in progress",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
@@ -177,7 +177,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"get hints in progress",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_hints_in_progress",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -602,7 +602,7 @@
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get cas write metrics",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_cas_write_metrics_unfinished_commit",
|
||||
"produces": [
|
||||
"application/json"
|
||||
@@ -632,7 +632,7 @@
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get cas write metrics",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_cas_write_metrics_condition_not_met",
|
||||
"produces": [
|
||||
"application/json"
|
||||
@@ -641,28 +641,13 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path": "/storage_proxy/metrics/cas_write/failed_read_round_optimization",
|
||||
"operations": [
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get cas write metrics",
|
||||
"type": "long",
|
||||
"nickname": "get_cas_write_metrics_failed_read_round_optimization",
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"parameters": []
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path": "/storage_proxy/metrics/cas_read/unfinished_commit",
|
||||
"operations": [
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get cas read metrics",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_cas_read_metrics_unfinished_commit",
|
||||
"produces": [
|
||||
"application/json"
|
||||
@@ -686,13 +671,28 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path": "/storage_proxy/metrics/cas_read/condition_not_met",
|
||||
"operations": [
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get cas read metrics",
|
||||
"type": "int",
|
||||
"nickname": "get_cas_read_metrics_condition_not_met",
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"parameters": []
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path": "/storage_proxy/metrics/read/timeouts",
|
||||
"operations": [
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get read metrics",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_read_metrics_timeouts",
|
||||
"produces": [
|
||||
"application/json"
|
||||
@@ -707,7 +707,7 @@
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get read metrics",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_read_metrics_unavailables",
|
||||
"produces": [
|
||||
"application/json"
|
||||
@@ -842,7 +842,7 @@
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get range metrics",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_range_metrics_timeouts",
|
||||
"produces": [
|
||||
"application/json"
|
||||
@@ -857,7 +857,7 @@
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get range metrics",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_range_metrics_unavailables",
|
||||
"produces": [
|
||||
"application/json"
|
||||
@@ -902,7 +902,7 @@
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get write metrics",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_write_metrics_timeouts",
|
||||
"produces": [
|
||||
"application/json"
|
||||
@@ -917,7 +917,7 @@
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get write metrics",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_write_metrics_unavailables",
|
||||
"produces": [
|
||||
"application/json"
|
||||
@@ -1023,7 +1023,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get read latency",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_read_latency",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -1055,7 +1055,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get write latency",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_write_latency",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -1087,7 +1087,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get range latency",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_range_latency",
|
||||
"produces":[
|
||||
"application/json"
|
||||
|
||||
@@ -458,7 +458,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Return the generation value for this node.",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_current_generation_number",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -582,15 +582,7 @@
|
||||
},
|
||||
{
|
||||
"name":"kn",
|
||||
"description":"Comma seperated keyspaces name that their snapshot will be deleted",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"an optional table name that its snapshot will be deleted",
|
||||
"description":"Comma seperated keyspaces name to snapshot",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -654,7 +646,7 @@
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Trigger a cleanup of keys on a single keyspace",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"force_keyspace_cleanup",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -686,7 +678,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Scrub (deserialize + reserialize at the latest version, skipping bad rows if any) the given keyspace. If columnFamilies array is empty, all CFs are scrubbed. Scrubbed CFs will be snapshotted first, if disableSnapshot is false",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"scrub",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -734,7 +726,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Rewrite all sstables to the latest version. Unlike scrub, it doesn't skip bad rows and do not snapshot sstables first.",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"upgrade_sstables",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -808,7 +800,7 @@
|
||||
"summary":"Return an array with the ids of the currently active repairs",
|
||||
"type":"array",
|
||||
"items":{
|
||||
"type": "long"
|
||||
"type":"int"
|
||||
},
|
||||
"nickname":"get_active_repair_async",
|
||||
"produces":[
|
||||
@@ -824,7 +816,7 @@
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Invoke repair asynchronously. You can track repair progress by using the get supplying id",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"repair_async",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -955,7 +947,7 @@
|
||||
"description":"The repair ID to check for status",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
@@ -1285,18 +1277,18 @@
|
||||
},
|
||||
{
|
||||
"name":"dynamic_update_interval",
|
||||
"description":"interval in ms (default 100)",
|
||||
"description":"integer, in ms (default 100)",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"long",
|
||||
"type":"integer",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"dynamic_reset_interval",
|
||||
"description":"interval in ms (default 600,000)",
|
||||
"description":"integer, in ms (default 600,000)",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"long",
|
||||
"type":"integer",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
@@ -1501,7 +1493,7 @@
|
||||
"description":"Stream throughput",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
@@ -1509,7 +1501,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get stream throughput mb per sec",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_stream_throughput_mb_per_sec",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -1525,7 +1517,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"get compaction throughput mb per sec",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_compaction_throughput_mb_per_sec",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -1547,7 +1539,7 @@
|
||||
"description":"compaction throughput",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
@@ -1951,7 +1943,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Returns the threshold for warning of queries with many tombstones",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_tombstone_warn_threshold",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -1973,7 +1965,7 @@
|
||||
"description":"tombstone debug threshold",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
@@ -1986,7 +1978,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_tombstone_failure_threshold",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -2008,7 +2000,7 @@
|
||||
"description":"tombstone debug threshold",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
@@ -2021,7 +2013,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Returns the threshold for rejecting queries due to a large batch size",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_batch_size_failure_threshold",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -2043,7 +2035,7 @@
|
||||
"description":"batch size debug threshold",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
@@ -2067,7 +2059,7 @@
|
||||
"description":"throttle in kb",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
@@ -2080,7 +2072,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get load",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_metrics_load",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -2096,7 +2088,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get exceptions",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_exceptions",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -2112,7 +2104,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get total hints in progress",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_total_hints_in_progress",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -2128,7 +2120,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get total hints",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_total_hints",
|
||||
"produces":[
|
||||
"application/json"
|
||||
|
||||
@@ -32,7 +32,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get number of active outbound streams",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_all_active_streams_outbound",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -48,7 +48,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get total incoming bytes",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_total_incoming_bytes",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -72,7 +72,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get all total incoming bytes",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_all_total_incoming_bytes",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -88,7 +88,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get total outgoing bytes",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_total_outgoing_bytes",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -112,7 +112,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get all total outgoing bytes",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_all_total_outgoing_bytes",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -154,7 +154,7 @@
|
||||
"description":"The peer"
|
||||
},
|
||||
"session_index":{
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"description":"The session index"
|
||||
},
|
||||
"connecting":{
|
||||
@@ -211,7 +211,7 @@
|
||||
"description":"The ID"
|
||||
},
|
||||
"files":{
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"description":"Number of files to transfer. Can be 0 if nothing to transfer for some streaming request."
|
||||
},
|
||||
"total_size":{
|
||||
@@ -242,7 +242,7 @@
|
||||
"description":"The peer address"
|
||||
},
|
||||
"session_index":{
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"description":"The session index"
|
||||
},
|
||||
"file_name":{
|
||||
|
||||
@@ -52,21 +52,6 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/system/uptime_ms",
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get system uptime, in milliseconds",
|
||||
"type":"long",
|
||||
"nickname":"get_system_uptime",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/system/logger/{name}",
|
||||
"operations":[
|
||||
|
||||
16
api/api.cc
16
api/api.cc
@@ -36,7 +36,6 @@
|
||||
#include "endpoint_snitch.hh"
|
||||
#include "compaction_manager.hh"
|
||||
#include "hinted_handoff.hh"
|
||||
#include "error_injection.hh"
|
||||
#include <seastar/http/exception.hh>
|
||||
#include "stream_manager.hh"
|
||||
#include "system.hh"
|
||||
@@ -69,19 +68,13 @@ future<> set_server_init(http_context& ctx) {
|
||||
rb->set_api_doc(r);
|
||||
rb02->set_api_doc(r);
|
||||
rb02->register_api_file(r, "swagger20_header");
|
||||
set_config(rb02, ctx, r);
|
||||
rb->register_function(r, "system",
|
||||
"The system related API");
|
||||
set_system(ctx, r);
|
||||
});
|
||||
}
|
||||
|
||||
future<> set_server_config(http_context& ctx) {
|
||||
auto rb02 = std::make_shared < api_registry_builder20 > (ctx.api_doc, "/v2");
|
||||
return ctx.http_server.set_routes([&ctx, rb02](routes& r) {
|
||||
set_config(rb02, ctx, r);
|
||||
});
|
||||
}
|
||||
|
||||
static future<> register_api(http_context& ctx, const sstring& api_name,
|
||||
const sstring api_desc,
|
||||
std::function<void(http_context& ctx, routes& r)> f) {
|
||||
@@ -97,10 +90,6 @@ future<> set_server_storage_service(http_context& ctx) {
|
||||
return register_api(ctx, "storage_service", "The storage service API", set_storage_service);
|
||||
}
|
||||
|
||||
future<> set_server_snapshot(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { set_snapshot(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_snitch(http_context& ctx) {
|
||||
return register_api(ctx, "endpoint_snitch_info", "The endpoint snitch info API", set_endpoint_snitch);
|
||||
}
|
||||
@@ -164,9 +153,6 @@ future<> set_server_done(http_context& ctx) {
|
||||
rb->register_function(r, "collectd",
|
||||
"The collectd API");
|
||||
set_collectd(ctx, r);
|
||||
rb->register_function(r, "error_injection",
|
||||
"The error injection API");
|
||||
set_error_injection(ctx, r);
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -23,9 +23,6 @@
|
||||
#include "service/storage_proxy.hh"
|
||||
#include <seastar/http/httpd.hh>
|
||||
|
||||
namespace service { class load_meter; }
|
||||
namespace locator { class token_metadata; }
|
||||
|
||||
namespace api {
|
||||
|
||||
struct http_context {
|
||||
@@ -34,21 +31,15 @@ struct http_context {
|
||||
httpd::http_server_control http_server;
|
||||
distributed<database>& db;
|
||||
distributed<service::storage_proxy>& sp;
|
||||
service::load_meter& lmeter;
|
||||
sharded<locator::token_metadata>& token_metadata;
|
||||
|
||||
http_context(distributed<database>& _db,
|
||||
distributed<service::storage_proxy>& _sp,
|
||||
service::load_meter& _lm, sharded<locator::token_metadata>& _tm)
|
||||
: db(_db), sp(_sp), lmeter(_lm), token_metadata(_tm) {
|
||||
distributed<service::storage_proxy>& _sp)
|
||||
: db(_db), sp(_sp) {
|
||||
}
|
||||
};
|
||||
|
||||
future<> set_server_init(http_context& ctx);
|
||||
future<> set_server_config(http_context& ctx);
|
||||
future<> set_server_snitch(http_context& ctx);
|
||||
future<> set_server_storage_service(http_context& ctx);
|
||||
future<> set_server_snapshot(http_context& ctx);
|
||||
future<> set_server_gossip(http_context& ctx);
|
||||
future<> set_server_load_sstable(http_context& ctx);
|
||||
future<> set_server_messaging_service(http_context& ctx);
|
||||
|
||||
@@ -208,11 +208,9 @@ void set_cache_service(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cs::get_row_capacity.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return ctx.db.map_reduce0([](database& db) -> uint64_t {
|
||||
return db.row_cache_tracker().region().occupancy().used_space();
|
||||
}, uint64_t(0), std::plus<uint64_t>()).then([](const int64_t& res) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
return map_reduce_cf(ctx, uint64_t(0), [](const column_family& cf) {
|
||||
return cf.get_row_cache().get_cache_tracker().region().occupancy().used_space();
|
||||
}, std::plus<uint64_t>());
|
||||
});
|
||||
|
||||
cs::get_row_hits.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
@@ -253,19 +251,15 @@ void set_cache_service(http_context& ctx, routes& r) {
|
||||
cs::get_row_size.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
// In origin row size is the weighted size.
|
||||
// We currently do not support weights, so we use num entries instead
|
||||
return ctx.db.map_reduce0([](database& db) -> uint64_t {
|
||||
return db.row_cache_tracker().partitions();
|
||||
}, uint64_t(0), std::plus<uint64_t>()).then([](const int64_t& res) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
return map_reduce_cf(ctx, 0, [](const column_family& cf) {
|
||||
return cf.get_row_cache().partitions();
|
||||
}, std::plus<uint64_t>());
|
||||
});
|
||||
|
||||
cs::get_row_entries.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return ctx.db.map_reduce0([](database& db) -> uint64_t {
|
||||
return db.row_cache_tracker().partitions();
|
||||
}, uint64_t(0), std::plus<uint64_t>()).then([](const int64_t& res) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
return map_reduce_cf(ctx, 0, [](const column_family& cf) {
|
||||
return cf.get_row_cache().partitions();
|
||||
}, std::plus<uint64_t>());
|
||||
});
|
||||
|
||||
cs::get_counter_capacity.set(r, [] (std::unique_ptr<request> req) {
|
||||
|
||||
@@ -64,7 +64,7 @@ static const char* str_to_regex(const sstring& v) {
|
||||
void set_collectd(http_context& ctx, routes& r) {
|
||||
cd::get_collectd.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
|
||||
auto id = ::make_shared<scollectd::type_instance_id>(req->param["pluginid"],
|
||||
auto id = make_shared<scollectd::type_instance_id>(req->param["pluginid"],
|
||||
req->get_query_param("instance"), req->get_query_param("type"),
|
||||
req->get_query_param("type_instance"));
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@
|
||||
#include "sstables/sstables.hh"
|
||||
#include "utils/estimated_histogram.hh"
|
||||
#include <algorithm>
|
||||
#include "db/system_keyspace_view_types.hh"
|
||||
|
||||
#include "db/data_listeners.hh"
|
||||
|
||||
extern logging::logger apilog;
|
||||
@@ -53,7 +53,8 @@ std::tuple<sstring, sstring> parse_fully_qualified_cf_name(sstring name) {
|
||||
return std::make_tuple(name.substr(0, pos), name.substr(end));
|
||||
}
|
||||
|
||||
const utils::UUID& get_uuid(const sstring& ks, const sstring& cf, const database& db) {
|
||||
const utils::UUID& get_uuid(const sstring& name, const database& db) {
|
||||
auto [ks, cf] = parse_fully_qualified_cf_name(name);
|
||||
try {
|
||||
return db.find_uuid(ks, cf);
|
||||
} catch (std::out_of_range& e) {
|
||||
@@ -61,11 +62,6 @@ const utils::UUID& get_uuid(const sstring& ks, const sstring& cf, const database
|
||||
}
|
||||
}
|
||||
|
||||
const utils::UUID& get_uuid(const sstring& name, const database& db) {
|
||||
auto [ks, cf] = parse_fully_qualified_cf_name(name);
|
||||
return get_uuid(ks, cf, db);
|
||||
}
|
||||
|
||||
future<> foreach_column_family(http_context& ctx, const sstring& name, function<void(column_family&)> f) {
|
||||
auto uuid = get_uuid(name, ctx.db.local());
|
||||
|
||||
@@ -75,28 +71,28 @@ future<> foreach_column_family(http_context& ctx, const sstring& name, function<
|
||||
}
|
||||
|
||||
future<json::json_return_type> get_cf_stats(http_context& ctx, const sstring& name,
|
||||
int64_t column_family_stats::*f) {
|
||||
int64_t column_family::stats::*f) {
|
||||
return map_reduce_cf(ctx, name, int64_t(0), [f](const column_family& cf) {
|
||||
return cf.get_stats().*f;
|
||||
}, std::plus<int64_t>());
|
||||
}
|
||||
|
||||
future<json::json_return_type> get_cf_stats(http_context& ctx,
|
||||
int64_t column_family_stats::*f) {
|
||||
int64_t column_family::stats::*f) {
|
||||
return map_reduce_cf(ctx, int64_t(0), [f](const column_family& cf) {
|
||||
return cf.get_stats().*f;
|
||||
}, std::plus<int64_t>());
|
||||
}
|
||||
|
||||
static future<json::json_return_type> get_cf_stats_count(http_context& ctx, const sstring& name,
|
||||
utils::timed_rate_moving_average_and_histogram column_family_stats::*f) {
|
||||
utils::timed_rate_moving_average_and_histogram column_family::stats::*f) {
|
||||
return map_reduce_cf(ctx, name, int64_t(0), [f](const column_family& cf) {
|
||||
return (cf.get_stats().*f).hist.count;
|
||||
}, std::plus<int64_t>());
|
||||
}
|
||||
|
||||
static future<json::json_return_type> get_cf_stats_sum(http_context& ctx, const sstring& name,
|
||||
utils::timed_rate_moving_average_and_histogram column_family_stats::*f) {
|
||||
utils::timed_rate_moving_average_and_histogram column_family::stats::*f) {
|
||||
auto uuid = get_uuid(name, ctx.db.local());
|
||||
return ctx.db.map_reduce0([uuid, f](database& db) {
|
||||
// Histograms information is sample of the actual load
|
||||
@@ -112,14 +108,14 @@ static future<json::json_return_type> get_cf_stats_sum(http_context& ctx, const
|
||||
|
||||
|
||||
static future<json::json_return_type> get_cf_stats_count(http_context& ctx,
|
||||
utils::timed_rate_moving_average_and_histogram column_family_stats::*f) {
|
||||
utils::timed_rate_moving_average_and_histogram column_family::stats::*f) {
|
||||
return map_reduce_cf(ctx, int64_t(0), [f](const column_family& cf) {
|
||||
return (cf.get_stats().*f).hist.count;
|
||||
}, std::plus<int64_t>());
|
||||
}
|
||||
|
||||
static future<json::json_return_type> get_cf_histogram(http_context& ctx, const sstring& name,
|
||||
utils::timed_rate_moving_average_and_histogram column_family_stats::*f) {
|
||||
utils::timed_rate_moving_average_and_histogram column_family::stats::*f) {
|
||||
utils::UUID uuid = get_uuid(name, ctx.db.local());
|
||||
return ctx.db.map_reduce0([f, uuid](const database& p) {
|
||||
return (p.find_column_family(uuid).get_stats().*f).hist;},
|
||||
@@ -130,7 +126,7 @@ static future<json::json_return_type> get_cf_histogram(http_context& ctx, const
|
||||
});
|
||||
}
|
||||
|
||||
static future<json::json_return_type> get_cf_histogram(http_context& ctx, utils::timed_rate_moving_average_and_histogram column_family_stats::*f) {
|
||||
static future<json::json_return_type> get_cf_histogram(http_context& ctx, utils::timed_rate_moving_average_and_histogram column_family::stats::*f) {
|
||||
std::function<utils::ihistogram(const database&)> fun = [f] (const database& db) {
|
||||
utils::ihistogram res;
|
||||
for (auto i : db.get_column_families()) {
|
||||
@@ -146,7 +142,7 @@ static future<json::json_return_type> get_cf_histogram(http_context& ctx, utils:
|
||||
}
|
||||
|
||||
static future<json::json_return_type> get_cf_rate_and_histogram(http_context& ctx, const sstring& name,
|
||||
utils::timed_rate_moving_average_and_histogram column_family_stats::*f) {
|
||||
utils::timed_rate_moving_average_and_histogram column_family::stats::*f) {
|
||||
utils::UUID uuid = get_uuid(name, ctx.db.local());
|
||||
return ctx.db.map_reduce0([f, uuid](const database& p) {
|
||||
return (p.find_column_family(uuid).get_stats().*f).rate();},
|
||||
@@ -157,7 +153,7 @@ static future<json::json_return_type> get_cf_rate_and_histogram(http_context& c
|
||||
});
|
||||
}
|
||||
|
||||
static future<json::json_return_type> get_cf_rate_and_histogram(http_context& ctx, utils::timed_rate_moving_average_and_histogram column_family_stats::*f) {
|
||||
static future<json::json_return_type> get_cf_rate_and_histogram(http_context& ctx, utils::timed_rate_moving_average_and_histogram column_family::stats::*f) {
|
||||
std::function<utils::rate_moving_average_and_histogram(const database&)> fun = [f] (const database& db) {
|
||||
utils::rate_moving_average_and_histogram res;
|
||||
for (auto i : db.get_column_families()) {
|
||||
@@ -254,11 +250,12 @@ class sum_ratio {
|
||||
uint64_t _n = 0;
|
||||
T _total = 0;
|
||||
public:
|
||||
void operator()(T value) {
|
||||
future<> operator()(T value) {
|
||||
if (value > 0) {
|
||||
_total += value;
|
||||
_n++;
|
||||
}
|
||||
return make_ready_future<>();
|
||||
}
|
||||
// Returns average value of all registered ratios.
|
||||
T get() && {
|
||||
@@ -407,11 +404,11 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cf::get_memtable_switch_count.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_stats(ctx,req->param["name"] ,&column_family_stats::memtable_switch_count);
|
||||
return get_cf_stats(ctx,req->param["name"] ,&column_family::stats::memtable_switch_count);
|
||||
});
|
||||
|
||||
cf::get_all_memtable_switch_count.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_stats(ctx, &column_family_stats::memtable_switch_count);
|
||||
return get_cf_stats(ctx, &column_family::stats::memtable_switch_count);
|
||||
});
|
||||
|
||||
// FIXME: this refers to partitions, not rows.
|
||||
@@ -456,67 +453,67 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cf::get_pending_flushes.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_stats(ctx,req->param["name"] ,&column_family_stats::pending_flushes);
|
||||
return get_cf_stats(ctx,req->param["name"] ,&column_family::stats::pending_flushes);
|
||||
});
|
||||
|
||||
cf::get_all_pending_flushes.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_stats(ctx, &column_family_stats::pending_flushes);
|
||||
return get_cf_stats(ctx, &column_family::stats::pending_flushes);
|
||||
});
|
||||
|
||||
cf::get_read.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_stats_count(ctx,req->param["name"] ,&column_family_stats::reads);
|
||||
return get_cf_stats_count(ctx,req->param["name"] ,&column_family::stats::reads);
|
||||
});
|
||||
|
||||
cf::get_all_read.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_stats_count(ctx, &column_family_stats::reads);
|
||||
return get_cf_stats_count(ctx, &column_family::stats::reads);
|
||||
});
|
||||
|
||||
cf::get_write.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_stats_count(ctx, req->param["name"] ,&column_family_stats::writes);
|
||||
return get_cf_stats_count(ctx, req->param["name"] ,&column_family::stats::writes);
|
||||
});
|
||||
|
||||
cf::get_all_write.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_stats_count(ctx, &column_family_stats::writes);
|
||||
return get_cf_stats_count(ctx, &column_family::stats::writes);
|
||||
});
|
||||
|
||||
cf::get_read_latency_histogram_depricated.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_histogram(ctx, req->param["name"], &column_family_stats::reads);
|
||||
return get_cf_histogram(ctx, req->param["name"], &column_family::stats::reads);
|
||||
});
|
||||
|
||||
cf::get_read_latency_histogram.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_rate_and_histogram(ctx, req->param["name"], &column_family_stats::reads);
|
||||
return get_cf_rate_and_histogram(ctx, req->param["name"], &column_family::stats::reads);
|
||||
});
|
||||
|
||||
cf::get_read_latency.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_stats_sum(ctx,req->param["name"] ,&column_family_stats::reads);
|
||||
return get_cf_stats_sum(ctx,req->param["name"] ,&column_family::stats::reads);
|
||||
});
|
||||
|
||||
cf::get_write_latency.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_stats_sum(ctx, req->param["name"] ,&column_family_stats::writes);
|
||||
return get_cf_stats_sum(ctx, req->param["name"] ,&column_family::stats::writes);
|
||||
});
|
||||
|
||||
cf::get_all_read_latency_histogram_depricated.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_histogram(ctx, &column_family_stats::writes);
|
||||
return get_cf_histogram(ctx, &column_family::stats::writes);
|
||||
});
|
||||
|
||||
cf::get_all_read_latency_histogram.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_rate_and_histogram(ctx, &column_family_stats::writes);
|
||||
return get_cf_rate_and_histogram(ctx, &column_family::stats::writes);
|
||||
});
|
||||
|
||||
cf::get_write_latency_histogram_depricated.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_histogram(ctx, req->param["name"], &column_family_stats::writes);
|
||||
return get_cf_histogram(ctx, req->param["name"], &column_family::stats::writes);
|
||||
});
|
||||
|
||||
cf::get_write_latency_histogram.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_rate_and_histogram(ctx, req->param["name"], &column_family_stats::writes);
|
||||
return get_cf_rate_and_histogram(ctx, req->param["name"], &column_family::stats::writes);
|
||||
});
|
||||
|
||||
cf::get_all_write_latency_histogram_depricated.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_histogram(ctx, &column_family_stats::writes);
|
||||
return get_cf_histogram(ctx, &column_family::stats::writes);
|
||||
});
|
||||
|
||||
cf::get_all_write_latency_histogram.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_rate_and_histogram(ctx, &column_family_stats::writes);
|
||||
return get_cf_rate_and_histogram(ctx, &column_family::stats::writes);
|
||||
});
|
||||
|
||||
cf::get_pending_compactions.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
@@ -532,11 +529,11 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cf::get_live_ss_table_count.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_stats(ctx, req->param["name"], &column_family_stats::live_sstable_count);
|
||||
return get_cf_stats(ctx, req->param["name"], &column_family::stats::live_sstable_count);
|
||||
});
|
||||
|
||||
cf::get_all_live_ss_table_count.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_stats(ctx, &column_family_stats::live_sstable_count);
|
||||
return get_cf_stats(ctx, &column_family::stats::live_sstable_count);
|
||||
});
|
||||
|
||||
cf::get_unleveled_sstables.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
@@ -795,25 +792,25 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
|
||||
});
|
||||
|
||||
cf::get_cas_prepare.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, req->param["name"], utils::estimated_histogram(0), [](column_family& cf) {
|
||||
return cf.get_stats().estimated_cas_prepare;
|
||||
},
|
||||
utils::estimated_histogram_merge, utils_json::estimated_histogram());
|
||||
cf::get_cas_prepare.set(r, [] (std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
//auto id = get_uuid(req->param["name"], ctx.db.local());
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
cf::get_cas_propose.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, req->param["name"], utils::estimated_histogram(0), [](column_family& cf) {
|
||||
return cf.get_stats().estimated_cas_accept;
|
||||
},
|
||||
utils::estimated_histogram_merge, utils_json::estimated_histogram());
|
||||
cf::get_cas_propose.set(r, [] (std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
//auto id = get_uuid(req->param["name"], ctx.db.local());
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
cf::get_cas_commit.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, req->param["name"], utils::estimated_histogram(0), [](column_family& cf) {
|
||||
return cf.get_stats().estimated_cas_learn;
|
||||
},
|
||||
utils::estimated_histogram_merge, utils_json::estimated_histogram());
|
||||
cf::get_cas_commit.set(r, [] (std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
//auto id = get_uuid(req->param["name"], ctx.db.local());
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
cf::get_sstables_per_read_histogram.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
@@ -824,11 +821,11 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cf::get_tombstone_scanned_histogram.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_histogram(ctx, req->param["name"], &column_family_stats::tombstone_scanned);
|
||||
return get_cf_histogram(ctx, req->param["name"], &column_family::stats::tombstone_scanned);
|
||||
});
|
||||
|
||||
cf::get_live_scanned_histogram.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_histogram(ctx, req->param["name"], &column_family_stats::live_scanned);
|
||||
return get_cf_histogram(ctx, req->param["name"], &column_family::stats::live_scanned);
|
||||
});
|
||||
|
||||
cf::get_col_update_time_delta_histogram.set(r, [] (std::unique_ptr<request> req) {
|
||||
@@ -839,49 +836,19 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
|
||||
cf::get_auto_compaction.set(r, [&ctx] (const_req req) {
|
||||
const utils::UUID& uuid = get_uuid(req.param["name"], ctx.db.local());
|
||||
column_family& cf = ctx.db.local().find_column_family(uuid);
|
||||
return !cf.is_auto_compaction_disabled_by_user();
|
||||
cf::is_auto_compaction_disabled.set(r, [] (const_req req) {
|
||||
// FIXME
|
||||
// currently auto compaction is disable
|
||||
// it should be changed when it would have an API
|
||||
return true;
|
||||
});
|
||||
|
||||
cf::enable_auto_compaction.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return foreach_column_family(ctx, req->param["name"], [](column_family &cf) {
|
||||
cf.enable_auto_compaction();
|
||||
}).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
cf::get_built_indexes.set(r, [](const_req) {
|
||||
// FIXME
|
||||
// Currently there are no index support
|
||||
return std::vector<sstring>();
|
||||
});
|
||||
|
||||
cf::disable_auto_compaction.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return foreach_column_family(ctx, req->param["name"], [](column_family &cf) {
|
||||
cf.disable_auto_compaction();
|
||||
}).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
cf::get_built_indexes.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
auto [ks, cf_name] = parse_fully_qualified_cf_name(req->param["name"]);
|
||||
return db::system_keyspace::load_view_build_progress().then([ks, cf_name, &ctx](const std::vector<db::system_keyspace::view_build_progress>& vb) mutable {
|
||||
std::set<sstring> vp;
|
||||
for (auto b : vb) {
|
||||
if (b.view.first == ks) {
|
||||
vp.insert(b.view.second);
|
||||
}
|
||||
}
|
||||
std::vector<sstring> res;
|
||||
auto uuid = get_uuid(ks, cf_name, ctx.db.local());
|
||||
column_family& cf = ctx.db.local().find_column_family(uuid);
|
||||
res.reserve(cf.get_index_manager().list_indexes().size());
|
||||
for (auto&& i : cf.get_index_manager().list_indexes()) {
|
||||
if (vp.find(secondary_index::index_table_name(i.metadata().name())) == vp.end()) {
|
||||
res.emplace_back(i.metadata().name());
|
||||
}
|
||||
}
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
});
|
||||
|
||||
cf::get_compression_metadata_off_heap_memory_used.set(r, [](const_req) {
|
||||
// FIXME
|
||||
@@ -1009,15 +976,5 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
});
|
||||
});
|
||||
|
||||
cf::force_major_compaction.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
if (req->get_query_param("split_output") != "") {
|
||||
fail(unimplemented::cause::API);
|
||||
}
|
||||
return foreach_column_family(ctx, req->param["name"], [](column_family &cf) {
|
||||
return cf.compact_all_sstables();
|
||||
}).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -109,9 +109,9 @@ future<json::json_return_type> map_reduce_cf(http_context& ctx, I init,
|
||||
}
|
||||
|
||||
future<json::json_return_type> get_cf_stats(http_context& ctx, const sstring& name,
|
||||
int64_t column_family_stats::*f);
|
||||
int64_t column_family::stats::*f);
|
||||
|
||||
future<json::json_return_type> get_cf_stats(http_context& ctx,
|
||||
int64_t column_family_stats::*f);
|
||||
int64_t column_family::stats::*f);
|
||||
|
||||
}
|
||||
|
||||
@@ -74,14 +74,13 @@ void set_compaction_manager(http_context& ctx, routes& r) {
|
||||
|
||||
cm::get_pending_tasks_by_table.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return ctx.db.map_reduce0([&ctx](database& db) {
|
||||
return do_with(std::unordered_map<std::pair<sstring, sstring>, uint64_t, utils::tuple_hash>(), [&ctx, &db](std::unordered_map<std::pair<sstring, sstring>, uint64_t, utils::tuple_hash>& tasks) {
|
||||
return do_for_each(db.get_column_families(), [&tasks](const std::pair<utils::UUID, seastar::lw_shared_ptr<table>>& i) {
|
||||
table& cf = *i.second.get();
|
||||
tasks[std::make_pair(cf.schema()->ks_name(), cf.schema()->cf_name())] = cf.get_compaction_strategy().estimated_pending_compactions(cf);
|
||||
return make_ready_future<>();
|
||||
}).then([&tasks] {
|
||||
return std::move(tasks);
|
||||
});
|
||||
std::unordered_map<std::pair<sstring, sstring>, uint64_t, utils::tuple_hash> tasks;
|
||||
return do_for_each(db.get_column_families(), [&tasks](const std::pair<utils::UUID, seastar::lw_shared_ptr<table>>& i) {
|
||||
table& cf = *i.second.get();
|
||||
tasks[std::make_pair(cf.schema()->ks_name(), cf.schema()->cf_name())] = cf.get_compaction_strategy().estimated_pending_compactions(cf);
|
||||
return make_ready_future<>();
|
||||
}).then([&tasks] {
|
||||
return tasks;
|
||||
});
|
||||
}, std::unordered_map<std::pair<sstring, sstring>, uint64_t, utils::tuple_hash>(), sum_pending_tasks).then(
|
||||
[](const std::unordered_map<std::pair<sstring, sstring>, uint64_t, utils::tuple_hash>& task_map) {
|
||||
|
||||
@@ -1,69 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2020 ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "api/api-doc/error_injection.json.hh"
|
||||
#include "api/api.hh"
|
||||
|
||||
#include <seastar/http/exception.hh>
|
||||
#include "log.hh"
|
||||
#include "utils/error_injection.hh"
|
||||
#include "seastar/core/future-util.hh"
|
||||
|
||||
namespace api {
|
||||
|
||||
namespace hf = httpd::error_injection_json;
|
||||
|
||||
void set_error_injection(http_context& ctx, routes& r) {
|
||||
|
||||
hf::enable_injection.set(r, [](std::unique_ptr<request> req) {
|
||||
sstring injection = req->param["injection"];
|
||||
bool one_shot = req->get_query_param("one_shot") == "True";
|
||||
auto& errinj = utils::get_local_injector();
|
||||
return errinj.enable_on_all(injection, one_shot).then([] {
|
||||
return make_ready_future<json::json_return_type>(json::json_void());
|
||||
});
|
||||
});
|
||||
|
||||
hf::get_enabled_injections_on_all.set(r, [](std::unique_ptr<request> req) {
|
||||
auto& errinj = utils::get_local_injector();
|
||||
auto ret = errinj.enabled_injections_on_all();
|
||||
return make_ready_future<json::json_return_type>(ret);
|
||||
});
|
||||
|
||||
hf::disable_injection.set(r, [](std::unique_ptr<request> req) {
|
||||
sstring injection = req->param["injection"];
|
||||
|
||||
auto& errinj = utils::get_local_injector();
|
||||
return errinj.disable_on_all(injection).then([] {
|
||||
return make_ready_future<json::json_return_type>(json::json_void());
|
||||
});
|
||||
});
|
||||
|
||||
hf::disable_on_all.set(r, [](std::unique_ptr<request> req) {
|
||||
auto& errinj = utils::get_local_injector();
|
||||
return errinj.disable_on_all().then([] {
|
||||
return make_ready_future<json::json_return_type>(json::json_void());
|
||||
});
|
||||
});
|
||||
|
||||
}
|
||||
|
||||
} // namespace api
|
||||
@@ -1,30 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2019 ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "api.hh"
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_error_injection(http_context& ctx, routes& r);
|
||||
|
||||
}
|
||||
@@ -27,7 +27,6 @@
|
||||
#include "db/config.hh"
|
||||
#include "utils/histogram.hh"
|
||||
#include "database.hh"
|
||||
#include "seastar/core/scheduling_specific.hh"
|
||||
|
||||
namespace api {
|
||||
|
||||
@@ -35,70 +34,12 @@ namespace sp = httpd::storage_proxy_json;
|
||||
using proxy = service::storage_proxy;
|
||||
using namespace json;
|
||||
|
||||
|
||||
/**
|
||||
* This function implement a two dimentional map reduce where
|
||||
* the first level is a distributed storage_proxy class and the
|
||||
* second level is the stats per scheduling group class.
|
||||
* @param d - a reference to the storage_proxy distributed class.
|
||||
* @param mapper - the internal mapper that is used to map the internal
|
||||
* stat class into a value of type `V`.
|
||||
* @param reducer - the reducer that is used in both outer and inner
|
||||
* aggregations.
|
||||
* @param initial_value - the initial value to use for both aggregations
|
||||
* @return A future that resolves to the result of the aggregation.
|
||||
*/
|
||||
template<typename V, typename Reducer, typename InnerMapper>
|
||||
future<V> two_dimensional_map_reduce(distributed<service::storage_proxy>& d,
|
||||
InnerMapper mapper, Reducer reducer, V initial_value) {
|
||||
return d.map_reduce0( [mapper, reducer, initial_value] (const service::storage_proxy& sp) {
|
||||
return map_reduce_scheduling_group_specific<service::storage_proxy_stats::stats>(
|
||||
mapper, reducer, initial_value, sp.get_stats_key());
|
||||
}, initial_value, reducer);
|
||||
static future<utils::rate_moving_average> sum_timed_rate(distributed<proxy>& d, utils::timed_rate_moving_average proxy::stats::*f) {
|
||||
return d.map_reduce0([f](const proxy& p) {return (p.get_stats().*f).rate();}, utils::rate_moving_average(),
|
||||
std::plus<utils::rate_moving_average>());
|
||||
}
|
||||
|
||||
/**
|
||||
* This function implement a two dimentional map reduce where
|
||||
* the first level is a distributed storage_proxy class and the
|
||||
* second level is the stats per scheduling group class.
|
||||
* @param d - a reference to the storage_proxy distributed class.
|
||||
* @param f - a field pointer which is the implicit internal reducer.
|
||||
* @param reducer - the reducer that is used in both outer and inner
|
||||
* aggregations.
|
||||
* @param initial_value - the initial value to use for both aggregations* @return
|
||||
* @return A future that resolves to the result of the aggregation.
|
||||
*/
|
||||
template<typename V, typename Reducer, typename F>
|
||||
future<V> two_dimensional_map_reduce(distributed<service::storage_proxy>& d,
|
||||
V F::*f, Reducer reducer, V initial_value) {
|
||||
return two_dimensional_map_reduce(d, [f] (F& stats) {
|
||||
return stats.*f;
|
||||
}, reducer, initial_value);
|
||||
}
|
||||
|
||||
/**
|
||||
* A partial Specialization of sum_stats for the storage proxy
|
||||
* case where the get stats function doesn't return a
|
||||
* stats object with fields but a per scheduling group
|
||||
* stats object, the name was also changed since functions
|
||||
* partial specialization is not supported in C++.
|
||||
*
|
||||
*/
|
||||
template<typename V, typename F>
|
||||
future<json::json_return_type> sum_stats_storage_proxy(distributed<proxy>& d, V F::*f) {
|
||||
return two_dimensional_map_reduce(d, [f] (F& stats) { return stats.*f; }, std::plus<V>(), V(0)).then([] (V val) {
|
||||
return make_ready_future<json::json_return_type>(val);
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
static future<utils::rate_moving_average> sum_timed_rate(distributed<proxy>& d, utils::timed_rate_moving_average service::storage_proxy_stats::stats::*f) {
|
||||
return two_dimensional_map_reduce(d, [f] (service::storage_proxy_stats::stats& stats) {
|
||||
return (stats.*f).rate();
|
||||
}, std::plus<utils::rate_moving_average>(), utils::rate_moving_average());
|
||||
}
|
||||
|
||||
static future<json::json_return_type> sum_timed_rate_as_obj(distributed<proxy>& d, utils::timed_rate_moving_average service::storage_proxy_stats::stats::*f) {
|
||||
static future<json::json_return_type> sum_timed_rate_as_obj(distributed<proxy>& d, utils::timed_rate_moving_average proxy::stats::*f) {
|
||||
return sum_timed_rate(d, f).then([](const utils::rate_moving_average& val) {
|
||||
httpd::utils_json::rate_moving_average m;
|
||||
m = val;
|
||||
@@ -110,72 +51,29 @@ httpd::utils_json::rate_moving_average_and_histogram get_empty_moving_average()
|
||||
return timer_to_json(utils::rate_moving_average_and_histogram());
|
||||
}
|
||||
|
||||
static future<json::json_return_type> sum_timed_rate_as_long(distributed<proxy>& d, utils::timed_rate_moving_average service::storage_proxy_stats::stats::*f) {
|
||||
static future<json::json_return_type> sum_timed_rate_as_long(distributed<proxy>& d, utils::timed_rate_moving_average proxy::stats::*f) {
|
||||
return sum_timed_rate(d, f).then([](const utils::rate_moving_average& val) {
|
||||
return make_ready_future<json::json_return_type>(val.count);
|
||||
});
|
||||
}
|
||||
|
||||
static future<json::json_return_type> sum_estimated_histogram(http_context& ctx, utils::estimated_histogram service::storage_proxy_stats::stats::*f) {
|
||||
|
||||
return two_dimensional_map_reduce(ctx.sp, f, utils::estimated_histogram_merge,
|
||||
utils::estimated_histogram()).then([](const utils::estimated_histogram& val) {
|
||||
static future<json::json_return_type> sum_estimated_histogram(http_context& ctx, utils::estimated_histogram proxy::stats::*f) {
|
||||
return ctx.sp.map_reduce0([f](const proxy& p) {return p.get_stats().*f;}, utils::estimated_histogram(),
|
||||
utils::estimated_histogram_merge).then([](const utils::estimated_histogram& val) {
|
||||
utils_json::estimated_histogram res;
|
||||
res = val;
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
}
|
||||
|
||||
static future<json::json_return_type> total_latency(http_context& ctx, utils::timed_rate_moving_average_and_histogram service::storage_proxy_stats::stats::*f) {
|
||||
return two_dimensional_map_reduce(ctx.sp, [f] (service::storage_proxy_stats::stats& stats) {
|
||||
return (stats.*f).hist.mean * (stats.*f).hist.count;
|
||||
}, std::plus<double>(), 0.0).then([](double val) {
|
||||
static future<json::json_return_type> total_latency(http_context& ctx, utils::timed_rate_moving_average_and_histogram proxy::stats::*f) {
|
||||
return ctx.sp.map_reduce0([f](const proxy& p) {return (p.get_stats().*f).hist.mean * (p.get_stats().*f).hist.count;}, 0.0,
|
||||
std::plus<double>()).then([](double val) {
|
||||
int64_t res = val;
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* A partial Specialization of sum_histogram_stats
|
||||
* for the storage proxy case where the get stats
|
||||
* function doesn't return a stats object with
|
||||
* fields but a per scheduling group stats object,
|
||||
* the name was also changed since function partial
|
||||
* specialization is not supported in C++.
|
||||
*/
|
||||
template<typename F>
|
||||
future<json::json_return_type>
|
||||
sum_histogram_stats_storage_proxy(distributed<proxy>& d,
|
||||
utils::timed_rate_moving_average_and_histogram F::*f) {
|
||||
return two_dimensional_map_reduce(d, [f] (service::storage_proxy_stats::stats& stats) {
|
||||
return (stats.*f).hist;
|
||||
}, std::plus<utils::ihistogram>(), utils::ihistogram()).
|
||||
then([](const utils::ihistogram& val) {
|
||||
return make_ready_future<json::json_return_type>(to_json(val));
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* A partial Specialization of sum_timer_stats for the
|
||||
* storage proxy case where the get stats function
|
||||
* doesn't return a stats object with fields but a
|
||||
* per scheduling group stats object, the name
|
||||
* was also changed since partial function specialization
|
||||
* is not supported in C++.
|
||||
*/
|
||||
template<typename F>
|
||||
future<json::json_return_type>
|
||||
sum_timer_stats_storage_proxy(distributed<proxy>& d,
|
||||
utils::timed_rate_moving_average_and_histogram F::*f) {
|
||||
|
||||
return two_dimensional_map_reduce(d, [f] (service::storage_proxy_stats::stats& stats) {
|
||||
return (stats.*f).rate();
|
||||
}, std::plus<utils::rate_moving_average_and_histogram>(),
|
||||
utils::rate_moving_average_and_histogram()).then([](const utils::rate_moving_average_and_histogram& val) {
|
||||
return make_ready_future<json::json_return_type>(timer_to_json(val));
|
||||
});
|
||||
}
|
||||
|
||||
void set_storage_proxy(http_context& ctx, routes& r) {
|
||||
sp::get_total_hints.set(r, [](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
@@ -183,9 +81,12 @@ void set_storage_proxy(http_context& ctx, routes& r) {
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
sp::get_hinted_handoff_enabled.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
auto enabled = ctx.db.local().get_config().hinted_handoff_enabled();
|
||||
return make_ready_future<json::json_return_type>(enabled);
|
||||
sp::get_hinted_handoff_enabled.set(r, [](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
// FIXME
|
||||
// hinted handoff is not supported currently,
|
||||
// so we should return false
|
||||
return make_ready_future<json::json_return_type>(false);
|
||||
});
|
||||
|
||||
sp::set_hinted_handoff_enabled.set(r, [](std::unique_ptr<request> req) {
|
||||
@@ -325,15 +226,15 @@ void set_storage_proxy(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
sp::get_read_repair_attempted.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_stats_storage_proxy(ctx.sp, &service::storage_proxy_stats::stats::read_repair_attempts);
|
||||
return sum_stats(ctx.sp, &proxy::stats::read_repair_attempts);
|
||||
});
|
||||
|
||||
sp::get_read_repair_repaired_blocking.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_stats_storage_proxy(ctx.sp, &service::storage_proxy_stats::stats::read_repair_repaired_blocking);
|
||||
return sum_stats(ctx.sp, &proxy::stats::read_repair_repaired_blocking);
|
||||
});
|
||||
|
||||
sp::get_read_repair_repaired_background.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_stats_storage_proxy(ctx.sp, &service::storage_proxy_stats::stats::read_repair_repaired_background);
|
||||
return sum_stats(ctx.sp, &proxy::stats::read_repair_repaired_background);
|
||||
});
|
||||
|
||||
sp::get_schema_versions.set(r, [](std::unique_ptr<request> req) {
|
||||
@@ -349,119 +250,151 @@ void set_storage_proxy(http_context& ctx, routes& r) {
|
||||
});
|
||||
});
|
||||
|
||||
sp::get_cas_read_timeouts.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_long(ctx.sp, &proxy::stats::cas_read_timeouts);
|
||||
sp::get_cas_read_timeouts.set(r, [](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
// FIXME
|
||||
// cas is not supported yet, so just return 0
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
sp::get_cas_read_unavailables.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_long(ctx.sp, &proxy::stats::cas_read_unavailables);
|
||||
sp::get_cas_read_unavailables.set(r, [](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
// FIXME
|
||||
// cas is not supported yet, so just return 0
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
sp::get_cas_write_timeouts.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_long(ctx.sp, &proxy::stats::cas_write_timeouts);
|
||||
sp::get_cas_write_timeouts.set(r, [](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
// FIXME
|
||||
// cas is not supported yet, so just return 0
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
sp::get_cas_write_unavailables.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_long(ctx.sp, &proxy::stats::cas_write_unavailables);
|
||||
sp::get_cas_write_unavailables.set(r, [](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
// FIXME
|
||||
// cas is not supported yet, so just return 0
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
sp::get_cas_write_metrics_unfinished_commit.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_stats(ctx.sp, &proxy::stats::cas_write_unfinished_commit);
|
||||
sp::get_cas_write_metrics_unfinished_commit.set(r, [](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
sp::get_cas_write_metrics_contention.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_estimated_histogram(ctx, &proxy::stats::cas_write_contention);
|
||||
sp::get_cas_write_metrics_contention.set(r, [](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
sp::get_cas_write_metrics_condition_not_met.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_stats(ctx.sp, &proxy::stats::cas_write_condition_not_met);
|
||||
sp::get_cas_write_metrics_condition_not_met.set(r, [](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
sp::get_cas_write_metrics_failed_read_round_optimization.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_stats(ctx.sp, &proxy::stats::cas_failed_read_round_optimization);
|
||||
sp::get_cas_read_metrics_unfinished_commit.set(r, [](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
sp::get_cas_read_metrics_unfinished_commit.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_stats(ctx.sp, &proxy::stats::cas_read_unfinished_commit);
|
||||
sp::get_cas_read_metrics_contention.set(r, [](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
sp::get_cas_read_metrics_contention.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_estimated_histogram(ctx, &proxy::stats::cas_read_contention);
|
||||
sp::get_cas_read_metrics_condition_not_met.set(r, [](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
sp::get_read_metrics_timeouts.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_long(ctx.sp, &service::storage_proxy_stats::stats::read_timeouts);
|
||||
return sum_timed_rate_as_long(ctx.sp, &proxy::stats::read_timeouts);
|
||||
});
|
||||
|
||||
sp::get_read_metrics_unavailables.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_long(ctx.sp, &service::storage_proxy_stats::stats::read_unavailables);
|
||||
return sum_timed_rate_as_long(ctx.sp, &proxy::stats::read_unavailables);
|
||||
});
|
||||
|
||||
sp::get_range_metrics_timeouts.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_long(ctx.sp, &service::storage_proxy_stats::stats::range_slice_timeouts);
|
||||
return sum_timed_rate_as_long(ctx.sp, &proxy::stats::range_slice_timeouts);
|
||||
});
|
||||
|
||||
sp::get_range_metrics_unavailables.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_long(ctx.sp, &service::storage_proxy_stats::stats::range_slice_unavailables);
|
||||
return sum_timed_rate_as_long(ctx.sp, &proxy::stats::range_slice_unavailables);
|
||||
});
|
||||
|
||||
sp::get_write_metrics_timeouts.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_long(ctx.sp, &service::storage_proxy_stats::stats::write_timeouts);
|
||||
return sum_timed_rate_as_long(ctx.sp, &proxy::stats::write_timeouts);
|
||||
});
|
||||
|
||||
sp::get_write_metrics_unavailables.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_long(ctx.sp, &service::storage_proxy_stats::stats::write_unavailables);
|
||||
return sum_timed_rate_as_long(ctx.sp, &proxy::stats::write_unavailables);
|
||||
});
|
||||
|
||||
sp::get_read_metrics_timeouts_rates.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_obj(ctx.sp, &service::storage_proxy_stats::stats::read_timeouts);
|
||||
return sum_timed_rate_as_obj(ctx.sp, &proxy::stats::read_timeouts);
|
||||
});
|
||||
|
||||
sp::get_read_metrics_unavailables_rates.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_obj(ctx.sp, &service::storage_proxy_stats::stats::read_unavailables);
|
||||
return sum_timed_rate_as_obj(ctx.sp, &proxy::stats::read_unavailables);
|
||||
});
|
||||
|
||||
sp::get_range_metrics_timeouts_rates.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_obj(ctx.sp, &service::storage_proxy_stats::stats::range_slice_timeouts);
|
||||
return sum_timed_rate_as_obj(ctx.sp, &proxy::stats::range_slice_timeouts);
|
||||
});
|
||||
|
||||
sp::get_range_metrics_unavailables_rates.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_obj(ctx.sp, &service::storage_proxy_stats::stats::range_slice_unavailables);
|
||||
return sum_timed_rate_as_obj(ctx.sp, &proxy::stats::range_slice_unavailables);
|
||||
});
|
||||
|
||||
sp::get_write_metrics_timeouts_rates.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_obj(ctx.sp, &service::storage_proxy_stats::stats::write_timeouts);
|
||||
return sum_timed_rate_as_obj(ctx.sp, &proxy::stats::write_timeouts);
|
||||
});
|
||||
|
||||
sp::get_write_metrics_unavailables_rates.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_obj(ctx.sp, &service::storage_proxy_stats::stats::write_unavailables);
|
||||
return sum_timed_rate_as_obj(ctx.sp, &proxy::stats::write_unavailables);
|
||||
});
|
||||
|
||||
sp::get_range_metrics_latency_histogram_depricated.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_histogram_stats_storage_proxy(ctx.sp, &service::storage_proxy_stats::stats::range);
|
||||
return sum_histogram_stats(ctx.sp, &proxy::stats::range);
|
||||
});
|
||||
|
||||
sp::get_write_metrics_latency_histogram_depricated.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_histogram_stats_storage_proxy(ctx.sp, &service::storage_proxy_stats::stats::write);
|
||||
return sum_histogram_stats(ctx.sp, &proxy::stats::write);
|
||||
});
|
||||
|
||||
sp::get_read_metrics_latency_histogram_depricated.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_histogram_stats_storage_proxy(ctx.sp, &service::storage_proxy_stats::stats::read);
|
||||
return sum_histogram_stats(ctx.sp, &proxy::stats::read);
|
||||
});
|
||||
|
||||
sp::get_range_metrics_latency_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timer_stats_storage_proxy(ctx.sp, &service::storage_proxy_stats::stats::range);
|
||||
return sum_timer_stats(ctx.sp, &proxy::stats::range);
|
||||
});
|
||||
|
||||
sp::get_write_metrics_latency_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timer_stats_storage_proxy(ctx.sp, &service::storage_proxy_stats::stats::write);
|
||||
return sum_timer_stats(ctx.sp, &proxy::stats::write);
|
||||
});
|
||||
sp::get_cas_write_metrics_latency_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timer_stats(ctx.sp, &proxy::stats::cas_write);
|
||||
//TBD
|
||||
// FIXME
|
||||
// cas is not supported yet, so just return empty moving average
|
||||
|
||||
return make_ready_future<json::json_return_type>(get_empty_moving_average());
|
||||
});
|
||||
|
||||
sp::get_cas_read_metrics_latency_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timer_stats(ctx.sp, &proxy::stats::cas_read);
|
||||
//TBD
|
||||
// FIXME
|
||||
// cas is not supported yet, so just return empty moving average
|
||||
|
||||
return make_ready_future<json::json_return_type>(get_empty_moving_average());
|
||||
});
|
||||
|
||||
sp::get_view_write_metrics_latency_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
@@ -473,30 +406,30 @@ void set_storage_proxy(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
sp::get_read_metrics_latency_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timer_stats_storage_proxy(ctx.sp, &service::storage_proxy_stats::stats::read);
|
||||
return sum_timer_stats(ctx.sp, &proxy::stats::read);
|
||||
});
|
||||
|
||||
sp::get_read_estimated_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_estimated_histogram(ctx, &service::storage_proxy_stats::stats::estimated_read);
|
||||
return sum_estimated_histogram(ctx, &proxy::stats::estimated_read);
|
||||
});
|
||||
|
||||
sp::get_read_latency.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return total_latency(ctx, &service::storage_proxy_stats::stats::read);
|
||||
return total_latency(ctx, &proxy::stats::read);
|
||||
});
|
||||
sp::get_write_estimated_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_estimated_histogram(ctx, &service::storage_proxy_stats::stats::estimated_write);
|
||||
return sum_estimated_histogram(ctx, &proxy::stats::estimated_write);
|
||||
});
|
||||
|
||||
sp::get_write_latency.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return total_latency(ctx, &service::storage_proxy_stats::stats::write);
|
||||
return total_latency(ctx, &proxy::stats::write);
|
||||
});
|
||||
|
||||
sp::get_range_estimated_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timer_stats_storage_proxy(ctx.sp, &service::storage_proxy_stats::stats::range);
|
||||
return sum_timer_stats(ctx.sp, &proxy::stats::range);
|
||||
});
|
||||
|
||||
sp::get_range_latency.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return total_latency(ctx, &service::storage_proxy_stats::stats::range);
|
||||
return total_latency(ctx, &proxy::stats::range);
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -27,7 +27,6 @@
|
||||
#include <boost/range/adaptor/map.hpp>
|
||||
#include <boost/range/adaptor/filtered.hpp>
|
||||
#include "service/storage_service.hh"
|
||||
#include "service/load_meter.hh"
|
||||
#include "db/commitlog/commitlog.hh"
|
||||
#include "gms/gossiper.hh"
|
||||
#include "db/system_keyspace.hh"
|
||||
@@ -42,6 +41,8 @@
|
||||
#include "database.hh"
|
||||
#include "db/extensions.hh"
|
||||
|
||||
sstables::sstable::version_types get_highest_supported_format();
|
||||
|
||||
namespace api {
|
||||
|
||||
namespace ss = httpd::storage_service_json;
|
||||
@@ -54,70 +55,57 @@ static sstring validate_keyspace(http_context& ctx, const parameters& param) {
|
||||
throw bad_param_exception("Keyspace " + param["keyspace"] + " Does not exist");
|
||||
}
|
||||
|
||||
static ss::token_range token_range_endpoints_to_json(const dht::token_range_endpoints& d) {
|
||||
ss::token_range r;
|
||||
r.start_token = d._start_token;
|
||||
r.end_token = d._end_token;
|
||||
r.endpoints = d._endpoints;
|
||||
r.rpc_endpoints = d._rpc_endpoints;
|
||||
for (auto det : d._endpoint_details) {
|
||||
ss::endpoint_detail ed;
|
||||
ed.host = det._host;
|
||||
ed.datacenter = det._datacenter;
|
||||
if (det._rack != "") {
|
||||
ed.rack = det._rack;
|
||||
}
|
||||
r.endpoint_details.push(ed);
|
||||
}
|
||||
return r;
|
||||
}
|
||||
|
||||
using ks_cf_func = std::function<future<json::json_return_type>(http_context&, std::unique_ptr<request>, sstring, std::vector<sstring>)>;
|
||||
|
||||
static auto wrap_ks_cf(http_context &ctx, ks_cf_func f) {
|
||||
return [&ctx, f = std::move(f)](std::unique_ptr<request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto column_families = split_cf(req->get_query_param("cf"));
|
||||
if (column_families.empty()) {
|
||||
column_families = map_keys(ctx.db.local().find_keyspace(keyspace).metadata().get()->cf_meta_data());
|
||||
}
|
||||
return f(ctx, std::move(req), std::move(keyspace), std::move(column_families));
|
||||
};
|
||||
}
|
||||
|
||||
future<> set_tables_autocompaction(http_context& ctx, const sstring &keyspace, std::vector<sstring> tables, bool enabled) {
|
||||
if (tables.empty()) {
|
||||
tables = map_keys(ctx.db.local().find_keyspace(keyspace).metadata().get()->cf_meta_data());
|
||||
}
|
||||
return ctx.db.invoke_on_all([keyspace, tables, enabled] (database& db) {
|
||||
return parallel_for_each(tables, [&db, keyspace, enabled](const sstring& table) mutable {
|
||||
column_family& cf = db.find_column_family(keyspace, table);
|
||||
if (enabled) {
|
||||
cf.enable_auto_compaction();
|
||||
} else {
|
||||
cf.disable_auto_compaction();
|
||||
static std::vector<ss::token_range> describe_ring(const sstring& keyspace) {
|
||||
std::vector<ss::token_range> res;
|
||||
for (auto d : service::get_local_storage_service().describe_ring(keyspace)) {
|
||||
ss::token_range r;
|
||||
r.start_token = d._start_token;
|
||||
r.end_token = d._end_token;
|
||||
r.endpoints = d._endpoints;
|
||||
r.rpc_endpoints = d._rpc_endpoints;
|
||||
for (auto det : d._endpoint_details) {
|
||||
ss::endpoint_detail ed;
|
||||
ed.host = det._host;
|
||||
ed.datacenter = det._datacenter;
|
||||
if (det._rack != "") {
|
||||
ed.rack = det._rack;
|
||||
}
|
||||
return make_ready_future<>();
|
||||
});
|
||||
});
|
||||
r.endpoint_details.push(ed);
|
||||
}
|
||||
res.push_back(r);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
void set_storage_service(http_context& ctx, routes& r) {
|
||||
using ks_cf_func = std::function<future<json::json_return_type>(std::unique_ptr<request>, sstring, std::vector<sstring>)>;
|
||||
|
||||
auto wrap_ks_cf = [&ctx](ks_cf_func f) {
|
||||
return [&ctx, f = std::move(f)](std::unique_ptr<request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto column_families = split_cf(req->get_query_param("cf"));
|
||||
if (column_families.empty()) {
|
||||
column_families = map_keys(ctx.db.local().find_keyspace(keyspace).metadata().get()->cf_meta_data());
|
||||
}
|
||||
return f(std::move(req), std::move(keyspace), std::move(column_families));
|
||||
};
|
||||
};
|
||||
|
||||
ss::local_hostid.set(r, [](std::unique_ptr<request> req) {
|
||||
return db::system_keyspace::get_local_host_id().then([](const utils::UUID& id) {
|
||||
return make_ready_future<json::json_return_type>(id.to_sstring());
|
||||
});
|
||||
});
|
||||
|
||||
ss::get_tokens.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return make_ready_future<json::json_return_type>(stream_range_as_array(ctx.token_metadata.local().sorted_tokens(), [](const dht::token& i) {
|
||||
ss::get_tokens.set(r, [] (std::unique_ptr<request> req) {
|
||||
return make_ready_future<json::json_return_type>(stream_range_as_array(service::get_local_storage_service().get_token_metadata().sorted_tokens(), [](const dht::token& i) {
|
||||
return boost::lexical_cast<std::string>(i);
|
||||
}));
|
||||
});
|
||||
|
||||
ss::get_node_tokens.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
ss::get_node_tokens.set(r, [] (std::unique_ptr<request> req) {
|
||||
gms::inet_address addr(req->param["endpoint"]);
|
||||
return make_ready_future<json::json_return_type>(stream_range_as_array(ctx.token_metadata.local().get_tokens(addr), [](const dht::token& i) {
|
||||
return make_ready_future<json::json_return_type>(stream_range_as_array(service::get_local_storage_service().get_token_metadata().get_tokens(addr), [](const dht::token& i) {
|
||||
return boost::lexical_cast<std::string>(i);
|
||||
}));
|
||||
});
|
||||
@@ -135,8 +123,8 @@ void set_storage_service(http_context& ctx, routes& r) {
|
||||
}));
|
||||
});
|
||||
|
||||
ss::get_leaving_nodes.set(r, [&ctx](const_req req) {
|
||||
return container_to_vec(ctx.token_metadata.local().get_leaving_endpoints());
|
||||
ss::get_leaving_nodes.set(r, [](const_req req) {
|
||||
return container_to_vec(service::get_local_storage_service().get_token_metadata().get_leaving_endpoints());
|
||||
});
|
||||
|
||||
ss::get_moving_nodes.set(r, [](const_req req) {
|
||||
@@ -144,8 +132,8 @@ void set_storage_service(http_context& ctx, routes& r) {
|
||||
return container_to_vec(addr);
|
||||
});
|
||||
|
||||
ss::get_joining_nodes.set(r, [&ctx](const_req req) {
|
||||
auto points = ctx.token_metadata.local().get_bootstrap_tokens();
|
||||
ss::get_joining_nodes.set(r, [](const_req req) {
|
||||
auto points = service::get_local_storage_service().get_token_metadata().get_bootstrap_tokens();
|
||||
std::unordered_set<sstring> addr;
|
||||
for (auto i: points) {
|
||||
addr.insert(boost::lexical_cast<std::string>(i.second));
|
||||
@@ -188,26 +176,27 @@ void set_storage_service(http_context& ctx, routes& r) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
|
||||
ss::describe_any_ring.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return make_ready_future<json::json_return_type>(stream_range_as_array(service::get_local_storage_service().describe_ring(""), token_range_endpoints_to_json));
|
||||
ss::describe_any_ring.set(r, [&ctx](const_req req) {
|
||||
return describe_ring("");
|
||||
});
|
||||
|
||||
ss::describe_ring.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
return make_ready_future<json::json_return_type>(stream_range_as_array(service::get_local_storage_service().describe_ring(keyspace), token_range_endpoints_to_json));
|
||||
ss::describe_ring.set(r, [&ctx](const_req req) {
|
||||
auto keyspace = validate_keyspace(ctx, req.param);
|
||||
return describe_ring(keyspace);
|
||||
});
|
||||
|
||||
ss::get_host_id_map.set(r, [&ctx](const_req req) {
|
||||
ss::get_host_id_map.set(r, [](const_req req) {
|
||||
std::vector<ss::mapper> res;
|
||||
return map_to_key_value(ctx.token_metadata.local().get_endpoint_to_host_id_map_for_reading(), res);
|
||||
return map_to_key_value(service::get_local_storage_service().
|
||||
get_token_metadata().get_endpoint_to_host_id_map_for_reading(), res);
|
||||
});
|
||||
|
||||
ss::get_load.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return get_cf_stats(ctx, &column_family_stats::live_disk_space_used);
|
||||
return get_cf_stats(ctx, &column_family::stats::live_disk_space_used);
|
||||
});
|
||||
|
||||
ss::get_load_map.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return ctx.lmeter.get_load_map().then([] (auto&& load_map) {
|
||||
ss::get_load_map.set(r, [] (std::unique_ptr<request> req) {
|
||||
return service::get_local_storage_service().get_load_map().then([] (auto&& load_map) {
|
||||
std::vector<ss::map_string_double> res;
|
||||
for (auto i : load_map) {
|
||||
ss::map_string_double val;
|
||||
@@ -232,6 +221,64 @@ void set_storage_service(http_context& ctx, routes& r) {
|
||||
req.get_query_param("key")));
|
||||
});
|
||||
|
||||
ss::get_snapshot_details.set(r, [](std::unique_ptr<request> req) {
|
||||
return service::get_local_storage_service().get_snapshot_details().then([] (auto result) {
|
||||
std::vector<ss::snapshots> res;
|
||||
for (auto& map: result) {
|
||||
ss::snapshots all_snapshots;
|
||||
all_snapshots.key = map.first;
|
||||
|
||||
std::vector<ss::snapshot> snapshot;
|
||||
for (auto& cf: map.second) {
|
||||
ss::snapshot s;
|
||||
s.ks = cf.ks;
|
||||
s.cf = cf.cf;
|
||||
s.live = cf.live;
|
||||
s.total = cf.total;
|
||||
snapshot.push_back(std::move(s));
|
||||
}
|
||||
all_snapshots.value = std::move(snapshot);
|
||||
res.push_back(std::move(all_snapshots));
|
||||
}
|
||||
return make_ready_future<json::json_return_type>(std::move(res));
|
||||
});
|
||||
});
|
||||
|
||||
ss::take_snapshot.set(r, [](std::unique_ptr<request> req) {
|
||||
auto tag = req->get_query_param("tag");
|
||||
auto column_family = req->get_query_param("cf");
|
||||
|
||||
std::vector<sstring> keynames = split(req->get_query_param("kn"), ",");
|
||||
|
||||
auto resp = make_ready_future<>();
|
||||
if (column_family.empty()) {
|
||||
resp = service::get_local_storage_service().take_snapshot(tag, keynames);
|
||||
} else {
|
||||
if (keynames.size() > 1) {
|
||||
throw httpd::bad_param_exception("Only one keyspace allowed when specifying a column family");
|
||||
}
|
||||
resp = service::get_local_storage_service().take_column_family_snapshot(keynames[0], column_family, tag);
|
||||
}
|
||||
return resp.then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
ss::del_snapshot.set(r, [](std::unique_ptr<request> req) {
|
||||
auto tag = req->get_query_param("tag");
|
||||
|
||||
std::vector<sstring> keynames = split(req->get_query_param("kn"), ",");
|
||||
return service::get_local_storage_service().clear_snapshot(tag, keynames).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
ss::true_snapshots_size.set(r, [](std::unique_ptr<request> req) {
|
||||
return service::get_local_storage_service().true_snapshots_size().then([] (int64_t size) {
|
||||
return make_ready_future<json::json_return_type>(size);
|
||||
});
|
||||
});
|
||||
|
||||
ss::force_keyspace_compaction.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto column_families = split_cf(req->get_query_param("cf"));
|
||||
@@ -257,28 +304,46 @@ void set_storage_service(http_context& ctx, routes& r) {
|
||||
if (column_families.empty()) {
|
||||
column_families = map_keys(ctx.db.local().find_keyspace(keyspace).metadata().get()->cf_meta_data());
|
||||
}
|
||||
return service::get_local_storage_service().is_cleanup_allowed(keyspace).then([&ctx, keyspace,
|
||||
column_families = std::move(column_families)] (bool is_cleanup_allowed) mutable {
|
||||
if (!is_cleanup_allowed) {
|
||||
return make_exception_future<json::json_return_type>(
|
||||
std::runtime_error("Can not perform cleanup operation when topology changes"));
|
||||
return ctx.db.invoke_on_all([keyspace, column_families] (database& db) {
|
||||
std::vector<column_family*> column_families_vec;
|
||||
auto& cm = db.get_compaction_manager();
|
||||
for (auto cf : column_families) {
|
||||
column_families_vec.push_back(&db.find_column_family(keyspace, cf));
|
||||
}
|
||||
return ctx.db.invoke_on_all([keyspace, column_families] (database& db) {
|
||||
std::vector<column_family*> column_families_vec;
|
||||
auto& cm = db.get_compaction_manager();
|
||||
for (auto cf : column_families) {
|
||||
column_families_vec.push_back(&db.find_column_family(keyspace, cf));
|
||||
}
|
||||
return parallel_for_each(column_families_vec, [&cm, &db] (column_family* cf) {
|
||||
return cm.perform_cleanup(db, cf);
|
||||
});
|
||||
}).then([]{
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
return parallel_for_each(column_families_vec, [&cm] (column_family* cf) {
|
||||
return cm.perform_cleanup(cf);
|
||||
});
|
||||
}).then([]{
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
});
|
||||
|
||||
ss::upgrade_sstables.set(r, wrap_ks_cf(ctx, [] (http_context& ctx, std::unique_ptr<request> req, sstring keyspace, std::vector<sstring> column_families) {
|
||||
ss::scrub.set(r, wrap_ks_cf([&ctx](std::unique_ptr<request> req, sstring keyspace, std::vector<sstring> column_families) {
|
||||
// TODO: respect this
|
||||
auto skip_corrupted = req->get_query_param("skip_corrupted");
|
||||
|
||||
auto f = make_ready_future<>();
|
||||
if (!req_param<bool>(*req, "disable_snapshot", false)) {
|
||||
auto tag = format("pre-scrub-{:d}", db_clock::now().time_since_epoch().count());
|
||||
f = parallel_for_each(column_families, [keyspace, tag](sstring cf) {
|
||||
return service::get_local_storage_service().take_column_family_snapshot(keyspace, cf, tag);
|
||||
});
|
||||
}
|
||||
|
||||
return f.then([&ctx, keyspace, column_families] {
|
||||
return ctx.db.invoke_on_all([=] (database& db) {
|
||||
return do_for_each(column_families, [=, &db](sstring cfname) {
|
||||
auto& cm = db.get_compaction_manager();
|
||||
auto& cf = db.find_column_family(keyspace, cfname);
|
||||
return cm.perform_sstable_scrub(&cf);
|
||||
});
|
||||
});
|
||||
}).then([]{
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
}));
|
||||
|
||||
ss::upgrade_sstables.set(r, wrap_ks_cf([&ctx](std::unique_ptr<request> req, sstring keyspace, std::vector<sstring> column_families) {
|
||||
bool exclude_current_version = req_param<bool>(*req, "exclude_current_version", false);
|
||||
|
||||
return ctx.db.invoke_on_all([=] (database& db) {
|
||||
@@ -533,7 +598,9 @@ void set_storage_service(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
ss::join_ring.set(r, [](std::unique_ptr<request> req) {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
return service::get_local_storage_service().join_ring().then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
ss::is_joined.set(r, [] (std::unique_ptr<request> req) {
|
||||
@@ -661,7 +728,7 @@ void set_storage_service(http_context& ctx, routes& r) {
|
||||
|
||||
ss::set_trace_probability.set(r, [](std::unique_ptr<request> req) {
|
||||
auto probability = req->get_query_param("probability");
|
||||
return futurize_invoke([probability] {
|
||||
return futurize<json::json_return_type>::apply([probability] {
|
||||
double real_prob = std::stod(probability.c_str());
|
||||
return tracing::tracing::tracing_instance().invoke_on_all([real_prob] (auto& local_tracing) {
|
||||
local_tracing.set_trace_probability(real_prob);
|
||||
@@ -716,19 +783,19 @@ void set_storage_service(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
ss::enable_auto_compaction.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto tables = split_cf(req->get_query_param("cf"));
|
||||
return set_tables_autocompaction(ctx, keyspace, tables, true).then([]{
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
auto column_family = req->get_query_param("cf");
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
ss::disable_auto_compaction.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto tables = split_cf(req->get_query_param("cf"));
|
||||
return set_tables_autocompaction(ctx, keyspace, tables, false).then([]{
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
auto column_family = req->get_query_param("cf");
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
ss::deliver_hints.set(r, [](std::unique_ptr<request> req) {
|
||||
@@ -793,7 +860,7 @@ void set_storage_service(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
ss::get_metrics_load.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return get_cf_stats(ctx, &column_family_stats::live_disk_space_used);
|
||||
return get_cf_stats(ctx, &column_family::stats::live_disk_space_used);
|
||||
});
|
||||
|
||||
ss::get_exceptions.set(r, [](const_req req) {
|
||||
@@ -964,107 +1031,4 @@ void set_storage_service(http_context& ctx, routes& r) {
|
||||
|
||||
}
|
||||
|
||||
void set_snapshot(http_context& ctx, routes& r) {
|
||||
ss::get_snapshot_details.set(r, [](std::unique_ptr<request> req) {
|
||||
std::function<future<>(output_stream<char>&&)> f = [](output_stream<char>&& s) {
|
||||
return do_with(output_stream<char>(std::move(s)), true, [] (output_stream<char>& s, bool& first){
|
||||
return s.write("[").then([&s, &first] {
|
||||
return service::get_local_storage_service().get_snapshot_details().then([&s, &first] (std::unordered_map<sstring, std::vector<service::storage_service::snapshot_details>>&& result) {
|
||||
return do_with(std::move(result), [&s, &first](const std::unordered_map<sstring, std::vector<service::storage_service::snapshot_details>>& result) {
|
||||
return do_for_each(result, [&s, &result,&first](std::tuple<sstring, std::vector<service::storage_service::snapshot_details>>&& map){
|
||||
return do_with(ss::snapshots(), [&s, &first, &result, &map](ss::snapshots& all_snapshots) {
|
||||
all_snapshots.key = std::get<0>(map);
|
||||
future<> f = first ? make_ready_future<>() : s.write(", ");
|
||||
first = false;
|
||||
std::vector<ss::snapshot> snapshot;
|
||||
for (auto& cf: std::get<1>(map)) {
|
||||
ss::snapshot snp;
|
||||
snp.ks = cf.ks;
|
||||
snp.cf = cf.cf;
|
||||
snp.live = cf.live;
|
||||
snp.total = cf.total;
|
||||
snapshot.push_back(std::move(snp));
|
||||
}
|
||||
all_snapshots.value = std::move(snapshot);
|
||||
return f.then([&s, &all_snapshots] {
|
||||
return all_snapshots.write(s);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
}).then([&s] {
|
||||
return s.write("]").then([&s] {
|
||||
return s.close();
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
};
|
||||
return make_ready_future<json::json_return_type>(std::move(f));
|
||||
});
|
||||
|
||||
ss::take_snapshot.set(r, [](std::unique_ptr<request> req) {
|
||||
auto tag = req->get_query_param("tag");
|
||||
auto column_family = req->get_query_param("cf");
|
||||
|
||||
std::vector<sstring> keynames = split(req->get_query_param("kn"), ",");
|
||||
|
||||
auto resp = make_ready_future<>();
|
||||
if (column_family.empty()) {
|
||||
resp = service::get_local_storage_service().take_snapshot(tag, keynames);
|
||||
} else {
|
||||
if (keynames.empty()) {
|
||||
throw httpd::bad_param_exception("The keyspace of column families must be specified");
|
||||
}
|
||||
if (keynames.size() > 1) {
|
||||
throw httpd::bad_param_exception("Only one keyspace allowed when specifying a column family");
|
||||
}
|
||||
resp = service::get_local_storage_service().take_column_family_snapshot(keynames[0], column_family, tag);
|
||||
}
|
||||
return resp.then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
ss::del_snapshot.set(r, [](std::unique_ptr<request> req) {
|
||||
auto tag = req->get_query_param("tag");
|
||||
auto column_family = req->get_query_param("cf");
|
||||
|
||||
std::vector<sstring> keynames = split(req->get_query_param("kn"), ",");
|
||||
return service::get_local_storage_service().clear_snapshot(tag, keynames, column_family).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
ss::true_snapshots_size.set(r, [](std::unique_ptr<request> req) {
|
||||
return service::get_local_storage_service().true_snapshots_size().then([] (int64_t size) {
|
||||
return make_ready_future<json::json_return_type>(size);
|
||||
});
|
||||
});
|
||||
|
||||
ss::scrub.set(r, wrap_ks_cf(ctx, [] (http_context& ctx, std::unique_ptr<request> req, sstring keyspace, std::vector<sstring> column_families) {
|
||||
const auto skip_corrupted = req_param<bool>(*req, "skip_corrupted", false);
|
||||
|
||||
auto f = make_ready_future<>();
|
||||
if (!req_param<bool>(*req, "disable_snapshot", false)) {
|
||||
auto tag = format("pre-scrub-{:d}", db_clock::now().time_since_epoch().count());
|
||||
f = parallel_for_each(column_families, [keyspace, tag](sstring cf) {
|
||||
return service::get_local_storage_service().take_column_family_snapshot(keyspace, cf, tag);
|
||||
});
|
||||
}
|
||||
|
||||
return f.then([&ctx, keyspace, column_families, skip_corrupted] {
|
||||
return ctx.db.invoke_on_all([=] (database& db) {
|
||||
return do_for_each(column_families, [=, &db](sstring cfname) {
|
||||
auto& cm = db.get_compaction_manager();
|
||||
auto& cf = db.find_column_family(keyspace, cfname);
|
||||
return cm.perform_sstable_scrub(&cf, skip_corrupted);
|
||||
});
|
||||
});
|
||||
}).then([]{
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
}));
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -26,6 +26,5 @@
|
||||
namespace api {
|
||||
|
||||
void set_storage_service(http_context& ctx, routes& r);
|
||||
void set_snapshot(http_context& ctx, routes& r);
|
||||
|
||||
}
|
||||
|
||||
@@ -22,7 +22,6 @@
|
||||
#include "api/api-doc/system.json.hh"
|
||||
#include "api/api.hh"
|
||||
|
||||
#include <seastar/core/reactor.hh>
|
||||
#include <seastar/http/exception.hh>
|
||||
#include "log.hh"
|
||||
|
||||
@@ -31,10 +30,6 @@ namespace api {
|
||||
namespace hs = httpd::system_json;
|
||||
|
||||
void set_system(http_context& ctx, routes& r) {
|
||||
hs::get_system_uptime.set(r, [](const_req req) {
|
||||
return std::chrono::duration_cast<std::chrono::milliseconds>(engine().uptime()).count();
|
||||
});
|
||||
|
||||
hs::get_all_logger_names.set(r, [](const_req req) {
|
||||
return logging::logger_registry().get_all_logger_names();
|
||||
});
|
||||
|
||||
@@ -21,8 +21,8 @@
|
||||
|
||||
#include "atomic_cell.hh"
|
||||
#include "atomic_cell_or_collection.hh"
|
||||
#include "counters.hh"
|
||||
#include "types.hh"
|
||||
#include "types/collection.hh"
|
||||
|
||||
/// LSA mirator for cells with irrelevant type
|
||||
///
|
||||
@@ -148,6 +148,35 @@ atomic_cell_or_collection::atomic_cell_or_collection(const abstract_type& type,
|
||||
{
|
||||
}
|
||||
|
||||
static collection_mutation_view get_collection_mutation_view(const uint8_t* ptr)
|
||||
{
|
||||
auto f = data::cell::structure::get_member<data::cell::tags::flags>(ptr);
|
||||
auto ti = data::type_info::make_collection();
|
||||
data::cell::context ctx(f, ti);
|
||||
auto view = data::cell::structure::get_member<data::cell::tags::cell>(ptr).as<data::cell::tags::collection>(ctx);
|
||||
auto dv = data::cell::variable_value::make_view(view, f.get<data::cell::tags::external_data>());
|
||||
return collection_mutation_view { dv };
|
||||
}
|
||||
|
||||
collection_mutation_view atomic_cell_or_collection::as_collection_mutation() const {
|
||||
return get_collection_mutation_view(_data.get());
|
||||
}
|
||||
|
||||
collection_mutation::collection_mutation(const collection_type_impl& type, collection_mutation_view v)
|
||||
: _data(imr_object_type::make(data::cell::make_collection(v.data), &type.imr_state().lsa_migrator()))
|
||||
{
|
||||
}
|
||||
|
||||
collection_mutation::collection_mutation(const collection_type_impl& type, bytes_view v)
|
||||
: _data(imr_object_type::make(data::cell::make_collection(v), &type.imr_state().lsa_migrator()))
|
||||
{
|
||||
}
|
||||
|
||||
collection_mutation::operator collection_mutation_view() const
|
||||
{
|
||||
return get_collection_mutation_view(_data.get());
|
||||
}
|
||||
|
||||
bool atomic_cell_or_collection::equals(const abstract_type& type, const atomic_cell_or_collection& other) const
|
||||
{
|
||||
auto ptr_a = _data.get();
|
||||
@@ -202,7 +231,7 @@ size_t atomic_cell_or_collection::external_memory_usage(const abstract_type& t)
|
||||
size_t external_value_size = 0;
|
||||
if (flags.get<data::cell::tags::external_data>()) {
|
||||
if (flags.get<data::cell::tags::collection>()) {
|
||||
external_value_size = as_collection_mutation().data.size_bytes();
|
||||
external_value_size = get_collection_mutation_view(_data.get()).data.size_bytes();
|
||||
} else {
|
||||
auto cell_view = data::cell::atomic_cell_view(t.imr_state().type_info(), view);
|
||||
external_value_size = cell_view.value_size();
|
||||
@@ -215,61 +244,6 @@ size_t atomic_cell_or_collection::external_memory_usage(const abstract_type& t)
|
||||
+ imr_object_type::size_overhead + external_value_size;
|
||||
}
|
||||
|
||||
std::ostream&
|
||||
operator<<(std::ostream& os, const atomic_cell_view& acv) {
|
||||
if (acv.is_live()) {
|
||||
return fmt_print(os, "atomic_cell{{{},ts={:d},expiry={:d},ttl={:d}}}",
|
||||
acv.is_counter_update()
|
||||
? "counter_update_value=" + to_sstring(acv.counter_update_value())
|
||||
: to_hex(acv.value().linearize()),
|
||||
acv.timestamp(),
|
||||
acv.is_live_and_has_ttl() ? acv.expiry().time_since_epoch().count() : -1,
|
||||
acv.is_live_and_has_ttl() ? acv.ttl().count() : 0);
|
||||
} else {
|
||||
return fmt_print(os, "atomic_cell{{DEAD,ts={:d},deletion_time={:d}}}",
|
||||
acv.timestamp(), acv.deletion_time().time_since_epoch().count());
|
||||
}
|
||||
}
|
||||
|
||||
std::ostream&
|
||||
operator<<(std::ostream& os, const atomic_cell& ac) {
|
||||
return os << atomic_cell_view(ac);
|
||||
}
|
||||
|
||||
std::ostream&
|
||||
operator<<(std::ostream& os, const atomic_cell_view::printer& acvp) {
|
||||
auto& type = acvp._type;
|
||||
auto& acv = acvp._cell;
|
||||
if (acv.is_live()) {
|
||||
std::ostringstream cell_value_string_builder;
|
||||
if (type.is_counter()) {
|
||||
if (acv.is_counter_update()) {
|
||||
cell_value_string_builder << "counter_update_value=" << acv.counter_update_value();
|
||||
} else {
|
||||
cell_value_string_builder << "shards: ";
|
||||
counter_cell_view::with_linearized(acv, [&cell_value_string_builder] (counter_cell_view& ccv) {
|
||||
cell_value_string_builder << ::join(", ", ccv.shards());
|
||||
});
|
||||
}
|
||||
} else {
|
||||
cell_value_string_builder << type.to_string(acv.value().linearize());
|
||||
}
|
||||
return fmt_print(os, "atomic_cell{{{},ts={:d},expiry={:d},ttl={:d}}}",
|
||||
cell_value_string_builder.str(),
|
||||
acv.timestamp(),
|
||||
acv.is_live_and_has_ttl() ? acv.expiry().time_since_epoch().count() : -1,
|
||||
acv.is_live_and_has_ttl() ? acv.ttl().count() : 0);
|
||||
} else {
|
||||
return fmt_print(os, "atomic_cell{{DEAD,ts={:d},deletion_time={:d}}}",
|
||||
acv.timestamp(), acv.deletion_time().time_since_epoch().count());
|
||||
}
|
||||
}
|
||||
|
||||
std::ostream&
|
||||
operator<<(std::ostream& os, const atomic_cell::printer& acp) {
|
||||
return operator<<(os, static_cast<const atomic_cell_view::printer&>(acp));
|
||||
}
|
||||
|
||||
std::ostream& operator<<(std::ostream& os, const atomic_cell_or_collection::printer& p) {
|
||||
if (!p._cell._data.get()) {
|
||||
return os << "{ null atomic_cell_or_collection }";
|
||||
@@ -279,9 +253,9 @@ std::ostream& operator<<(std::ostream& os, const atomic_cell_or_collection::prin
|
||||
if (dc::structure::get_member<dc::tags::flags>(p._cell._data.get()).get<dc::tags::collection>()) {
|
||||
os << "collection ";
|
||||
auto cmv = p._cell.as_collection_mutation();
|
||||
os << collection_mutation_view::printer(*p._cdef.type, cmv);
|
||||
os << to_hex(cmv.data.linearize());
|
||||
} else {
|
||||
os << atomic_cell_view::printer(*p._cdef.type, p._cell.as_atomic_cell(p._cdef));
|
||||
os << p._cell.as_atomic_cell(p._cdef);
|
||||
}
|
||||
return os << " }";
|
||||
}
|
||||
|
||||
@@ -153,14 +153,6 @@ public:
|
||||
}
|
||||
|
||||
friend std::ostream& operator<<(std::ostream& os, const atomic_cell_view& acv);
|
||||
|
||||
class printer {
|
||||
const abstract_type& _type;
|
||||
const atomic_cell_view& _cell;
|
||||
public:
|
||||
printer(const abstract_type& type, const atomic_cell_view& cell) : _type(type), _cell(cell) {}
|
||||
friend std::ostream& operator<<(std::ostream& os, const printer& acvp);
|
||||
};
|
||||
};
|
||||
|
||||
class atomic_cell_mutable_view final : public basic_atomic_cell_view<mutable_view::yes> {
|
||||
@@ -227,12 +219,30 @@ public:
|
||||
static atomic_cell make_live_uninitialized(const abstract_type& type, api::timestamp_type timestamp, size_t size);
|
||||
friend class atomic_cell_or_collection;
|
||||
friend std::ostream& operator<<(std::ostream& os, const atomic_cell& ac);
|
||||
};
|
||||
|
||||
class printer : atomic_cell_view::printer {
|
||||
public:
|
||||
printer(const abstract_type& type, const atomic_cell_view& cell) : atomic_cell_view::printer(type, cell) {}
|
||||
friend std::ostream& operator<<(std::ostream& os, const printer& acvp);
|
||||
};
|
||||
class collection_mutation_view;
|
||||
|
||||
// Represents a mutation of a collection. Actual format is determined by collection type,
|
||||
// and is:
|
||||
// set: list of atomic_cell
|
||||
// map: list of pair<atomic_cell, bytes> (for key/value)
|
||||
// list: tbd, probably ugly
|
||||
class collection_mutation {
|
||||
public:
|
||||
using imr_object_type = imr::utils::object<data::cell::structure>;
|
||||
imr_object_type _data;
|
||||
|
||||
collection_mutation() {}
|
||||
collection_mutation(const collection_type_impl&, collection_mutation_view v);
|
||||
collection_mutation(const collection_type_impl&, bytes_view bv);
|
||||
operator collection_mutation_view() const;
|
||||
};
|
||||
|
||||
|
||||
class collection_mutation_view {
|
||||
public:
|
||||
atomic_cell_value_view data;
|
||||
};
|
||||
|
||||
class column_definition;
|
||||
|
||||
@@ -34,12 +34,14 @@ template<>
|
||||
struct appending_hash<collection_mutation_view> {
|
||||
template<typename Hasher>
|
||||
void operator()(Hasher& h, collection_mutation_view cell, const column_definition& cdef) const {
|
||||
cell.with_deserialized(*cdef.type, [&] (collection_mutation_view_description m_view) {
|
||||
::feed_hash(h, m_view.tomb);
|
||||
for (auto&& key_and_value : m_view.cells) {
|
||||
::feed_hash(h, key_and_value.first);
|
||||
::feed_hash(h, key_and_value.second, cdef);
|
||||
}
|
||||
cell.data.with_linearized([&] (bytes_view cell_bv) {
|
||||
auto ctype = static_pointer_cast<const collection_type_impl>(cdef.type);
|
||||
auto m_view = ctype->deserialize_mutation_form(cell_bv);
|
||||
::feed_hash(h, m_view.tomb);
|
||||
for (auto&& key_and_value : m_view.cells) {
|
||||
::feed_hash(h, key_and_value.first);
|
||||
::feed_hash(h, key_and_value.second, cdef);
|
||||
}
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
@@ -22,7 +22,6 @@
|
||||
#pragma once
|
||||
|
||||
#include "atomic_cell.hh"
|
||||
#include "collection_mutation.hh"
|
||||
#include "schema.hh"
|
||||
#include "hashing.hh"
|
||||
|
||||
|
||||
@@ -52,7 +52,7 @@ public:
|
||||
return make_ready_future<>();
|
||||
}
|
||||
|
||||
virtual std::string_view qualified_java_name() const override {
|
||||
virtual const sstring& qualified_java_name() const override {
|
||||
return allow_all_authenticator_name();
|
||||
}
|
||||
|
||||
|
||||
@@ -49,7 +49,7 @@ public:
|
||||
return make_ready_future<>();
|
||||
}
|
||||
|
||||
virtual std::string_view qualified_java_name() const override {
|
||||
virtual const sstring& qualified_java_name() const override {
|
||||
return allow_all_authorizer_name();
|
||||
}
|
||||
|
||||
|
||||
@@ -96,7 +96,7 @@ public:
|
||||
///
|
||||
/// A fully-qualified (class with package) Java-like name for this implementation.
|
||||
///
|
||||
virtual std::string_view qualified_java_name() const = 0;
|
||||
virtual const sstring& qualified_java_name() const = 0;
|
||||
|
||||
virtual bool require_authentication() const = 0;
|
||||
|
||||
|
||||
@@ -100,7 +100,7 @@ public:
|
||||
///
|
||||
/// A fully-qualified (class with package) Java-like name for this implementation.
|
||||
///
|
||||
virtual std::string_view qualified_java_name() const = 0;
|
||||
virtual const sstring& qualified_java_name() const = 0;
|
||||
|
||||
///
|
||||
/// Query for the permissions granted directly to a role for a particular \ref resource (and not any of its
|
||||
|
||||
@@ -59,22 +59,22 @@ future<> do_after_system_ready(seastar::abort_source& as, seastar::noncopyable_f
|
||||
}).discard_result();
|
||||
}
|
||||
|
||||
static future<> create_metadata_table_if_missing_impl(
|
||||
future<> create_metadata_table_if_missing(
|
||||
std::string_view table_name,
|
||||
cql3::query_processor& qp,
|
||||
std::string_view cql,
|
||||
::service::migration_manager& mm) {
|
||||
static auto ignore_existing = [] (seastar::noncopyable_function<future<>()> func) {
|
||||
return futurize_invoke(std::move(func)).handle_exception_type([] (exceptions::already_exists_exception& ignored) { });
|
||||
return futurize_apply(std::move(func)).handle_exception_type([] (exceptions::already_exists_exception& ignored) { });
|
||||
};
|
||||
auto& db = qp.db();
|
||||
auto parsed_statement = cql3::query_processor::parse_statement(cql);
|
||||
auto& parsed_cf_statement = static_cast<cql3::statements::raw::cf_statement&>(*parsed_statement);
|
||||
auto parsed_statement = static_pointer_cast<cql3::statements::raw::cf_statement>(
|
||||
cql3::query_processor::parse_statement(cql));
|
||||
|
||||
parsed_cf_statement.prepare_keyspace(meta::AUTH_KS);
|
||||
parsed_statement->prepare_keyspace(meta::AUTH_KS);
|
||||
|
||||
auto statement = static_pointer_cast<cql3::statements::create_table_statement>(
|
||||
parsed_cf_statement.prepare(db, qp.get_cql_stats())->statement);
|
||||
parsed_statement->prepare(db, qp.get_cql_stats())->statement);
|
||||
|
||||
const auto schema = statement->get_cf_meta_data(qp.db());
|
||||
const auto uuid = generate_legacy_id(schema->ks_name(), schema->cf_name());
|
||||
@@ -85,14 +85,7 @@ static future<> create_metadata_table_if_missing_impl(
|
||||
return ignore_existing([&mm, table = std::move(table)] () {
|
||||
return mm.announce_new_column_family(table, false);
|
||||
});
|
||||
}
|
||||
|
||||
future<> create_metadata_table_if_missing(
|
||||
std::string_view table_name,
|
||||
cql3::query_processor& qp,
|
||||
std::string_view cql,
|
||||
::service::migration_manager& mm) noexcept {
|
||||
return futurize_invoke(create_metadata_table_if_missing_impl, table_name, qp, cql, mm);
|
||||
}
|
||||
|
||||
future<> wait_for_schema_agreement(::service::migration_manager& mm, const database& db, seastar::abort_source& as) {
|
||||
|
||||
@@ -27,10 +27,9 @@
|
||||
#include <seastar/core/future.hh>
|
||||
#include <seastar/core/abort_source.hh>
|
||||
#include <seastar/util/noncopyable_function.hh>
|
||||
#include <seastar/core/seastar.hh>
|
||||
#include <seastar/core/reactor.hh>
|
||||
#include <seastar/core/resource.hh>
|
||||
#include <seastar/core/sstring.hh>
|
||||
#include <seastar/core/smp.hh>
|
||||
|
||||
#include "log.hh"
|
||||
#include "seastarx.hh"
|
||||
@@ -62,7 +61,7 @@ extern const sstring AUTH_PACKAGE_NAME;
|
||||
|
||||
template <class Task>
|
||||
future<> once_among_shards(Task&& f) {
|
||||
if (this_shard_id() == 0u) {
|
||||
if (engine().cpu_id() == 0u) {
|
||||
return f();
|
||||
}
|
||||
|
||||
@@ -80,7 +79,7 @@ future<> create_metadata_table_if_missing(
|
||||
std::string_view table_name,
|
||||
cql3::query_processor&,
|
||||
std::string_view cql,
|
||||
::service::migration_manager&) noexcept;
|
||||
::service::migration_manager&);
|
||||
|
||||
future<> wait_for_schema_agreement(::service::migration_manager&, const database&, seastar::abort_source&);
|
||||
|
||||
|
||||
@@ -51,7 +51,7 @@ extern "C" {
|
||||
|
||||
#include <boost/algorithm/string/join.hpp>
|
||||
#include <boost/range.hpp>
|
||||
#include <seastar/core/seastar.hh>
|
||||
#include <seastar/core/reactor.hh>
|
||||
|
||||
#include "auth/authenticated_user.hh"
|
||||
#include "auth/common.hh"
|
||||
@@ -101,7 +101,7 @@ bool default_authorizer::legacy_metadata_exists() const {
|
||||
future<bool> default_authorizer::any_granted() const {
|
||||
static const sstring query = format("SELECT * FROM {}.{} LIMIT 1", meta::AUTH_KS, PERMISSIONS_CF);
|
||||
|
||||
return _qp.execute_internal(
|
||||
return _qp.process(
|
||||
query,
|
||||
db::consistency_level::LOCAL_ONE,
|
||||
infinite_timeout_config,
|
||||
@@ -115,7 +115,7 @@ future<> default_authorizer::migrate_legacy_metadata() const {
|
||||
alogger.info("Starting migration of legacy permissions metadata.");
|
||||
static const sstring query = format("SELECT * FROM {}.{}", meta::AUTH_KS, legacy_table_name);
|
||||
|
||||
return _qp.execute_internal(
|
||||
return _qp.process(
|
||||
query,
|
||||
db::consistency_level::LOCAL_ONE,
|
||||
infinite_timeout_config).then([this](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
@@ -195,7 +195,7 @@ default_authorizer::authorize(const role_or_anonymous& maybe_role, const resourc
|
||||
ROLE_NAME,
|
||||
RESOURCE_NAME);
|
||||
|
||||
return _qp.execute_internal(
|
||||
return _qp.process(
|
||||
query,
|
||||
db::consistency_level::LOCAL_ONE,
|
||||
infinite_timeout_config,
|
||||
@@ -224,7 +224,7 @@ default_authorizer::modify(
|
||||
ROLE_NAME,
|
||||
RESOURCE_NAME),
|
||||
[this, &role_name, set, &resource](const auto& query) {
|
||||
return _qp.execute_internal(
|
||||
return _qp.process(
|
||||
query,
|
||||
db::consistency_level::ONE,
|
||||
internal_distributed_timeout_config(),
|
||||
@@ -249,7 +249,7 @@ future<std::vector<permission_details>> default_authorizer::list_all() const {
|
||||
meta::AUTH_KS,
|
||||
PERMISSIONS_CF);
|
||||
|
||||
return _qp.execute_internal(
|
||||
return _qp.process(
|
||||
query,
|
||||
db::consistency_level::ONE,
|
||||
internal_distributed_timeout_config(),
|
||||
@@ -276,7 +276,7 @@ future<> default_authorizer::revoke_all(std::string_view role_name) const {
|
||||
PERMISSIONS_CF,
|
||||
ROLE_NAME);
|
||||
|
||||
return _qp.execute_internal(
|
||||
return _qp.process(
|
||||
query,
|
||||
db::consistency_level::ONE,
|
||||
internal_distributed_timeout_config(),
|
||||
@@ -296,7 +296,7 @@ future<> default_authorizer::revoke_all(const resource& resource) const {
|
||||
PERMISSIONS_CF,
|
||||
RESOURCE_NAME);
|
||||
|
||||
return _qp.execute_internal(
|
||||
return _qp.process(
|
||||
query,
|
||||
db::consistency_level::LOCAL_ONE,
|
||||
infinite_timeout_config,
|
||||
@@ -313,7 +313,7 @@ future<> default_authorizer::revoke_all(const resource& resource) const {
|
||||
ROLE_NAME,
|
||||
RESOURCE_NAME);
|
||||
|
||||
return _qp.execute_internal(
|
||||
return _qp.process(
|
||||
query,
|
||||
db::consistency_level::LOCAL_ONE,
|
||||
infinite_timeout_config,
|
||||
|
||||
@@ -71,7 +71,7 @@ public:
|
||||
|
||||
virtual future<> stop() override;
|
||||
|
||||
virtual std::string_view qualified_java_name() const override {
|
||||
virtual const sstring& qualified_java_name() const override {
|
||||
return default_authorizer_name();
|
||||
}
|
||||
|
||||
|
||||
@@ -48,7 +48,7 @@
|
||||
#include <optional>
|
||||
|
||||
#include <boost/algorithm/cxx11/all_of.hpp>
|
||||
#include <seastar/core/seastar.hh>
|
||||
#include <seastar/core/reactor.hh>
|
||||
|
||||
#include "auth/authenticated_user.hh"
|
||||
#include "auth/common.hh"
|
||||
@@ -96,13 +96,10 @@ static bool has_salted_hash(const cql3::untyped_result_set_row& row) {
|
||||
return !row.get_or<sstring>(SALTED_HASH, "").empty();
|
||||
}
|
||||
|
||||
static const sstring& update_row_query() {
|
||||
static const sstring update_row_query = format("UPDATE {} SET {} = ? WHERE {} = ?",
|
||||
meta::roles_table::qualified_name(),
|
||||
SALTED_HASH,
|
||||
meta::roles_table::role_col_name);
|
||||
return update_row_query;
|
||||
}
|
||||
static const sstring update_row_query = format("UPDATE {} SET {} = ? WHERE {} = ?",
|
||||
meta::roles_table::qualified_name(),
|
||||
SALTED_HASH,
|
||||
meta::roles_table::role_col_name);
|
||||
|
||||
static const sstring legacy_table_name{"credentials"};
|
||||
|
||||
@@ -114,7 +111,7 @@ future<> password_authenticator::migrate_legacy_metadata() const {
|
||||
plogger.info("Starting migration of legacy authentication metadata.");
|
||||
static const sstring query = format("SELECT * FROM {}.{}", meta::AUTH_KS, legacy_table_name);
|
||||
|
||||
return _qp.execute_internal(
|
||||
return _qp.process(
|
||||
query,
|
||||
db::consistency_level::QUORUM,
|
||||
internal_distributed_timeout_config()).then([this](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
@@ -122,8 +119,8 @@ future<> password_authenticator::migrate_legacy_metadata() const {
|
||||
auto username = row.get_as<sstring>("username");
|
||||
auto salted_hash = row.get_as<sstring>(SALTED_HASH);
|
||||
|
||||
return _qp.execute_internal(
|
||||
update_row_query(),
|
||||
return _qp.process(
|
||||
update_row_query,
|
||||
consistency_for_user(username),
|
||||
internal_distributed_timeout_config(),
|
||||
{std::move(salted_hash), username}).discard_result();
|
||||
@@ -139,8 +136,8 @@ future<> password_authenticator::migrate_legacy_metadata() const {
|
||||
future<> password_authenticator::create_default_if_missing() const {
|
||||
return default_role_row_satisfies(_qp, &has_salted_hash).then([this](bool exists) {
|
||||
if (!exists) {
|
||||
return _qp.execute_internal(
|
||||
update_row_query(),
|
||||
return _qp.process(
|
||||
update_row_query,
|
||||
db::consistency_level::QUORUM,
|
||||
internal_distributed_timeout_config(),
|
||||
{passwords::hash(DEFAULT_USER_PASSWORD, rng_for_salt), DEFAULT_USER_NAME}).then([](auto&&) {
|
||||
@@ -197,7 +194,7 @@ db::consistency_level password_authenticator::consistency_for_user(std::string_v
|
||||
return db::consistency_level::LOCAL_ONE;
|
||||
}
|
||||
|
||||
std::string_view password_authenticator::qualified_java_name() const {
|
||||
const sstring& password_authenticator::qualified_java_name() const {
|
||||
return password_authenticator_name();
|
||||
}
|
||||
|
||||
@@ -230,13 +227,13 @@ future<authenticated_user> password_authenticator::authenticate(
|
||||
// obsolete prepared statements pretty quickly.
|
||||
// Rely on query processing caching statements instead, and lets assume
|
||||
// that a map lookup string->statement is not gonna kill us much.
|
||||
return futurize_invoke([this, username, password] {
|
||||
return futurize_apply([this, username, password] {
|
||||
static const sstring query = format("SELECT {} FROM {} WHERE {} = ?",
|
||||
SALTED_HASH,
|
||||
meta::roles_table::qualified_name(),
|
||||
meta::roles_table::role_col_name);
|
||||
|
||||
return _qp.execute_internal(
|
||||
return _qp.process(
|
||||
query,
|
||||
consistency_for_user(username),
|
||||
internal_distributed_timeout_config(),
|
||||
@@ -270,8 +267,8 @@ future<> password_authenticator::create(std::string_view role_name, const authen
|
||||
return make_ready_future<>();
|
||||
}
|
||||
|
||||
return _qp.execute_internal(
|
||||
update_row_query(),
|
||||
return _qp.process(
|
||||
update_row_query,
|
||||
consistency_for_user(role_name),
|
||||
internal_distributed_timeout_config(),
|
||||
{passwords::hash(*options.password, rng_for_salt), sstring(role_name)}).discard_result();
|
||||
@@ -287,7 +284,7 @@ future<> password_authenticator::alter(std::string_view role_name, const authent
|
||||
SALTED_HASH,
|
||||
meta::roles_table::role_col_name);
|
||||
|
||||
return _qp.execute_internal(
|
||||
return _qp.process(
|
||||
query,
|
||||
consistency_for_user(role_name),
|
||||
internal_distributed_timeout_config(),
|
||||
@@ -300,7 +297,7 @@ future<> password_authenticator::drop(std::string_view name) const {
|
||||
meta::roles_table::qualified_name(),
|
||||
meta::roles_table::role_col_name);
|
||||
|
||||
return _qp.execute_internal(
|
||||
return _qp.process(
|
||||
query, consistency_for_user(name),
|
||||
internal_distributed_timeout_config(),
|
||||
{sstring(name)}).discard_result();
|
||||
|
||||
@@ -71,7 +71,7 @@ public:
|
||||
|
||||
virtual future<> stop() override;
|
||||
|
||||
virtual std::string_view qualified_java_name() const override;
|
||||
virtual const sstring& qualified_java_name() const override;
|
||||
|
||||
virtual bool require_authentication() const override;
|
||||
|
||||
|
||||
@@ -33,7 +33,6 @@
|
||||
|
||||
#include "auth/resource.hh"
|
||||
#include "seastarx.hh"
|
||||
#include "exceptions/exceptions.hh"
|
||||
|
||||
namespace auth {
|
||||
|
||||
@@ -53,9 +52,9 @@ struct role_config_update final {
|
||||
///
|
||||
/// A logical argument error for a role-management operation.
|
||||
///
|
||||
class roles_argument_exception : public exceptions::invalid_request_exception {
|
||||
class roles_argument_exception : public std::invalid_argument {
|
||||
public:
|
||||
using exceptions::invalid_request_exception::invalid_request_exception;
|
||||
using std::invalid_argument::invalid_argument;
|
||||
};
|
||||
|
||||
class role_already_exists : public roles_argument_exception {
|
||||
|
||||
@@ -68,14 +68,14 @@ future<bool> default_role_row_satisfies(
|
||||
meta::roles_table::role_col_name);
|
||||
|
||||
return do_with(std::move(p), [&qp](const auto& p) {
|
||||
return qp.execute_internal(
|
||||
return qp.process(
|
||||
query,
|
||||
db::consistency_level::ONE,
|
||||
infinite_timeout_config,
|
||||
{meta::DEFAULT_SUPERUSER_NAME},
|
||||
true).then([&qp, &p](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
if (results->empty()) {
|
||||
return qp.execute_internal(
|
||||
return qp.process(
|
||||
query,
|
||||
db::consistency_level::QUORUM,
|
||||
internal_distributed_timeout_config(),
|
||||
@@ -100,7 +100,7 @@ future<bool> any_nondefault_role_row_satisfies(
|
||||
static const sstring query = format("SELECT * FROM {}", meta::roles_table::qualified_name());
|
||||
|
||||
return do_with(std::move(p), [&qp](const auto& p) {
|
||||
return qp.execute_internal(
|
||||
return qp.process(
|
||||
query,
|
||||
db::consistency_level::QUORUM,
|
||||
internal_distributed_timeout_config()).then([&p](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
|
||||
@@ -39,7 +39,7 @@
|
||||
#include "db/consistency_level_type.hh"
|
||||
#include "exceptions/exceptions.hh"
|
||||
#include "log.hh"
|
||||
#include "service/migration_manager.hh"
|
||||
#include "service/migration_listener.hh"
|
||||
#include "utils/class_registrator.hh"
|
||||
#include "database.hh"
|
||||
|
||||
@@ -77,23 +77,17 @@ private:
|
||||
void on_update_view(const sstring& ks_name, const sstring& view_name, bool columns_changed) override {}
|
||||
|
||||
void on_drop_keyspace(const sstring& ks_name) override {
|
||||
// Do it in the background.
|
||||
(void)_authorizer.revoke_all(
|
||||
_authorizer.revoke_all(
|
||||
auth::make_data_resource(ks_name)).handle_exception_type([](const unsupported_authorization_operation&) {
|
||||
// Nothing.
|
||||
}).handle_exception([] (std::exception_ptr e) {
|
||||
log.error("Unexpected exception while revoking all permissions on dropped keyspace: {}", e);
|
||||
});
|
||||
}
|
||||
|
||||
void on_drop_column_family(const sstring& ks_name, const sstring& cf_name) override {
|
||||
// Do it in the background.
|
||||
(void)_authorizer.revoke_all(
|
||||
_authorizer.revoke_all(
|
||||
auth::make_data_resource(
|
||||
ks_name, cf_name)).handle_exception_type([](const unsupported_authorization_operation&) {
|
||||
// Nothing.
|
||||
}).handle_exception([] (std::exception_ptr e) {
|
||||
log.error("Unexpected exception while revoking all permissions on dropped table: {}", e);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -114,14 +108,14 @@ static future<> validate_role_exists(const service& ser, std::string_view role_n
|
||||
service::service(
|
||||
permissions_cache_config c,
|
||||
cql3::query_processor& qp,
|
||||
::service::migration_notifier& mn,
|
||||
::service::migration_manager& mm,
|
||||
std::unique_ptr<authorizer> z,
|
||||
std::unique_ptr<authenticator> a,
|
||||
std::unique_ptr<role_manager> r)
|
||||
: _permissions_cache_config(std::move(c))
|
||||
, _permissions_cache(nullptr)
|
||||
, _qp(qp)
|
||||
, _mnotifier(mn)
|
||||
, _migration_manager(mm)
|
||||
, _authorizer(std::move(z))
|
||||
, _authenticator(std::move(a))
|
||||
, _role_manager(std::move(r))
|
||||
@@ -141,19 +135,18 @@ service::service(
|
||||
service::service(
|
||||
permissions_cache_config c,
|
||||
cql3::query_processor& qp,
|
||||
::service::migration_notifier& mn,
|
||||
::service::migration_manager& mm,
|
||||
const service_config& sc)
|
||||
: service(
|
||||
std::move(c),
|
||||
qp,
|
||||
mn,
|
||||
mm,
|
||||
create_object<authorizer>(sc.authorizer_java_name, qp, mm),
|
||||
create_object<authenticator>(sc.authenticator_java_name, qp, mm),
|
||||
create_object<role_manager>(sc.role_manager_java_name, qp, mm)) {
|
||||
}
|
||||
|
||||
future<> service::create_keyspace_if_missing(::service::migration_manager& mm) const {
|
||||
future<> service::create_keyspace_if_missing() const {
|
||||
auto& db = _qp.db();
|
||||
|
||||
if (!db.has_keyspace(meta::AUTH_KS)) {
|
||||
@@ -167,15 +160,15 @@ future<> service::create_keyspace_if_missing(::service::migration_manager& mm) c
|
||||
|
||||
// We use min_timestamp so that default keyspace metadata will loose with any manual adjustments.
|
||||
// See issue #2129.
|
||||
return mm.announce_new_keyspace(ksm, api::min_timestamp, false);
|
||||
return _migration_manager.announce_new_keyspace(ksm, api::min_timestamp, false);
|
||||
}
|
||||
|
||||
return make_ready_future<>();
|
||||
}
|
||||
|
||||
future<> service::start(::service::migration_manager& mm) {
|
||||
return once_among_shards([this, &mm] {
|
||||
return create_keyspace_if_missing(mm);
|
||||
future<> service::start() {
|
||||
return once_among_shards([this] {
|
||||
return create_keyspace_if_missing();
|
||||
}).then([this] {
|
||||
return _role_manager->start().then([this] {
|
||||
return when_all_succeed(_authorizer->start(), _authenticator->start());
|
||||
@@ -184,7 +177,7 @@ future<> service::start(::service::migration_manager& mm) {
|
||||
_permissions_cache = std::make_unique<permissions_cache>(_permissions_cache_config, *this, log);
|
||||
}).then([this] {
|
||||
return once_among_shards([this] {
|
||||
_mnotifier.register_listener(_migration_listener.get());
|
||||
_migration_manager.register_listener(_migration_listener.get());
|
||||
return make_ready_future<>();
|
||||
});
|
||||
});
|
||||
@@ -193,12 +186,9 @@ future<> service::start(::service::migration_manager& mm) {
|
||||
future<> service::stop() {
|
||||
// Only one of the shards has the listener registered, but let's try to
|
||||
// unregister on each one just to make sure.
|
||||
return _mnotifier.unregister_listener(_migration_listener.get()).then([this] {
|
||||
if (_permissions_cache) {
|
||||
return _permissions_cache->stop();
|
||||
}
|
||||
return make_ready_future<>();
|
||||
}).then([this] {
|
||||
_migration_manager.unregister_listener(_migration_listener.get());
|
||||
|
||||
return _permissions_cache->stop().then([this] {
|
||||
return when_all_succeed(_role_manager->stop(), _authorizer->stop(), _authenticator->stop());
|
||||
});
|
||||
}
|
||||
@@ -220,7 +210,7 @@ future<bool> service::has_existing_legacy_users() const {
|
||||
// This logic is borrowed directly from Apache Cassandra. By first checking for the presence of the default user, we
|
||||
// can potentially avoid doing a range query with a high consistency level.
|
||||
|
||||
return _qp.execute_internal(
|
||||
return _qp.process(
|
||||
default_user_query,
|
||||
db::consistency_level::ONE,
|
||||
infinite_timeout_config,
|
||||
@@ -230,7 +220,7 @@ future<bool> service::has_existing_legacy_users() const {
|
||||
return make_ready_future<bool>(true);
|
||||
}
|
||||
|
||||
return _qp.execute_internal(
|
||||
return _qp.process(
|
||||
default_user_query,
|
||||
db::consistency_level::QUORUM,
|
||||
infinite_timeout_config,
|
||||
@@ -240,7 +230,7 @@ future<bool> service::has_existing_legacy_users() const {
|
||||
return make_ready_future<bool>(true);
|
||||
}
|
||||
|
||||
return _qp.execute_internal(
|
||||
return _qp.process(
|
||||
all_users_query,
|
||||
db::consistency_level::QUORUM,
|
||||
infinite_timeout_config).then([](auto results) {
|
||||
@@ -419,7 +409,7 @@ future<> create_role(
|
||||
return make_ready_future<>();
|
||||
}
|
||||
|
||||
return futurize_invoke(
|
||||
return futurize_apply(
|
||||
&validate_authentication_options_are_supported,
|
||||
options,
|
||||
ser.underlying_authenticator().supported_options()).then([&ser, name, &options] {
|
||||
@@ -443,7 +433,7 @@ future<> alter_role(
|
||||
return make_ready_future<>();
|
||||
}
|
||||
|
||||
return futurize_invoke(
|
||||
return futurize_apply(
|
||||
&validate_authentication_options_are_supported,
|
||||
options,
|
||||
ser.underlying_authenticator().supported_options()).then([&ser, name, &options] {
|
||||
|
||||
@@ -28,7 +28,6 @@
|
||||
#include <seastar/core/future.hh>
|
||||
#include <seastar/core/sstring.hh>
|
||||
#include <seastar/util/bool_class.hh>
|
||||
#include <seastar/core/sharded.hh>
|
||||
|
||||
#include "auth/authenticator.hh"
|
||||
#include "auth/authorizer.hh"
|
||||
@@ -43,7 +42,6 @@ class query_processor;
|
||||
|
||||
namespace service {
|
||||
class migration_manager;
|
||||
class migration_notifier;
|
||||
class migration_listener;
|
||||
}
|
||||
|
||||
@@ -78,15 +76,13 @@ public:
|
||||
///
|
||||
/// All state associated with access-control is stored externally to any particular instance of this class.
|
||||
///
|
||||
/// peering_sharded_service inheritance is needed to be able to access shard local authentication service
|
||||
/// given an object from another shard. Used for bouncing lwt requests to correct shard.
|
||||
class service final : public seastar::peering_sharded_service<service> {
|
||||
class service final {
|
||||
permissions_cache_config _permissions_cache_config;
|
||||
std::unique_ptr<permissions_cache> _permissions_cache;
|
||||
|
||||
cql3::query_processor& _qp;
|
||||
|
||||
::service::migration_notifier& _mnotifier;
|
||||
::service::migration_manager& _migration_manager;
|
||||
|
||||
std::unique_ptr<authorizer> _authorizer;
|
||||
|
||||
@@ -101,7 +97,7 @@ public:
|
||||
service(
|
||||
permissions_cache_config,
|
||||
cql3::query_processor&,
|
||||
::service::migration_notifier&,
|
||||
::service::migration_manager&,
|
||||
std::unique_ptr<authorizer>,
|
||||
std::unique_ptr<authenticator>,
|
||||
std::unique_ptr<role_manager>);
|
||||
@@ -114,11 +110,10 @@ public:
|
||||
service(
|
||||
permissions_cache_config,
|
||||
cql3::query_processor&,
|
||||
::service::migration_notifier&,
|
||||
::service::migration_manager&,
|
||||
const service_config&);
|
||||
|
||||
future<> start(::service::migration_manager&);
|
||||
future<> start();
|
||||
|
||||
future<> stop();
|
||||
|
||||
@@ -164,7 +159,7 @@ public:
|
||||
private:
|
||||
future<bool> has_existing_legacy_users() const;
|
||||
|
||||
future<> create_keyspace_if_missing(::service::migration_manager& mm) const;
|
||||
future<> create_keyspace_if_missing() const;
|
||||
};
|
||||
|
||||
future<bool> has_superuser(const service&, const authenticated_user&);
|
||||
|
||||
@@ -35,7 +35,6 @@
|
||||
#include "auth/common.hh"
|
||||
#include "auth/roles-metadata.hh"
|
||||
#include "cql3/query_processor.hh"
|
||||
#include "cql3/untyped_result_set.hh"
|
||||
#include "db/consistency_level_type.hh"
|
||||
#include "exceptions/exceptions.hh"
|
||||
#include "log.hh"
|
||||
@@ -87,7 +86,7 @@ static future<std::optional<record>> find_record(cql3::query_processor& qp, std:
|
||||
meta::roles_table::qualified_name(),
|
||||
meta::roles_table::role_col_name);
|
||||
|
||||
return qp.execute_internal(
|
||||
return qp.process(
|
||||
query,
|
||||
consistency_for_role(role_name),
|
||||
internal_distributed_timeout_config(),
|
||||
@@ -102,8 +101,8 @@ static future<std::optional<record>> find_record(cql3::query_processor& qp, std:
|
||||
return std::make_optional(
|
||||
record{
|
||||
row.get_as<sstring>(sstring(meta::roles_table::role_col_name)),
|
||||
row.get_or<bool>("is_superuser", false),
|
||||
row.get_or<bool>("can_login", false),
|
||||
row.get_as<bool>("is_superuser"),
|
||||
row.get_as<bool>("can_login"),
|
||||
(row.has("member_of")
|
||||
? row.get_set<sstring>("member_of")
|
||||
: role_set())});
|
||||
@@ -171,7 +170,7 @@ future<> standard_role_manager::create_default_role_if_missing() const {
|
||||
meta::roles_table::qualified_name(),
|
||||
meta::roles_table::role_col_name);
|
||||
|
||||
return _qp.execute_internal(
|
||||
return _qp.process(
|
||||
query,
|
||||
db::consistency_level::QUORUM,
|
||||
internal_distributed_timeout_config(),
|
||||
@@ -198,13 +197,13 @@ future<> standard_role_manager::migrate_legacy_metadata() const {
|
||||
log.info("Starting migration of legacy user metadata.");
|
||||
static const sstring query = format("SELECT * FROM {}.{}", meta::AUTH_KS, legacy_table_name);
|
||||
|
||||
return _qp.execute_internal(
|
||||
return _qp.process(
|
||||
query,
|
||||
db::consistency_level::QUORUM,
|
||||
internal_distributed_timeout_config()).then([this](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
return do_for_each(*results, [this](const cql3::untyped_result_set_row& row) {
|
||||
role_config config;
|
||||
config.is_superuser = row.get_or<bool>("super", false);
|
||||
config.is_superuser = row.get_as<bool>("super");
|
||||
config.can_login = true;
|
||||
|
||||
return do_with(
|
||||
@@ -259,7 +258,7 @@ future<> standard_role_manager::create_or_replace(std::string_view role_name, co
|
||||
meta::roles_table::qualified_name(),
|
||||
meta::roles_table::role_col_name);
|
||||
|
||||
return _qp.execute_internal(
|
||||
return _qp.process(
|
||||
query,
|
||||
consistency_for_role(role_name),
|
||||
internal_distributed_timeout_config(),
|
||||
@@ -299,7 +298,7 @@ standard_role_manager::alter(std::string_view role_name, const role_config_updat
|
||||
return make_ready_future<>();
|
||||
}
|
||||
|
||||
return _qp.execute_internal(
|
||||
return _qp.process(
|
||||
format("UPDATE {} SET {} WHERE {} = ?",
|
||||
meta::roles_table::qualified_name(),
|
||||
build_column_assignments(u),
|
||||
@@ -321,7 +320,7 @@ future<> standard_role_manager::drop(std::string_view role_name) const {
|
||||
static const sstring query = format("SELECT member FROM {} WHERE role = ?",
|
||||
meta::role_members_table::qualified_name());
|
||||
|
||||
return _qp.execute_internal(
|
||||
return _qp.process(
|
||||
query,
|
||||
consistency_for_role(role_name),
|
||||
internal_distributed_timeout_config(),
|
||||
@@ -360,7 +359,7 @@ future<> standard_role_manager::drop(std::string_view role_name) const {
|
||||
meta::roles_table::qualified_name(),
|
||||
meta::roles_table::role_col_name);
|
||||
|
||||
return _qp.execute_internal(
|
||||
return _qp.process(
|
||||
query,
|
||||
consistency_for_role(role_name),
|
||||
internal_distributed_timeout_config(),
|
||||
@@ -387,7 +386,7 @@ standard_role_manager::modify_membership(
|
||||
(ch == membership_change::add ? '+' : '-'),
|
||||
meta::roles_table::role_col_name);
|
||||
|
||||
return _qp.execute_internal(
|
||||
return _qp.process(
|
||||
query,
|
||||
consistency_for_role(grantee_name),
|
||||
internal_distributed_timeout_config(),
|
||||
@@ -397,7 +396,7 @@ standard_role_manager::modify_membership(
|
||||
const auto modify_role_members = [this, role_name, grantee_name, ch] {
|
||||
switch (ch) {
|
||||
case membership_change::add:
|
||||
return _qp.execute_internal(
|
||||
return _qp.process(
|
||||
format("INSERT INTO {} (role, member) VALUES (?, ?)",
|
||||
meta::role_members_table::qualified_name()),
|
||||
consistency_for_role(role_name),
|
||||
@@ -405,7 +404,7 @@ standard_role_manager::modify_membership(
|
||||
{sstring(role_name), sstring(grantee_name)}).discard_result();
|
||||
|
||||
case membership_change::remove:
|
||||
return _qp.execute_internal(
|
||||
return _qp.process(
|
||||
format("DELETE FROM {} WHERE role = ? AND member = ?",
|
||||
meta::role_members_table::qualified_name()),
|
||||
consistency_for_role(role_name),
|
||||
@@ -509,7 +508,7 @@ future<role_set> standard_role_manager::query_all() const {
|
||||
// To avoid many copies of a view.
|
||||
static const auto role_col_name_string = sstring(meta::roles_table::role_col_name);
|
||||
|
||||
return _qp.execute_internal(
|
||||
return _qp.process(
|
||||
query,
|
||||
db::consistency_level::QUORUM,
|
||||
internal_distributed_timeout_config()).then([](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
|
||||
@@ -82,7 +82,7 @@ public:
|
||||
return _authenticator->stop();
|
||||
}
|
||||
|
||||
virtual std::string_view qualified_java_name() const override {
|
||||
virtual const sstring& qualified_java_name() const override {
|
||||
return transitional_authenticator_name();
|
||||
}
|
||||
|
||||
@@ -158,7 +158,7 @@ public:
|
||||
}
|
||||
|
||||
virtual future<authenticated_user> get_authenticated_user() const {
|
||||
return futurize_invoke([this] {
|
||||
return futurize_apply([this] {
|
||||
return _sasl->get_authenticated_user().handle_exception([](auto ep) {
|
||||
try {
|
||||
std::rethrow_exception(ep);
|
||||
@@ -201,7 +201,7 @@ public:
|
||||
return _authorizer->stop();
|
||||
}
|
||||
|
||||
virtual std::string_view qualified_java_name() const override {
|
||||
virtual const sstring& qualified_java_name() const override {
|
||||
return transitional_authorizer_name();
|
||||
}
|
||||
|
||||
|
||||
@@ -23,11 +23,7 @@
|
||||
#include <seastar/core/scheduling.hh>
|
||||
#include <seastar/core/timer.hh>
|
||||
#include <seastar/core/gate.hh>
|
||||
#include <seastar/core/file.hh>
|
||||
#include <chrono>
|
||||
#include <cmath>
|
||||
|
||||
#include "seastarx.hh"
|
||||
|
||||
// Simple proportional controller to adjust shares for processes for which a backlog can be clearly
|
||||
// defined.
|
||||
|
||||
72
build_id.cc
72
build_id.cc
@@ -1,72 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2019 ScyllaDB
|
||||
*/
|
||||
|
||||
#include "build_id.hh"
|
||||
#include <fmt/printf.h>
|
||||
#include <link.h>
|
||||
#include <seastar/core/align.hh>
|
||||
#include <sstream>
|
||||
#include <cassert>
|
||||
|
||||
using namespace seastar;
|
||||
|
||||
static const Elf64_Nhdr* get_nt_build_id(dl_phdr_info* info) {
|
||||
auto base = info->dlpi_addr;
|
||||
const auto* h = info->dlpi_phdr;
|
||||
auto num_headers = info->dlpi_phnum;
|
||||
for (int i = 0; i != num_headers; ++i, ++h) {
|
||||
if (h->p_type != PT_NOTE) {
|
||||
continue;
|
||||
}
|
||||
|
||||
auto* p = reinterpret_cast<const char*>(base) + h->p_vaddr;
|
||||
auto* e = p + h->p_memsz;
|
||||
while (p != e) {
|
||||
const auto* n = reinterpret_cast<const Elf64_Nhdr*>(p);
|
||||
if (n->n_type == NT_GNU_BUILD_ID) {
|
||||
return n;
|
||||
}
|
||||
|
||||
p += sizeof(Elf64_Nhdr);
|
||||
|
||||
p += n->n_namesz;
|
||||
p = align_up(p, 4);
|
||||
|
||||
p += n->n_descsz;
|
||||
p = align_up(p, 4);
|
||||
}
|
||||
}
|
||||
|
||||
assert(0 && "no NT_GNU_BUILD_ID note");
|
||||
}
|
||||
|
||||
static int callback(dl_phdr_info* info, size_t size, void* data) {
|
||||
std::string& ret = *(std::string*)data;
|
||||
std::ostringstream os;
|
||||
|
||||
// The first DSO is always the main program, which has an empty name.
|
||||
assert(strlen(info->dlpi_name) == 0);
|
||||
|
||||
auto* n = get_nt_build_id(info);
|
||||
auto* p = reinterpret_cast<const char*>(n);
|
||||
|
||||
p += sizeof(Elf64_Nhdr);
|
||||
|
||||
p += n->n_namesz;
|
||||
p = align_up(p, 4);
|
||||
|
||||
const char* desc = p;
|
||||
for (unsigned i = 0; i < n->n_descsz; ++i) {
|
||||
fmt::fprintf(os, "%02x", (unsigned char)*(desc + i));
|
||||
}
|
||||
ret = os.str();
|
||||
return 1;
|
||||
}
|
||||
|
||||
std::string get_build_id() {
|
||||
std::string ret;
|
||||
int r = dl_iterate_phdr(callback, &ret);
|
||||
assert(r == 1);
|
||||
return ret;
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2019 ScyllaDB
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <string>
|
||||
|
||||
std::string get_build_id();
|
||||
2
bytes.cc
2
bytes.cc
@@ -64,7 +64,7 @@ bytes from_hex(sstring_view s) {
|
||||
|
||||
sstring to_hex(bytes_view b) {
|
||||
static char digits[] = "0123456789abcdef";
|
||||
sstring out = uninitialized_string(b.size() * 2);
|
||||
sstring out(sstring::initialized_later(), b.size() * 2);
|
||||
unsigned end = b.size();
|
||||
for (unsigned i = 0; i != end; ++i) {
|
||||
uint8_t x = b[i];
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user