Compare commits

149 commits: kbr--patch...next-4.6
| SHA1 |
|---|
| 6c0825e2a6 |
| db3dd3bdf6 |
| 4ad24180f5 |
| 755c7eeb6a |
| 8914ca8c58 |
| e82e4bbed3 |
| f9c457778e |
| 8315a7b164 |
| 291ca8db60 |
| 4da5fbaa24 |
| fc16664d81 |
| 80bea5341e |
| 6ecc772b56 |
| 0b2e951954 |
| f2a738497f |
| badf7c816f |
| bfb86f2c78 |
| 18e7a46038 |
| cbcfa31e51 |
| 5ee69ff3a9 |
| 949103d22a |
| 549cb60f4c |
| 37633c5576 |
| abd9f43fa7 |
| d41d4db5c0 |
| c500043a78 |
| af4752a526 |
| 0aa9a8c266 |
| 85fd6ab377 |
| 7c79c513d1 |
| 9a8e73f0c3 |
| fac0443200 |
| 6bcfef2cfa |
| d2c67a2429 |
| d6c2f228e7 |
| a1b1df2074 |
| 14e13ecbd4 |
| b8740bde6e |
| 1b23f8d038 |
| 05a228e4c5 |
| 2ec293ab0e |
| b60f14601e |
| 284dd21ef7 |
| 8b52f1d6e7 |
| 157951f756 |
| 4f643ed4a5 |
| b598629b7f |
| 43f82047b9 |
| ec3c07de6e |
| 82572e8cfe |
| 2b9ed79c6f |
| ab0b6fd372 |
| 12f1718ef4 |
| 322dfe8403 |
| 11f008e8fd |
| fd7314a362 |
| d27468f078 |
| 74ef1ee961 |
| 07549d159c |
| 189bbcd82d |
| 70e6921125 |
| e314158708 |
| 46586532c9 |
| 0114244363 |
| f154c8b719 |
| 8bf149fdd6 |
| 0265d56173 |
| e50452ba43 |
| a205f644cb |
| f136b5b950 |
| 69a1325884 |
| ab153c9b94 |
| eb372d7f03 |
| e232711e7e |
| 0a440b6d4a |
| 00bb1e8145 |
| e30dbee2db |
| 2309d6b51e |
| b77ca07709 |
| bb0a38f889 |
| c48fd03463 |
| eb78e6d4b8 |
| 4b1b0a55c0 |
| 172a8628d5 |
| 5688b125e6 |
| 6da4acb41e |
| f09cc9a01d |
| cd2e33ede4 |
| 32d0698d78 |
| 93cf43ae4b |
| 2f2d22a864 |
| 5f92f54f06 |
| 395f2459b4 |
| 019d50bb5c |
| bbe775b926 |
| 469c94ea17 |
| 4c780d0265 |
| 0181de1f2c |
| 7597a79ef9 |
| 8f5148e921 |
| 5694ec189f |
| 34d470967a |
| 61db571a44 |
| 5b5a300a9e |
| 148a65d0d6 |
| e3ad14d55f |
| 2b506c2d4a |
| 50aad1c668 |
| 7bf3f37cd1 |
| 0f7f8585f2 |
| 2c65c4a569 |
| f85cd289bc |
| 5e661af9a4 |
| 5629b67d25 |
| ad632cf7fc |
| ca24bebcf2 |
| 7dc5abb6f8 |
| e8a1cfb6f8 |
| fc312b3021 |
| 7b82aaf939 |
| 894a4abfae |
| 4dcf023470 |
| 283788828e |
| 730a147ba6 |
| 9897e83029 |
| 1a9b64e6f6 |
| 49fe9e2c8e |
| d0580c41ee |
| 542394c82f |
| 018ad3f6f4 |
| 9b8b7efb54 |
| 1c3e63975f |
| 11bb03e46d |
| 810e410c5d |
| 97f6da0c3e |
| c229fe9694 |
| ee1ca8ae4d |
| 6bfd322e3b |
| afc18d5070 |
| 2ec22c2404 |
| 19da778271 |
| cbd4c13ba6 |
| 338871802d |
| 8b5b1b8af6 |
| ea89eff95d |
| 96421e7779 |
| 142336ca53 |
| 492f12248c |
| 7eb7a0e5fe |
.gitattributes (vendored): 1 change
```diff
@@ -1,3 +1,2 @@
 *.cc diff=cpp
 *.hh diff=cpp
-*.svg binary
```
.github/CODEOWNERS (vendored): 22 changes
```diff
@@ -2,14 +2,14 @@
 auth/*  @elcallio @vladzcloudius
 
 # CACHE
-row_cache*  @tgrabiec
-*mutation*  @tgrabiec
-test/boost/mvcc*  @tgrabiec
+row_cache*  @tgrabiec @haaawk
+*mutation*  @tgrabiec @haaawk
+test/boost/mvcc*  @tgrabiec @haaawk
 
 # CDC
-cdc/*  @kbr- @elcallio @piodul @jul-stas
-test/cql/cdc_*  @kbr- @elcallio @piodul @jul-stas
-test/boost/cdc_*  @kbr- @elcallio @piodul @jul-stas
+cdc/*  @haaawk @kbr- @elcallio @piodul @jul-stas
+test/cql/cdc_*  @haaawk @kbr- @elcallio @piodul @jul-stas
+test/boost/cdc_*  @haaawk @kbr- @elcallio @piodul @jul-stas
 
 # COMMITLOG / BATCHLOG
 db/commitlog/*  @elcallio
@@ -28,12 +28,8 @@ transport/*
 cql3/*  @tgrabiec @psarna @cvybhu
 
 # COUNTERS
-counters*  @jul-stas
-tests/counter_test*  @jul-stas
-
-# DOCS
-docs/*  @annastuchlik @tzach
-docs/alternator  @annastuchlik @tzach @nyh @psarna
+counters*  @haaawk @jul-stas
+tests/counter_test*  @haaawk @jul-stas
 
 # GOSSIP
 gms/*  @tgrabiec @asias
@@ -78,7 +74,7 @@ alternator/* @nyh @psarna
 test/alternator/*  @nyh @psarna
 
 # HINTED HANDOFF
-db/hints/*  @piodul @vladzcloudius
+db/hints/*  @haaawk @piodul @vladzcloudius
 
 # REDIS
 redis/*  @nyh @syuu1228
```
.github/workflows/docs-pages.yaml (vendored): 35 lines removed (file deleted)
```diff
@@ -1,35 +0,0 @@
-name: "Docs / Publish"
-# For more information,
-# see https://sphinx-theme.scylladb.com/stable/deployment/production.html#available-workflows
-
-on:
-  push:
-    branches:
-      - master
-    paths:
-      - "docs/**"
-  workflow_dispatch:
-
-jobs:
-  release:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-        with:
-          persist-credentials: false
-          fetch-depth: 0
-      - name: Set up Python
-        uses: actions/setup-python@v3
-        with:
-          python-version: 3.7
-      - name: Set up env
-        run: make -C docs setupenv
-      - name: Build docs
-        run: make -C docs multiversion
-      - name: Build redirects
-        run: make -C docs redirects
-      - name: Deploy docs to GitHub Pages
-        run: ./docs/_utils/deploy.sh
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
.github/workflows/docs-pages@v2.yaml (vendored, new file): 29 lines added
```diff
@@ -0,0 +1,29 @@
+name: "Docs / Publish"
+
+on:
+  push:
+    branches:
+      - master
+    paths:
+      - "docs/**"
+  workflow_dispatch:
+
+jobs:
+  release:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+        with:
+          persist-credentials: false
+          fetch-depth: 0
+      - name: Set up Python
+        uses: actions/setup-python@v1
+        with:
+          python-version: 3.7
+      - name: Build docs
+        run: make -C docs multiversion
+      - name: Deploy
+        run: ./docs/_utils/deploy.sh
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
.github/workflows/docs-pr.yaml (vendored): 28 lines removed (file deleted)
```diff
@@ -1,28 +0,0 @@
-name: "Docs / Build PR"
-# For more information,
-# see https://sphinx-theme.scylladb.com/stable/deployment/production.html#available-workflows
-
-on:
-  pull_request:
-    branches:
-      - master
-    paths:
-      - "docs/**"
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-        with:
-          persist-credentials: false
-          fetch-depth: 0
-      - name: Set up Python
-        uses: actions/setup-python@v3
-        with:
-          python-version: 3.7
-      - name: Set up env
-        run: make -C docs setupenv
-      - name: Build docs
-        run: make -C docs test
```
.github/workflows/docs-pr@v1.yaml (vendored, new file): 25 lines added
```diff
@@ -0,0 +1,25 @@
+name: "Docs / Build PR"
+
+on:
+  pull_request:
+    branches:
+      - master
+    paths:
+      - "docs/**"
+
+jobs:
+  build:
+    name: Build
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+        with:
+          persist-credentials: false
+          fetch-depth: 0
+      - name: Set up Python
+        uses: actions/setup-python@v1
+        with:
+          python-version: 3.7
+      - name: Build docs
+        run: make -C docs test
```
.gitignore (vendored): 4 changes
```diff
@@ -22,7 +22,6 @@ resources
 .pytest_cache
 /expressions.tokens
 tags
-!db/tags/
 testlog
 test/*/*.reject
 .vscode
@@ -30,6 +29,3 @@ docs/_build
 docs/poetry.lock
 compile_commands.json
 .ccls-cache/
-.mypy_cache
-.envrc
-rust/Cargo.lock
```
.gitmodules (vendored): 2 changes
```diff
@@ -1,6 +1,6 @@
 [submodule "seastar"]
 	path = seastar
-	url = ../seastar
+	url = ../scylla-seastar
 	ignore = dirty
 [submodule "swagger-ui"]
 	path = swagger-ui
```
.mailmap: 3 lines removed (file deleted)
```diff
@@ -1,3 +0,0 @@
-Avi Kivity <avi@scylladb.com> Avi Kivity' via ScyllaDB development <scylladb-dev@googlegroups.com>
-Raphael S. Carvalho <raphaelsc@scylladb.com> Raphael S. Carvalho' via ScyllaDB development <scylladb-dev@googlegroups.com>
-Pavel Emelyanov <xemul@scylladb.com> Pavel Emelyanov' via ScyllaDB development <scylladb-dev@googlegroups.com>
```
CMakeLists.txt

```diff
@@ -189,8 +189,6 @@ set(swagger_files
   api/api-doc/storage_service.json
   api/api-doc/stream_manager.json
   api/api-doc/system.json
-  api/api-doc/task_manager.json
-  api/api-doc/task_manager_test.json
   api/api-doc/utils.json)
 
 set(swagger_gen_files)
@@ -303,8 +301,6 @@ set(scylla_sources
   api/storage_service.cc
   api/stream_manager.cc
   api/system.cc
-  api/task_manager.cc
-  api/task_manager_test.cc
   atomic_cell.cc
   auth/allow_all_authenticator.cc
   auth/allow_all_authorizer.cc
@@ -341,6 +337,7 @@ set(scylla_sources
   compaction/size_tiered_compaction_strategy.cc
   compaction/time_window_compaction_strategy.cc
   compress.cc
   connection_notifier.cc
+  converting_mutation_partition_applier.cc
   counters.cc
   cql3/abstract_marker.cc
@@ -352,8 +349,7 @@ set(scylla_sources
   cql3/constants.cc
   cql3/cql3_type.cc
   cql3/expr/expression.cc
-  cql3/expr/prepare_expr.cc
-  cql3/expr/restrictions.cc
+  cql3/expr/term_expr.cc
   cql3/functions/aggregate_fcts.cc
   cql3/functions/castas_fcts.cc
   cql3/functions/error_injection_fcts.cc
@@ -367,6 +363,7 @@ set(scylla_sources
   cql3/prepare_context.cc
   cql3/query_options.cc
   cql3/query_processor.cc
+  cql3/relation.cc
   cql3/restrictions/statement_restrictions.cc
   cql3/result_set.cc
   cql3/role_name.cc
@@ -377,6 +374,7 @@ set(scylla_sources
   cql3/selection/selector_factories.cc
   cql3/selection/simple_selector.cc
   cql3/sets.cc
+  cql3/single_column_relation.cc
   cql3/statements/alter_keyspace_statement.cc
   cql3/statements/alter_service_level_statement.cc
   cql3/statements/alter_table_statement.cc
@@ -428,9 +426,9 @@ set(scylla_sources
   cql3/statements/sl_prop_defs.cc
   cql3/statements/truncate_statement.cc
   cql3/statements/update_statement.cc
-  cql3/statements/strongly_consistent_modification_statement.cc
-  cql3/statements/strongly_consistent_select_statement.cc
   cql3/statements/use_statement.cc
+  cql3/token_relation.cc
+  cql3/tuples.cc
   cql3/type_json.cc
   cql3/untyped_result_set.cc
   cql3/update_parameters.cc
@@ -438,7 +436,7 @@ set(scylla_sources
   cql3/util.cc
   cql3/ut_name.cc
   cql3/values.cc
-  data_dictionary/data_dictionary.cc
+  database.cc
   db/batchlog_manager.cc
   db/commitlog/commitlog.cc
   db/commitlog/commitlog_entry.cc
@@ -456,7 +454,6 @@ set(scylla_sources
   db/large_data_handler.cc
   db/legacy_schema_migrator.cc
   db/marshal/type_parser.cc
-  db/rate_limiter.cc
   db/schema_tables.cc
   db/size_estimates_virtual_reader.cc
   db/snapshot-ctl.cc
@@ -472,10 +469,10 @@ set(scylla_sources
   dht/murmur3_partitioner.cc
   dht/range_streamer.cc
   dht/token.cc
-  replica/distributed_loader.cc
+  distributed_loader.cc
   duration.cc
   exceptions/exceptions.cc
-  readers/mutation_readers.cc
+  flat_mutation_reader.cc
   frozen_mutation.cc
   frozen_schema.cc
   generic_server.cc
@@ -495,7 +492,7 @@ set(scylla_sources
   index/secondary_index_manager.cc
   init.cc
   keys.cc
-  utils/lister.cc
+  lister.cc
   locator/abstract_replication_strategy.cc
   locator/azure_snitch.cc
   locator/ec2_multi_region_snitch.cc
@@ -513,7 +510,7 @@ set(scylla_sources
   locator/token_metadata.cc
   lang/lua.cc
   main.cc
-  replica/memtable.cc
+  memtable.cc
   message/messaging_service.cc
   multishard_mutation_query.cc
   mutation.cc
@@ -522,7 +519,7 @@ set(scylla_sources
   mutation_partition_serializer.cc
   mutation_partition_view.cc
   mutation_query.cc
-  readers/mutation_reader.cc
+  mutation_reader.cc
   mutation_writer/feed_writers.cc
   mutation_writer/multishard_writer.cc
   mutation_writer/partition_based_splitting_writer.cc
@@ -532,18 +529,14 @@ set(scylla_sources
   partition_version.cc
   querier.cc
   query.cc
-  query_ranges_to_vnodes.cc
   query-result-set.cc
   raft/fsm.cc
   raft/log.cc
   raft/raft.cc
   raft/server.cc
   raft/tracker.cc
-  service/broadcast_tables/experimental/lang.cc
   range_tombstone.cc
   range_tombstone_list.cc
-  tombstone_gc_options.cc
-  tombstone_gc.cc
   reader_concurrency_semaphore.cc
   redis/abstract_command.cc
   redis/command_factory.cc
@@ -560,15 +553,12 @@ set(scylla_sources
   release.cc
   repair/repair.cc
   repair/row_level.cc
-  replica/database.cc
-  replica/table.cc
   row_cache.cc
   schema.cc
   schema_mutations.cc
   schema_registry.cc
   serializer.cc
   service/client_state.cc
-  service/forward_service.cc
   service/migration_manager.cc
   service/misc_services.cc
   service/pager/paging_state.cc
@@ -581,10 +571,11 @@ set(scylla_sources
   service/qos/qos_common.cc
   service/qos/service_level_controller.cc
   service/qos/standard_service_level_distributed_data_accessor.cc
+  service/raft/raft_gossip_failure_detector.cc
   service/raft/raft_group_registry.cc
   service/raft/raft_rpc.cc
   service/raft/raft_sys_table_storage.cc
-  service/raft/group0_state_machine.cc
+  service/raft/schema_raft_state_machine.cc
   service/storage_proxy.cc
   service/storage_service.cc
   sstables/compress.cc
@@ -618,8 +609,8 @@ set(scylla_sources
   streaming/stream_summary.cc
   streaming/stream_task.cc
   streaming/stream_transfer_task.cc
+  table.cc
   table_helper.cc
-  tasks/task_manager.cc
   thrift/controller.cc
   thrift/handler.cc
   thrift/server.cc
```
CONTRIBUTING.md

```diff
@@ -18,5 +18,3 @@ If you need help formatting or sending patches, [check out these instructions](h
 The Scylla C++ source code uses the [Seastar coding style](https://github.com/scylladb/seastar/blob/master/coding-style.md) so please adhere to that in your patches. Note that Scylla code is written with `using namespace seastar`, so should not explicitly add the `seastar::` prefix to Seastar symbols. You will usually not need to add `using namespace seastar` to new source files, because most Scylla header files have `#include "seastarx.hh"`, which does this.
 
-Header files in Scylla must be self-contained, i.e., each can be included without having to include specific other headers first. To verify that your change did not break this property, run `ninja dev-headers`. If you added or removed header files, you must `touch configure.py` first - this will cause `configure.py` to be automatically re-run to generate a fresh list of header files.
-
 For more criteria on what reviewers consider good code, see the [review checklist](https://github.com/scylladb/scylla/blob/master/docs/dev/review-checklist.md).
```
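The paragraph removed above describes the self-contained-headers rule. A minimal sketch of what that property means in practice (the file name and contents here are hypothetical, not from the Scylla tree):

```cpp
// greeting.hh -- an illustrative self-contained header: it can be the first
// #include of any translation unit because it pulls in its own dependencies
// rather than relying on the includer to have done so.
#pragma once

#include <string>  // required by the declaration below; not left to the includer

std::string greet(const std::string& name);
```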
HACKING.md: 36 changes
````diff
@@ -383,40 +383,6 @@ Open the link printed at the end. Be horrified. Go and write more tests.
 
 For more details see `./scripts/coverage.py --help`.
 
-### Resolving stack backtraces
-
-Scylla may print stack backtraces to the log for several reasons.
-For example:
-- When aborting (e.g. due to assertion failure, internal error, or segfault)
-- When detecting seastar reactor stalls (where a seastar task runs for a long time without yielding the cpu to other tasks on that shard)
-
-The backtraces contain code pointers so they are not very helpful without resolving into code locations.
-To resolve the backtraces, one needs the scylla relocatable package that contains the scylla binary (with debug information),
-as well as the dynamic libraries it is linked against.
-
-Builds from our automated build system are uploaded to the cloud
-and can be searched on http://backtrace.scylladb.com/
-
-Make sure you have the scylla server exact `build-id` to locate
-its respective relocatable package, required for decoding backtraces it prints.
-
-The build-id is printed to the system log when scylla starts.
-It can also be found by executing `scylla --build-id`, or
-by using the `file` utility, for example:
-```
-$ scylla --build-id
-4cba12e6eb290a406bfa4930918db23941fd4be3
-
-$ file scylla
-scylla: ELF 64-bit LSB executable, x86-64, version 1 (SYSV), dynamically linked, interpreter /lib64/ld-linux-x86-64.so.2, for GNU/Linux 3.2.0, BuildID[sha1]=4cba12e6eb290a406bfa4930918db23941fd4be3, with debug_info, not stripped, too many notes (256)
-```
-
-To find the build-id of a coredump, use the `eu-unstrip` utility as follows:
-```
-$ eu-unstrip -n --core <coredump> | awk '/scylla$/ { s=$2; sub(/@.*$/, "", s); print s; exit(0); }'
-4cba12e6eb290a406bfa4930918db23941fd4be3
-```
-
 ### Core dump debugging
 
-See [debugging.md](docs/dev/debugging.md).
+See [debugging.md](debugging.md).
````
README.md

```diff
@@ -42,7 +42,7 @@ For further information, please see:
 * [Docker image build documentation] for information on how to build Docker images.
 
 [developer documentation]: HACKING.md
-[build documentation]: docs/dev/building.md
+[build documentation]: docs/guides/building.md
 [docker image build documentation]: dist/docker/debian/README.md
 
 ## Running Scylla
@@ -65,7 +65,7 @@ $ ./tools/toolchain/dbuild ./build/release/scylla --help
 
 ## Testing
 
-See [test.py manual](docs/dev/testing.md).
+See [test.py manual](docs/guides/testing.md).
 
 ## Scylla APIs and compatibility
 By default, Scylla is compatible with Apache Cassandra and its APIs - CQL and
@@ -78,7 +78,7 @@ and the current compatibility of this feature as well as Scylla-specific extensi
 
 ## Documentation
 
-Documentation can be found [here](docs/dev/README.md).
+Documentation can be found [here](https://scylla.docs.scylladb.com).
 Seastar documentation can be found [here](http://docs.seastar.io/master/index.html).
 User documentation can be found [here](https://docs.scylladb.com/).
```
SCYLLA-VERSION-GEN

```diff
@@ -1,12 +1,11 @@
 #!/bin/sh
 
 USAGE=$(cat <<-END
-Usage: $(basename "$0") [-h|--help] [-o|--output-dir PATH] [--date-stamp DATE] -- generate Scylla version and build information files.
+Usage: $(basename "$0") [-h|--help] [-o|--output-dir PATH] -- generate Scylla version and build information files.
 
 Options:
   -h|--help            show this help message.
   -o|--output-dir PATH specify destination path at which the version files are to be created.
-  -d|--date-stamp DATE manually set date for release parameter
 
 By default, the script will attempt to parse 'version' file
 in the current directory, which should contain a string of
@@ -32,8 +31,6 @@ using '-o PATH' option.
 END
 )
 
-DATE=""
-
 while [[ $# -gt 0 ]]; do
     opt="$1"
     case $opt in
@@ -46,11 +43,6 @@ while [[ $# -gt 0 ]]; do
             shift
             shift
             ;;
-        --date-stamp)
-            DATE="$2"
-            shift
-            shift
-            ;;
         *)
             echo "Unexpected argument found: $1"
             echo
@@ -66,33 +58,24 @@ if [ -z "$OUTPUT_DIR" ]; then
     OUTPUT_DIR="$SCRIPT_DIR/build"
 fi
 
-if [ -z "$DATE" ]; then
-    DATE=$(date --utc +%Y%m%d)
-fi
-
 # Default scylla product/version tags
 PRODUCT=scylla
-VERSION=5.2.0-dev
+VERSION=4.6.11
 
 if test -f version
 then
     SCYLLA_VERSION=$(cat version | awk -F'-' '{print $1}')
     SCYLLA_RELEASE=$(cat version | awk -F'-' '{print $2}')
 else
+    DATE=$(date +%Y%m%d)
+    GIT_COMMIT=$(git -C "$SCRIPT_DIR" log --pretty=format:'%h' -n 1)
     SCYLLA_VERSION=$VERSION
-    if [ -z "$SCYLLA_RELEASE" ]; then
-        DATE=$(date --utc +%Y%m%d)
-        GIT_COMMIT=$(git -C "$SCRIPT_DIR" log --pretty=format:'%h' -n 1 --abbrev=12)
-        # For custom package builds, replace "0" with "counter.your_name",
-        # where counter starts at 1 and increments for successive versions.
-        # This ensures that the package manager will select your custom
-        # package over the standard release.
-        SCYLLA_BUILD=0
-        SCYLLA_RELEASE=$SCYLLA_BUILD.$DATE.$GIT_COMMIT
-    elif [ -f "$OUTPUT_DIR/SCYLLA-RELEASE-FILE" ]; then
-        echo "setting SCYLLA_RELEASE only makes sense in clean builds" 1>&2
-        exit 1
-    fi
+    # For custom package builds, replace "0" with "counter.your_name",
+    # where counter starts at 1 and increments for successive versions.
+    # This ensures that the package manager will select your custom
+    # package over the standard release.
+    SCYLLA_BUILD=0
+    SCYLLA_RELEASE=$SCYLLA_BUILD.$DATE.$GIT_COMMIT
 fi
 
 if [ -f "$OUTPUT_DIR/SCYLLA-RELEASE-FILE" ]; then
```
Submodule abseil updated: 7f3c0d7811...f70eadadd7
absl-flat_hash_map.cc

```diff
@@ -3,7 +3,20 @@
  */
 
 /*
- * SPDX-License-Identifier: AGPL-3.0-or-later
+ * This file is part of Scylla.
+ *
+ * Scylla is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Scylla is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include "absl-flat_hash_map.hh"
```
absl-flat_hash_map.hh

```diff
@@ -3,7 +3,20 @@
  */
 
 /*
- * SPDX-License-Identifier: AGPL-3.0-or-later
+ * This file is part of Scylla.
+ *
+ * Scylla is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Scylla is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #pragma once
```
alternator/auth.cc

```diff
@@ -3,7 +3,20 @@
  */
 
 /*
- * SPDX-License-Identifier: AGPL-3.0-or-later
+ * This file is part of Scylla.
+ *
+ * Scylla is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Scylla is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include "alternator/error.hh"
@@ -21,6 +34,7 @@
 #include "service/storage_proxy.hh"
 #include "alternator/executor.hh"
 #include "cql3/selection/selection.hh"
+#include "database.hh"
 #include "query-result-set.hh"
 #include "cql3/result_set.hh"
 #include <seastar/core/coroutine.hh>
@@ -123,18 +137,17 @@ std::string get_signature(std::string_view access_key_id, std::string_view secre
 }
 
 future<std::string> get_key_from_roles(service::storage_proxy& proxy, std::string username) {
-    schema_ptr schema = proxy.data_dictionary().find_schema("system_auth", "roles");
+    schema_ptr schema = proxy.get_db().local().find_schema("system_auth", "roles");
     partition_key pk = partition_key::from_single_value(*schema, utf8_type->decompose(username));
     dht::partition_range_vector partition_ranges{dht::partition_range(dht::decorate_key(*schema, pk))};
     std::vector<query::clustering_range> bounds{query::clustering_range::make_open_ended_both_sides()};
     const column_definition* salted_hash_col = schema->get_column_definition(bytes("salted_hash"));
     if (!salted_hash_col) {
-        co_await coroutine::return_exception(api_error::unrecognized_client(format("Credentials cannot be fetched for: {}", username)));
+        co_return coroutine::make_exception(api_error::unrecognized_client(format("Credentials cannot be fetched for: {}", username)));
     }
     auto selection = cql3::selection::selection::for_columns(schema, {salted_hash_col});
     auto partition_slice = query::partition_slice(std::move(bounds), {}, query::column_id_vector{salted_hash_col->id}, selection->get_query_options());
-    auto command = ::make_lw_shared<query::read_command>(schema->id(), schema->version(), partition_slice,
-            proxy.get_max_result_size(partition_slice), query::tombstone_limit(proxy.get_tombstone_limit()));
+    auto command = ::make_lw_shared<query::read_command>(schema->id(), schema->version(), partition_slice, proxy.get_max_result_size(partition_slice));
     auto cl = auth::password_authenticator::consistency_for_user(username);
 
     service::client_state client_state{service::client_state::internal_tag()};
@@ -146,11 +159,11 @@ future<std::string> get_key_from_roles(service::storage_proxy& proxy, std::strin
 
     auto result_set = builder.build();
     if (result_set->empty()) {
-        co_await coroutine::return_exception(api_error::unrecognized_client(format("User not found: {}", username)));
+        co_return coroutine::make_exception(api_error::unrecognized_client(format("User not found: {}", username)));
     }
     const bytes_opt& salted_hash = result_set->rows().front().front(); // We only asked for 1 row and 1 column
     if (!salted_hash) {
-        co_await coroutine::return_exception(api_error::unrecognized_client(format("No password found for user: {}", username)));
+        co_return coroutine::make_exception(api_error::unrecognized_client(format("No password found for user: {}", username)));
     }
     co_return value_cast<sstring>(utf8_type->deserialize(*salted_hash));
 }
```
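The get_key_from_roles() hunks above swap between two Seastar idioms for ending a coroutine with a failed future without throwing. A minimal sketch of both, assuming Seastar's `<seastar/core/coroutine.hh>`; note that `coroutine::make_exception` is the older helper seen on the next-4.6 side and may not exist in newer Seastar releases:

```cpp
#include <seastar/core/coroutine.hh>
#include <seastar/core/future.hh>
#include <stdexcept>

using namespace seastar;

// Newer idiom (base-branch side): awaiting return_exception finishes the
// coroutine with a failed future, without raising a C++ exception.
future<int> lookup_new(bool fail) {
    if (fail) {
        co_await coroutine::return_exception(std::runtime_error("not found"));
    }
    co_return 42;
}

// Older idiom (next-4.6 side): co_return a wrapped exception value.
future<int> lookup_old(bool fail) {
    if (fail) {
        co_return coroutine::make_exception(std::runtime_error("not found"));
    }
    co_return 42;
}
```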
alternator/auth.hh

```diff
@@ -3,7 +3,20 @@
  */
 
 /*
- * SPDX-License-Identifier: AGPL-3.0-or-later
+ * This file is part of Scylla.
+ *
+ * Scylla is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Scylla is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #pragma once
@@ -22,7 +35,7 @@ namespace alternator {
 
 using hmac_sha256_digest = std::array<char, 32>;
 
-using key_cache = utils::loading_cache<std::string, std::string, 1>;
+using key_cache = utils::loading_cache<std::string, std::string>;
 
 std::string get_signature(std::string_view access_key_id, std::string_view secret_access_key, std::string_view host, std::string_view method,
     std::string_view orig_datestamp, std::string_view signed_headers_str, const std::map<std::string_view, std::string_view>& signed_headers_map,
```
```diff
@@ -3,7 +3,20 @@
  */
 
 /*
- * SPDX-License-Identifier: AGPL-3.0-or-later
+ * This file is part of Scylla.
+ *
+ * Scylla is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Scylla is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <list>
```
```diff
@@ -3,7 +3,20 @@
  */
 
 /*
- * SPDX-License-Identifier: AGPL-3.0-or-later
+ * This file is part of Scylla.
+ *
+ * Scylla is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Scylla is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
```
alternator/controller.cc

```diff
@@ -3,7 +3,20 @@
  */
 
 /*
- * SPDX-License-Identifier: AGPL-3.0-or-later
+ * This file is part of Scylla.
+ *
+ * Scylla is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Scylla is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <seastar/net/dns.hh>
@@ -14,8 +27,6 @@
 #include "db/config.hh"
 #include "cdc/generation_service.hh"
 #include "service/memory_limiter.hh"
-#include "auth/service.hh"
-#include "service/qos/service_level_controller.hh"
 
 using namespace seastar;
 
@@ -30,8 +41,6 @@ controller::controller(
         sharded<db::system_distributed_keyspace>& sys_dist_ks,
         sharded<cdc::generation_service>& cdc_gen_svc,
         sharded<service::memory_limiter>& memory_limiter,
-        sharded<auth::service>& auth_service,
-        sharded<qos::service_level_controller>& sl_controller,
        const db::config& config)
     : _gossiper(gossiper)
     , _proxy(proxy)
@@ -39,32 +48,12 @@ controller::controller(
     , _sys_dist_ks(sys_dist_ks)
     , _cdc_gen_svc(cdc_gen_svc)
     , _memory_limiter(memory_limiter)
-    , _auth_service(auth_service)
-    , _sl_controller(sl_controller)
     , _config(config)
 {
 }
 
-sstring controller::name() const {
-    return "alternator";
-}
-
-sstring controller::protocol() const {
-    return "dynamodb";
-}
-
-sstring controller::protocol_version() const {
-    return version;
-}
-
-std::vector<socket_address> controller::listen_addresses() const {
-    return _listen_addresses;
-}
-
-future<> controller::start_server() {
+future<> controller::start() {
     return seastar::async([this] {
         _listen_addresses.clear();
 
         auto preferred = _config.listen_interface_prefer_ipv6() ? std::make_optional(net::inet_address::family::INET6) : std::nullopt;
         auto family = _config.enable_ipv6_dns_lookup() || preferred ? std::nullopt : std::make_optional(net::inet_address::family::INET);
@@ -78,27 +67,25 @@ future<> controller::start_server() {
         rmw_operation::set_default_write_isolation(_config.alternator_write_isolation());
         executor::set_default_timeout(std::chrono::milliseconds(_config.alternator_timeout_in_ms()));
 
-        net::inet_address addr = utils::resolve(_config.alternator_address, family).get0();
+        net::inet_address addr;
+        try {
+            addr = net::dns::get_host_by_name(_config.alternator_address(), family).get0().addr_list.front();
+        } catch (...) {
+            std::throw_with_nested(std::runtime_error(fmt::format("Unable to resolve alternator_address {}", _config.alternator_address())));
+        }
 
         auto get_cdc_metadata = [] (cdc::generation_service& svc) { return std::ref(svc.get_cdc_metadata()); };
 
         _executor.start(std::ref(_gossiper), std::ref(_proxy), std::ref(_mm), std::ref(_sys_dist_ks), sharded_parameter(get_cdc_metadata, std::ref(_cdc_gen_svc)), _ssg.value()).get();
-        _server.start(std::ref(_executor), std::ref(_proxy), std::ref(_gossiper), std::ref(_auth_service), std::ref(_sl_controller)).get();
-        // Note: from this point on, if start_server() throws for any reason,
-        // it must first call stop_server() to stop the executor and server
-        // services we just started - or Scylla will cause an assertion
-        // failure when the controller object is destroyed in the exception
-        // unwinding.
+        _server.start(std::ref(_executor), std::ref(_proxy), std::ref(_gossiper)).get();
         std::optional<uint16_t> alternator_port;
         if (_config.alternator_port()) {
             alternator_port = _config.alternator_port();
-            _listen_addresses.push_back({addr, *alternator_port});
         }
         std::optional<uint16_t> alternator_https_port;
         std::optional<tls::credentials_builder> creds;
         if (_config.alternator_https_port()) {
             alternator_https_port = _config.alternator_https_port();
-            _listen_addresses.push_back({addr, *alternator_https_port});
             creds.emplace();
             auto opts = _config.alternator_encryption_options();
             if (opts.empty()) {
@@ -115,13 +102,7 @@ future<> controller::start_server() {
             }
             opts.erase("require_client_auth");
             opts.erase("truststore");
-            try {
-                utils::configure_tls_creds_builder(creds.value(), std::move(opts)).get();
-            } catch(...) {
-                logger.error("Failed to set up Alternator TLS credentials: {}", std::current_exception());
-                stop_server().get();
-                std::throw_with_nested(std::runtime_error("Failed to set up Alternator TLS credentials"));
-            }
+            utils::configure_tls_creds_builder(creds.value(), std::move(opts)).get();
         }
         bool alternator_enforce_authorization = _config.alternator_enforce_authorization();
         _server.invoke_on_all(
@@ -129,10 +110,6 @@ future<> controller::start_server() {
             return server.init(addr, alternator_port, alternator_https_port, creds, alternator_enforce_authorization,
                     &_memory_limiter.local().get_semaphore(),
                     _config.max_concurrent_requests_per_shard);
-        }).handle_exception([this, addr, alternator_port, alternator_https_port] (std::exception_ptr ep) {
-            logger.error("Failed to set up Alternator HTTP server on {} port {}, TLS port {}: {}",
-                    addr, alternator_port ? std::to_string(*alternator_port) : "OFF", alternator_https_port ? std::to_string(*alternator_https_port) : "OFF", ep);
-            return stop_server().then([ep = std::move(ep)] { return make_exception_future<>(ep); });
         }).then([addr, alternator_port, alternator_https_port] {
             logger.info("Alternator server listening on {}, HTTP port {}, HTTPS port {}",
                     addr, alternator_port ? std::to_string(*alternator_port) : "OFF", alternator_https_port ? std::to_string(*alternator_https_port) : "OFF");
@@ -140,20 +117,12 @@ future<> controller::start_server() {
         });
 }
 
-future<> controller::stop_server() {
+future<> controller::stop() {
     return seastar::async([this] {
         if (!_ssg) {
             return;
         }
         _server.stop().get();
         _executor.stop().get();
-        _listen_addresses.clear();
         destroy_smp_service_group(_ssg.value()).get();
     });
 }
 
-future<> controller::request_stop_server() {
-    return stop_server();
-}
-
 }
```
alternator/controller.hh

```diff
@@ -3,7 +3,20 @@
  */
 
 /*
- * SPDX-License-Identifier: AGPL-3.0-or-later
+ * This file is part of Scylla.
+ *
+ * Scylla is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Scylla is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #pragma once
@@ -11,8 +24,6 @@
 #include <seastar/core/sharded.hh>
 #include <seastar/core/smp.hh>
 
-#include "protocol_server.hh"
-
 namespace service {
 class storage_proxy;
 class migration_manager;
@@ -34,38 +45,22 @@ class gossiper;
 
 }
 
-namespace auth {
-class service;
-}
-
-namespace qos {
-class service_level_controller;
-}
-
 namespace alternator {
 
-// This is the official DynamoDB API version.
-// It represents the last major reorganization of that API, and all the features
-// that were added since did NOT increment this version string.
-constexpr const char* version = "2012-08-10";
-
 using namespace seastar;
 
 class executor;
 class server;
 
-class controller : public protocol_server {
+class controller {
     sharded<gms::gossiper>& _gossiper;
     sharded<service::storage_proxy>& _proxy;
     sharded<service::migration_manager>& _mm;
     sharded<db::system_distributed_keyspace>& _sys_dist_ks;
     sharded<cdc::generation_service>& _cdc_gen_svc;
     sharded<service::memory_limiter>& _memory_limiter;
-    sharded<auth::service>& _auth_service;
-    sharded<qos::service_level_controller>& _sl_controller;
     const db::config& _config;
 
-    std::vector<socket_address> _listen_addresses;
     sharded<executor> _executor;
     sharded<server> _server;
     std::optional<smp_service_group> _ssg;
@@ -78,17 +73,10 @@ public:
             sharded<db::system_distributed_keyspace>& sys_dist_ks,
             sharded<cdc::generation_service>& cdc_gen_svc,
             sharded<service::memory_limiter>& memory_limiter,
-            sharded<auth::service>& auth_service,
-            sharded<qos::service_level_controller>& sl_controller,
             const db::config& config);
 
-    virtual sstring name() const override;
-    virtual sstring protocol() const override;
-    virtual sstring protocol_version() const override;
-    virtual std::vector<socket_address> listen_addresses() const override;
-    virtual future<> start_server() override;
-    virtual future<> stop_server() override;
-    virtual future<> request_stop_server() override;
+    future<> start();
+    future<> stop();
 };
 
 }
```
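For orientation: on the base branch, controller plugs into a generic protocol_server interface whose virtuals are exactly the ones deleted in this hunk. A hypothetical reconstruction of that interface from the signatures visible above (the real protocol_server.hh may differ):

```cpp
#include <seastar/core/future.hh>
#include <seastar/core/sstring.hh>
#include <seastar/net/socket_defs.hh>
#include <vector>

using namespace seastar;

// Reconstructed from the overrides removed in the diff above; illustrative only.
class protocol_server {
public:
    virtual ~protocol_server() = default;
    virtual sstring name() const = 0;               // e.g. "alternator"
    virtual sstring protocol() const = 0;           // e.g. "dynamodb"
    virtual sstring protocol_version() const = 0;
    virtual std::vector<socket_address> listen_addresses() const = 0;
    virtual future<> start_server() = 0;
    virtual future<> stop_server() = 0;
    virtual future<> request_stop_server() = 0;
};
```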
alternator/error.hh

```diff
@@ -3,7 +3,20 @@
  */
 
 /*
- * SPDX-License-Identifier: AGPL-3.0-or-later
+ * This file is part of Scylla.
+ *
+ * Scylla is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Scylla is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #pragma once
@@ -70,12 +83,6 @@ public:
     static api_error request_limit_exceeded(std::string msg) {
         return api_error("RequestLimitExceeded", std::move(msg));
     }
-    static api_error serialization(std::string msg) {
-        return api_error("SerializationException", std::move(msg));
-    }
-    static api_error table_not_found(std::string msg) {
-        return api_error("TableNotFoundException", std::move(msg));
-    }
     static api_error internal(std::string msg) {
         return api_error("InternalServerError", std::move(msg), reply::status_type::internal_server_error);
     }
```
(File diff suppressed because it is too large.)
alternator/executor.hh

```diff
@@ -3,7 +3,20 @@
  */
 
 /*
- * SPDX-License-Identifier: AGPL-3.0-or-later
+ * This file is part of Scylla.
+ *
+ * Scylla is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Scylla is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #pragma once
@@ -60,16 +73,6 @@ public:
     explicit make_jsonable(rjson::value&& value);
     std::string to_json() const override;
 };
 
-/**
- * Make return type for serializing the object "streamed",
- * i.e. direct to HTTP output stream. Note: only useful for
- * (very) large objects as there are overhead issues with this
- * as well, but for massive lists of return objects this can
- * help avoid large allocations/many re-allocs
- */
-json::json_return_type make_streamed(rjson::value&&);
-
 struct json_string : public json::jsonable {
     std::string _value;
 public:
@@ -81,10 +84,9 @@ namespace parsed {
 class path;
 };
 
+const std::map<sstring, sstring>& get_tags_of_table(schema_ptr schema);
 future<> update_tags(service::migration_manager& mm, schema_ptr schema, std::map<sstring, sstring>&& tags_map);
 schema_ptr get_table(service::storage_proxy& proxy, const rjson::value& request);
 bool is_alternator_keyspace(const sstring& ks_name);
-// Wraps the db::get_tags_of_table and throws if the table is missing the tags extension.
-const std::map<sstring, sstring>& get_tags_of_table_or_throw(schema_ptr schema);
 
 // An attribute_path_map object is used to hold data for various attributes
 // paths (parsed::path) in a hierarchy of attribute paths. Each attribute path
@@ -144,11 +146,6 @@ template<typename T>
 using attribute_path_map = std::unordered_map<std::string, attribute_path_map_node<T>>;
 
 using attrs_to_get_node = attribute_path_map_node<std::monostate>;
-// attrs_to_get lists which top-level attribute are needed, and possibly also
-// which part of the top-level attribute is really needed (when nested
-// attribute paths appeared in the query).
-// Most code actually uses optional<attrs_to_get>. There, a disengaged
-// optional means we should get all attributes, not specific ones.
 using attrs_to_get = attribute_path_map<std::monostate>;
@@ -196,11 +193,12 @@ public:
     future<request_return_type> describe_stream(client_state& client_state, service_permit permit, rjson::value request);
     future<request_return_type> get_shard_iterator(client_state& client_state, service_permit permit, rjson::value request);
     future<request_return_type> get_records(client_state& client_state, tracing::trace_state_ptr, service_permit permit, rjson::value request);
+    future<request_return_type> describe_continuous_backups(client_state& client_state, service_permit permit, rjson::value request);
 
     future<> start();
     future<> stop() { return make_ready_future<>(); }
 
     future<> create_keyspace(std::string_view keyspace_name);
 
     static sstring table_name(const schema&);
     static db::timeout_clock::time_point default_timeout();
     static void set_default_timeout(db::timeout_clock::duration timeout);
@@ -212,31 +210,27 @@
 private:
     friend class rmw_operation;
 
     static bool is_alternator_keyspace(const sstring& ks_name);
     static sstring make_keyspace_name(const sstring& table_name);
-    static void describe_key_schema(rjson::value& parent, const schema&, std::unordered_map<std::string,std::string> * = nullptr);
+    static void describe_key_schema(rjson::value& parent, const schema& schema, std::unordered_map<std::string,std::string>&);
 
 public:
     static std::optional<rjson::value> describe_single_item(schema_ptr,
        const query::partition_slice&,
         const cql3::selection::selection&,
         const query::result&,
         const std::optional<attrs_to_get>&);
 
     static std::vector<rjson::value> describe_multi_item(schema_ptr schema,
         const query::partition_slice& slice,
         const cql3::selection::selection& selection,
         const query::result& query_result,
-        const std::optional<attrs_to_get>& attrs_to_get);
+        const attrs_to_get&);
 
     static void describe_single_item(const cql3::selection::selection&,
         const std::vector<bytes_opt>&,
-        const std::optional<attrs_to_get>&,
+        const attrs_to_get&,
         rjson::value&,
         bool = false);
 
-    static void add_stream_options(const rjson::value& stream_spec, schema_builder&, service::storage_proxy& sp);
-    static void supplement_table_info(rjson::value& descr, const schema& schema, service::storage_proxy& sp);
-    static void supplement_table_stream_info(rjson::value& descr, const schema& schema, service::storage_proxy& sp);
+    void add_stream_options(const rjson::value& stream_spec, schema_builder&) const;
+    void supplement_table_info(rjson::value& descr, const schema& schema) const;
+    void supplement_table_stream_info(rjson::value& descr, const schema& schema) const;
 };
 
 }
```
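The comment deleted in the attrs_to_get hunk above documents a convention worth spelling out: a disengaged optional<attrs_to_get> means "return all attributes". A self-contained sketch of that convention, with a stand-in set type in place of Scylla's attribute_path_map:

```cpp
#include <iostream>
#include <optional>
#include <string>
#include <unordered_set>

// Stand-in for alternator's attrs_to_get (really an attribute_path_map).
using attrs_to_get = std::unordered_set<std::string>;

// Disengaged optional => caller wants every attribute (contains() needs C++20).
bool should_return(const std::optional<attrs_to_get>& wanted, const std::string& attr) {
    return !wanted || wanted->contains(attr);
}

int main() {
    std::optional<attrs_to_get> all;                             // no projection: return everything
    std::optional<attrs_to_get> some{attrs_to_get{"pk", "name"}};  // projection: only listed attributes
    std::cout << should_return(all, "age") << ' ' << should_return(some, "age") << '\n';  // 1 0
}
```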
alternator/expressions.cc

```diff
@@ -3,7 +3,20 @@
  */
 
 /*
- * SPDX-License-Identifier: AGPL-3.0-or-later
+ * This file is part of Scylla.
+ *
+ * Scylla is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Scylla is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include "expressions.hh"
@@ -29,7 +42,7 @@
 namespace alternator {
 
 template <typename Func, typename Result = std::result_of_t<Func(expressionsParser&)>>
-Result do_with_parser(std::string_view input, Func&& f) {
+Result do_with_parser(std::string input, Func&& f) {
     expressionsLexer::InputStreamType input_stream{
         reinterpret_cast<const ANTLR_UINT8*>(input.data()),
         ANTLR_ENC_UTF8,
@@ -44,7 +57,7 @@ Result do_with_parser(std::string_view input, Func&& f) {
 }
 
 parsed::update_expression
-parse_update_expression(std::string_view query) {
+parse_update_expression(std::string query) {
     try {
         return do_with_parser(query, std::mem_fn(&expressionsParser::update_expression));
     } catch (...) {
@@ -53,7 +66,7 @@ parse_update_expression(std::string_view query) {
 }
 
 std::vector<parsed::path>
-parse_projection_expression(std::string_view query) {
+parse_projection_expression(std::string query) {
     try {
         return do_with_parser(query, std::mem_fn(&expressionsParser::projection_expression));
     } catch (...) {
@@ -62,7 +75,7 @@ parse_projection_expression(std::string_view query) {
 }
 
 parsed::condition_expression
-parse_condition_expression(std::string_view query) {
+parse_condition_expression(std::string query) {
     try {
         return do_with_parser(query, std::mem_fn(&expressionsParser::condition_expression));
     } catch (...) {
```
```diff
@@ -3,7 +3,20 @@
  */
 
 /*
- * SPDX-License-Identifier: AGPL-3.0-or-later
+ * This file is part of Scylla.
+ *
+ * Scylla is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Scylla is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
  */
 
 /*
```
alternator/expressions.hh

```diff
@@ -3,7 +3,20 @@
  */
 
 /*
- * SPDX-License-Identifier: AGPL-3.0-or-later
+ * This file is part of Scylla.
+ *
+ * Scylla is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Scylla is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #pragma once
@@ -26,9 +39,9 @@ public:
     using runtime_error::runtime_error;
 };
 
-parsed::update_expression parse_update_expression(std::string_view query);
-std::vector<parsed::path> parse_projection_expression(std::string_view query);
-parsed::condition_expression parse_condition_expression(std::string_view query);
+parsed::update_expression parse_update_expression(std::string query);
+std::vector<parsed::path> parse_projection_expression(std::string query);
+parsed::condition_expression parse_condition_expression(std::string query);
 
 void resolve_update_expression(parsed::update_expression& ue,
     const rjson::value* expression_attribute_names,
```
```diff
@@ -3,7 +3,20 @@
  */
 
 /*
- * SPDX-License-Identifier: AGPL-3.0-or-later
+ * This file is part of Scylla.
+ *
+ * Scylla is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Scylla is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #pragma once
```
```diff
@@ -3,7 +3,20 @@
  */
 
 /*
- * SPDX-License-Identifier: AGPL-3.0-or-later
+ * This file is part of Scylla.
+ *
+ * Scylla is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * Scylla is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
  */
 
 #pragma once
```
@@ -3,7 +3,20 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "utils/base64.hh"
|
||||
@@ -14,14 +27,11 @@
#include "rapidjson/writer.h"
#include "concrete_types.hh"
#include "cql3/type_json.hh"
#include "position_in_partition.hh"

static logging::logger slogger("alternator-serialization");

namespace alternator {

bool is_alternator_keyspace(const sstring& ks_name);

type_info type_info_from_string(std::string_view type) {
    static thread_local const std::unordered_map<std::string_view, type_info> type_infos = {
        {"S", {alternator_type::S, utf8_type}},
@@ -164,43 +174,32 @@ bytes get_key_column_value(const rjson::value& item, const column_definition& co
    return get_key_from_typed_value(*key_typed_value, column);
}

// Parses the JSON encoding for a key value, which is a map with a single
// entry whose key is the type and the value is the encoded value.
// If this type does not match the desired "type_str", an api_error::validation
// error is thrown (the "name" parameter is the name of the column which will
// be mentioned in the exception message).
// If the type does match, a reference to the encoded value is returned.
static const rjson::value& get_typed_value(const rjson::value& key_typed_value, std::string_view type_str, std::string_view name, std::string_view value_name) {
    if (!key_typed_value.IsObject() || key_typed_value.MemberCount() != 1 ||
            !key_typed_value.MemberBegin()->value.IsString()) {
        throw api_error::validation(
                format("Malformed value object for {} {}: {}",
                        value_name, name, key_typed_value));
    }

    auto it = key_typed_value.MemberBegin();
    if (rjson::to_string_view(it->name) != type_str) {
        throw api_error::validation(
                format("Type mismatch: expected type {} for {} {}, got type {}",
                        type_str, value_name, name, it->name));
    }
    return it->value;
}

// Parses the JSON encoding for a key value, which is a map with a single
// entry, whose key is the type (expected to match the key column's type)
// and the value is the encoded value.
bytes get_key_from_typed_value(const rjson::value& key_typed_value, const column_definition& column) {
    auto& value = get_typed_value(key_typed_value, type_to_string(column.type), column.name_as_text(), "key column");
    std::string_view value_view = rjson::to_string_view(value);
    if (!key_typed_value.IsObject() || key_typed_value.MemberCount() != 1 ||
            !key_typed_value.MemberBegin()->value.IsString()) {
        throw api_error::validation(
                format("Malformed value object for key column {}: {}",
                        column.name_as_text(), key_typed_value));
    }

    auto it = key_typed_value.MemberBegin();
    if (it->name != type_to_string(column.type)) {
        throw api_error::validation(
                format("Type mismatch: expected type {} for key column {}, got type {}",
                        type_to_string(column.type), column.name_as_text(), it->name));
    }
    std::string_view value_view = rjson::to_string_view(it->value);
    if (value_view.empty()) {
        throw api_error::validation(
                format("The AttributeValue for a key attribute cannot contain an empty string value. Key: {}", column.name_as_text()));
    }
    if (column.type == bytes_type) {
        return rjson::base64_decode(value);
        return rjson::base64_decode(it->value);
    } else {
        return column.type->from_string(value_view);
        return column.type->from_string(rjson::to_string_view(it->value));
    }

}
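
For readers unfamiliar with the encoding these functions parse: a DynamoDB AttributeValue is a single-member JSON object whose member name is the type tag ("S", "N", "B", ...) and whose value is the encoded payload. The following standalone sketch uses plain RapidJSON (not Scylla's rjson wrapper; the helper name is hypothetical) to illustrate the same single-member and type-tag checks:

#include <cassert>
#include <string>
#include <rapidjson/document.h>

// Hypothetical helper mirroring the single-member + type-tag validation above.
static const rapidjson::Value* typed_value(const rapidjson::Value& v, const char* type_tag) {
    if (!v.IsObject() || v.MemberCount() != 1) {
        return nullptr;                // malformed: must be a one-member object
    }
    auto it = v.MemberBegin();
    if (std::string(it->name.GetString()) != type_tag || !it->value.IsString()) {
        return nullptr;                // wrong type tag, e.g. expected "S"
    }
    return &it->value;
}

int main() {
    rapidjson::Document d;
    d.Parse(R"({"S": "alice"})");      // a string-typed key value
    assert(typed_value(d, "S") && std::string(typed_value(d, "S")->GetString()) == "alice");
    d.Parse(R"({"N": "42"})");         // a number value: wrong tag for a string key
    assert(!typed_value(d, "S"));
}
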
@@ -251,39 +250,6 @@ clustering_key ck_from_json(const rjson::value& item, schema_ptr schema) {
|
||||
return clustering_key::from_exploded(raw_ck);
|
||||
}
|
||||
|
||||
position_in_partition pos_from_json(const rjson::value& item, schema_ptr schema) {
|
||||
auto ck = ck_from_json(item, schema);
|
||||
if (is_alternator_keyspace(schema->ks_name())) {
|
||||
return position_in_partition::for_key(std::move(ck));
|
||||
}
|
||||
const auto region_item = rjson::find(item, scylla_paging_region);
|
||||
const auto weight_item = rjson::find(item, scylla_paging_weight);
|
||||
if (bool(region_item) != bool(weight_item)) {
|
||||
throw api_error::validation("Malformed value object: region and weight has to be either both missing or both present");
|
||||
}
|
||||
partition_region region;
|
||||
bound_weight weight;
|
||||
if (region_item) {
|
||||
auto region_view = rjson::to_string_view(get_typed_value(*region_item, "S", scylla_paging_region, "key region"));
|
||||
auto weight_view = rjson::to_string_view(get_typed_value(*weight_item, "N", scylla_paging_weight, "key weight"));
|
||||
auto region = parse_partition_region(region_view);
|
||||
if (weight_view == "-1") {
|
||||
weight = bound_weight::before_all_prefixed;
|
||||
} else if (weight_view == "0") {
|
||||
weight = bound_weight::equal;
|
||||
} else if (weight_view == "1") {
|
||||
weight = bound_weight::after_all_prefixed;
|
||||
} else {
|
||||
throw std::runtime_error(fmt::format("Invalid value for weight: {}", weight_view));
|
||||
}
|
||||
return position_in_partition(region, weight, region == partition_region::clustered ? std::optional(std::move(ck)) : std::nullopt);
|
||||
}
|
||||
if (ck.is_empty()) {
|
||||
return position_in_partition(position_in_partition::partition_start_tag_t());
|
||||
}
|
||||
return position_in_partition::for_key(std::move(ck));
|
||||
}
|
||||
|
||||
big_decimal unwrap_number(const rjson::value& v, std::string_view diagnostic) {
    if (!v.IsObject() || v.MemberCount() != 1) {
        throw api_error::validation(format("{}: invalid number object", diagnostic));
@@ -293,9 +259,11 @@ big_decimal unwrap_number(const rjson::value& v, std::string_view diagnostic) {
        throw api_error::validation(format("{}: expected number, found type '{}'", diagnostic, it->name));
    }
    try {
        if (it->value.IsNumber()) {
            // FIXME(sarna): should use big_decimal constructor with numeric values directly:
            return big_decimal(rjson::print(it->value));
        }
        if (!it->value.IsString()) {
            // We shouldn't reach here. Callers normally validate their input
            // earlier with validate_value().
            throw api_error::validation(format("{}: improperly formatted number constant", diagnostic));
        }
        return big_decimal(rjson::to_string_view(it->value));
@@ -304,21 +272,6 @@ big_decimal unwrap_number(const rjson::value& v, std::string_view diagnostic) {
    }
}

std::optional<big_decimal> try_unwrap_number(const rjson::value& v) {
    if (!v.IsObject() || v.MemberCount() != 1) {
        return std::nullopt;
    }
    auto it = v.MemberBegin();
    if (it->name != "N" || !it->value.IsString()) {
        return std::nullopt;
    }
    try {
        return big_decimal(rjson::to_string_view(it->value));
    } catch (const marshal_exception& e) {
        return std::nullopt;
    }
}

const std::pair<std::string, const rjson::value*> unwrap_set(const rjson::value& v) {
    if (!v.IsObject() || v.MemberCount() != 1) {
        return {"", nullptr};
@@ -326,7 +279,7 @@ const std::pair<std::string, const rjson::value*> unwrap_set(const rjson::value&
    auto it = v.MemberBegin();
    const std::string it_key = it->name.GetString();
    if (it_key != "SS" && it_key != "BS" && it_key != "NS") {
        return {std::move(it_key), nullptr};
        return {"", nullptr};
    }
    return std::make_pair(it_key, &(it->value));
}

||||
auto [set1_type, set1] = unwrap_set(v1);
|
||||
auto [set2_type, set2] = unwrap_set(v2);
|
||||
if (set1_type != set2_type) {
|
||||
throw api_error::validation(format("Set DELETE type mismatch: {} and {}", set1_type, set2_type));
|
||||
throw api_error::validation(format("Mismatched set types: {} and {}", set1_type, set2_type));
|
||||
}
|
||||
if (!set1 || !set2) {
|
||||
throw api_error::validation("UpdateExpression: DELETE operation can only be performed on a set");
|
||||
|
||||
@@ -3,22 +3,32 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
#include <optional>
|
||||
#include "types.hh"
|
||||
#include "schema_fwd.hh"
|
||||
#include "keys.hh"
|
||||
#include "utils/rjson.hh"
|
||||
#include "utils/big_decimal.hh"
|
||||
|
||||
class position_in_partition;
|
||||
|
||||
namespace alternator {
|
||||
|
||||
enum class alternator_type : int8_t {
@@ -35,9 +45,6 @@ struct type_representation {
    data_type dtype;
};

inline constexpr std::string_view scylla_paging_region(":scylla:paging:region");
inline constexpr std::string_view scylla_paging_weight(":scylla:paging:weight");

type_info type_info_from_string(std::string_view type);
type_representation represent_type(alternator_type atype);

@@ -52,16 +59,11 @@ rjson::value json_key_column_value(bytes_view cell, const column_definition& col
|
||||
|
||||
partition_key pk_from_json(const rjson::value& item, schema_ptr schema);
|
||||
clustering_key ck_from_json(const rjson::value& item, schema_ptr schema);
|
||||
position_in_partition pos_from_json(const rjson::value& item, schema_ptr schema);
|
||||
|
||||
// If v encodes a number (i.e., it is a {"N": [...]}), returns an object representing it. Otherwise,
|
||||
// raises ValidationException with diagnostic.
|
||||
big_decimal unwrap_number(const rjson::value& v, std::string_view diagnostic);
|
||||
|
||||
// try_unwrap_number is like unwrap_number, but returns an unset optional
|
||||
// when the given v does not encode a number.
|
||||
std::optional<big_decimal> try_unwrap_number(const rjson::value& v);
|
||||
|
||||
// Check if a given JSON object encodes a set (i.e., it is a {"SS": [...]}, "NS" or "BS" object)
|
||||
// and returns set's type and a pointer to that set. If the object does not encode a set,
|
||||
// returned value is {"", nullptr}
|
||||
|
||||
@@ -3,7 +3,20 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "alternator/server.hh"
|
||||
@@ -13,14 +26,13 @@
|
||||
#include <seastar/core/coroutine.hh>
|
||||
#include <seastar/json/json_elements.hh>
|
||||
#include <seastar/util/defer.hh>
|
||||
#include <seastar/util/short_streams.hh>
|
||||
#include "seastarx.hh"
|
||||
#include "error.hh"
|
||||
#include "service/qos/service_level_controller.hh"
|
||||
#include "utils/rjson.hh"
|
||||
#include "auth.hh"
|
||||
#include <cctype>
|
||||
#include "service/storage_proxy.hh"
|
||||
#include "locator/snitch_base.hh"
|
||||
#include "gms/gossiper.hh"
|
||||
#include "utils/overloaded_functor.hh"
|
||||
#include "utils/fb_utilities.hh"
|
||||
@@ -152,10 +164,8 @@ public:

protected:
    void generate_error_reply(reply& rep, const api_error& err) {
        rjson::value results = rjson::empty_object();
        rjson::add(results, "__type", rjson::from_string("com.amazonaws.dynamodb.v20120810#" + err._type));
        rjson::add(results, "message", err._msg);
        rep._content = rjson::print(std::move(results));
        rep._content += "{\"__type\":\"com.amazonaws.dynamodb.v20120810#" + err._type + "\"," +
                "\"message\":\"" + err._msg + "\"}";
        rep._status = err._http_code;
        slogger.trace("api_handler error case: {}", rep._content);
    }
@@ -201,9 +211,10 @@ protected:
|
||||
// It's very easy to get a list of all live nodes on the cluster,
|
||||
// using _gossiper().get_live_members(). But getting
|
||||
// just the list of live nodes in this DC needs more elaborate code:
|
||||
auto& topology = _proxy.get_token_metadata_ptr()->get_topology();
|
||||
sstring local_dc = topology.get_datacenter();
|
||||
std::unordered_set<gms::inet_address> local_dc_nodes = topology.get_datacenter_endpoints().at(local_dc);
|
||||
sstring local_dc = locator::i_endpoint_snitch::get_local_snitch_ptr()->get_datacenter(
|
||||
utils::fb_utilities::get_broadcast_address());
|
||||
std::unordered_set<gms::inet_address> local_dc_nodes =
|
||||
_proxy.get_token_metadata_ptr()->get_topology().get_datacenter_endpoints().at(local_dc);
|
||||
for (auto& ip : local_dc_nodes) {
|
||||
if (_gossiper.is_alive(ip)) {
|
||||
rjson::push_back(results, rjson::from_string(ip.to_sstring()));
|
||||
@@ -235,7 +246,7 @@ protected:
|
||||
future<std::string> server::verify_signature(const request& req, const chunked_content& content) {
|
||||
if (!_enforce_authorization) {
|
||||
slogger.debug("Skipping authorization");
|
||||
return make_ready_future<std::string>();
|
||||
return make_ready_future<std::string>("<unauthenticated request>");
|
||||
}
|
||||
auto host_it = req._headers.find("Host");
|
||||
if (host_it == req._headers.end()) {
|
||||
@@ -365,9 +376,7 @@ static tracing::trace_state_ptr maybe_trace_query(service::client_state& client_
|
||||
tracing::add_session_param(trace_state, "alternator_op", op);
|
||||
tracing::add_query(trace_state, truncated_content_view(query, buf));
|
||||
tracing::begin(trace_state, format("Alternator {}", op), client_state.get_client_address());
|
||||
if (!username.empty()) {
|
||||
tracing::set_username(trace_state, auth::authenticated_user(username));
|
||||
}
|
||||
tracing::set_username(trace_state, auth::authenticated_user(username));
|
||||
}
|
||||
return trace_state;
|
||||
}
|
||||
@@ -390,7 +399,7 @@ future<executor::request_return_type> server::handle_api_request(std::unique_ptr
|
||||
}
|
||||
auto units = co_await std::move(units_fut);
|
||||
assert(req->content_stream);
|
||||
chunked_content content = co_await util::read_entire_stream(*req->content_stream);
|
||||
chunked_content content = co_await httpd::read_entire_stream(*req->content_stream);
|
||||
auto username = co_await verify_signature(*req, content);
|
||||
|
||||
if (slogger.is_enabled(log_level::trace)) {
|
||||
@@ -410,11 +419,7 @@ future<executor::request_return_type> server::handle_api_request(std::unique_ptr
|
||||
auto leave = defer([this] () noexcept { _pending_requests.leave(); });
|
||||
//FIXME: Client state can provide more context, e.g. client's endpoint address
|
||||
// We use unique_ptr because client_state cannot be moved or copied
|
||||
executor::client_state client_state = username.empty()
|
||||
? service::client_state{service::client_state::internal_tag()}
|
||||
: service::client_state{service::client_state::internal_tag(), _auth_service, _sl_controller, username};
|
||||
co_await client_state.maybe_update_per_service_level_params();
|
||||
|
||||
executor::client_state client_state{executor::client_state::internal_tag()};
|
||||
tracing::trace_state_ptr trace_state = maybe_trace_query(client_state, username, op, content);
|
||||
tracing::trace(trace_state, op);
|
||||
rjson::value json_request = co_await _json_parser.parse(std::move(content));
|
||||
@@ -447,14 +452,12 @@ void server::set_routes(routes& r) {
|
||||
//FIXME: A way to immediately invalidate the cache should be considered,
|
||||
// e.g. when the system table which stores the keys is changed.
|
||||
// For now, this propagation may take up to 1 minute.
|
||||
server::server(executor& exec, service::storage_proxy& proxy, gms::gossiper& gossiper, auth::service& auth_service, qos::service_level_controller& sl_controller)
|
||||
server::server(executor& exec, service::storage_proxy& proxy, gms::gossiper& gossiper)
|
||||
: _http_server("http-alternator")
|
||||
, _https_server("https-alternator")
|
||||
, _executor(exec)
|
||||
, _proxy(proxy)
|
||||
, _gossiper(gossiper)
|
||||
, _auth_service(auth_service)
|
||||
, _sl_controller(sl_controller)
|
||||
, _key_cache(1024, 1min, slogger)
|
||||
, _enforce_authorization(false)
|
||||
, _enabled_servers{}
|
||||
@@ -529,9 +532,6 @@ server::server(executor& exec, service::storage_proxy& proxy, gms::gossiper& gos
|
||||
{"GetRecords", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.get_records(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"DescribeContinuousBackups", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.describe_continuous_backups(client_state, std::move(permit), std::move(json_request));
|
||||
}},
|
||||
} {
|
||||
}
|
||||
|
||||
@@ -545,28 +545,36 @@ future<> server::init(net::inet_address addr, std::optional<uint16_t> port, std:
|
||||
" must be specified in order to init an alternator HTTP server instance"));
|
||||
}
|
||||
return seastar::async([this, addr, port, https_port, creds] {
|
||||
_executor.start().get();
|
||||
try {
|
||||
_executor.start().get();
|
||||
|
||||
if (port) {
|
||||
set_routes(_http_server._routes);
|
||||
_http_server.set_content_length_limit(server::content_length_limit);
|
||||
_http_server.set_content_streaming(true);
|
||||
_http_server.listen(socket_address{addr, *port}).get();
|
||||
_enabled_servers.push_back(std::ref(_http_server));
|
||||
}
|
||||
if (https_port) {
|
||||
set_routes(_https_server._routes);
|
||||
_https_server.set_content_length_limit(server::content_length_limit);
|
||||
_https_server.set_content_streaming(true);
|
||||
_https_server.set_tls_credentials(creds->build_reloadable_server_credentials([](const std::unordered_set<sstring>& files, std::exception_ptr ep) {
|
||||
if (ep) {
|
||||
slogger.warn("Exception loading {}: {}", files, ep);
|
||||
} else {
|
||||
slogger.info("Reloaded {}", files);
|
||||
}
|
||||
}).get0());
|
||||
_https_server.listen(socket_address{addr, *https_port}).get();
|
||||
_enabled_servers.push_back(std::ref(_https_server));
|
||||
if (port) {
|
||||
set_routes(_http_server._routes);
|
||||
_http_server.set_content_length_limit(server::content_length_limit);
|
||||
_http_server.set_content_streaming(true);
|
||||
_http_server.listen(socket_address{addr, *port}).get();
|
||||
_enabled_servers.push_back(std::ref(_http_server));
|
||||
}
|
||||
if (https_port) {
|
||||
set_routes(_https_server._routes);
|
||||
_https_server.set_content_length_limit(server::content_length_limit);
|
||||
_https_server.set_content_streaming(true);
|
||||
_https_server.set_tls_credentials(creds->build_reloadable_server_credentials([](const std::unordered_set<sstring>& files, std::exception_ptr ep) {
|
||||
if (ep) {
|
||||
slogger.warn("Exception loading {}: {}", files, ep);
|
||||
} else {
|
||||
slogger.info("Reloaded {}", files);
|
||||
}
|
||||
}).get0());
|
||||
_https_server.listen(socket_address{addr, *https_port}).get();
|
||||
_enabled_servers.push_back(std::ref(_https_server));
|
||||
}
|
||||
} catch (...) {
|
||||
slogger.error("Failed to set up Alternator HTTP server on {} port {}, TLS port {}: {}",
|
||||
addr, port ? std::to_string(*port) : "OFF", https_port ? std::to_string(*https_port) : "OFF", std::current_exception());
|
||||
std::throw_with_nested(std::runtime_error(
|
||||
format("Failed to set up Alternator HTTP server on {} port {}, TLS port {}",
|
||||
addr, port ? std::to_string(*port) : "OFF", https_port ? std::to_string(*https_port) : "OFF")));
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -623,7 +631,7 @@ future<> server::json_parser::stop() {

const char* api_error::what() const noexcept {
    if (_what_string.empty()) {
        _what_string = format("{} {}: {}", static_cast<int>(_http_code), _type, _msg);
        _what_string = format("{} {}: {}", _http_code, _type, _msg);
    }
    return _what_string.c_str();
}

@@ -3,7 +3,20 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
@@ -15,7 +28,6 @@
|
||||
#include <seastar/net/tls.hh>
|
||||
#include <optional>
|
||||
#include "alternator/auth.hh"
|
||||
#include "service/qos/service_level_controller.hh"
|
||||
#include "utils/small_vector.hh"
|
||||
#include "utils/updateable_value.hh"
|
||||
#include <seastar/core/units.hh>
|
||||
@@ -35,8 +47,6 @@ class server {
|
||||
executor& _executor;
|
||||
service::storage_proxy& _proxy;
|
||||
gms::gossiper& _gossiper;
|
||||
auth::service& _auth_service;
|
||||
qos::service_level_controller& _sl_controller;
|
||||
|
||||
key_cache _key_cache;
|
||||
bool _enforce_authorization;
|
||||
@@ -68,7 +78,7 @@ class server {
|
||||
json_parser _json_parser;
|
||||
|
||||
public:
|
||||
server(executor& executor, service::storage_proxy& proxy, gms::gossiper& gossiper, auth::service& service, qos::service_level_controller& sl_controller);
|
||||
server(executor& executor, service::storage_proxy& proxy, gms::gossiper& gossiper);
|
||||
|
||||
future<> init(net::inet_address addr, std::optional<uint16_t> port, std::optional<uint16_t> https_port, std::optional<tls::credentials_builder> creds,
|
||||
bool enforce_authorization, semaphore* memory_limiter, utils::updateable_value<uint32_t> max_concurrent_requests);
|
||||
|
||||
@@ -3,7 +3,20 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "stats.hh"
|
||||
|
||||
@@ -3,7 +3,20 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -3,7 +3,20 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <type_traits>
|
||||
@@ -15,6 +28,7 @@
|
||||
|
||||
#include "utils/base64.hh"
|
||||
#include "log.hh"
|
||||
#include "database.hh"
|
||||
#include "db/config.hh"
|
||||
|
||||
#include "cdc/log.hh"
|
||||
@@ -33,6 +47,7 @@
|
||||
#include "gms/feature_service.hh"
|
||||
|
||||
#include "executor.hh"
|
||||
#include "tags_extension.hh"
|
||||
#include "rmw_operation.hh"
|
||||
|
||||
/**
|
||||
@@ -74,8 +89,8 @@ struct rapidjson::internal::TypeHelper<ValueType, utils::UUID>
|
||||
: public from_string_helper<ValueType, utils::UUID>
|
||||
{};
|
||||
|
||||
static db_clock::time_point as_timepoint(const table_id& tid) {
|
||||
return db_clock::time_point{utils::UUID_gen::unix_timestamp(tid.uuid())};
|
||||
static db_clock::time_point as_timepoint(const utils::UUID& uuid) {
|
||||
return db_clock::time_point{utils::UUID_gen::unix_timestamp(uuid)};
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -106,9 +121,6 @@ public:
|
||||
stream_arn(const UUID& uuid)
|
||||
: UUID(uuid)
|
||||
{}
|
||||
stream_arn(const table_id& tid)
|
||||
: UUID(tid.uuid())
|
||||
{}
|
||||
stream_arn(std::string_view v)
|
||||
: UUID(v.substr(1))
|
||||
{
|
||||
@@ -143,8 +155,8 @@ future<alternator::executor::request_return_type> alternator::executor::list_str
|
||||
auto limit = rjson::get_opt<int>(request, "Limit").value_or(std::numeric_limits<int>::max());
|
||||
auto streams_start = rjson::get_opt<stream_arn>(request, "ExclusiveStartStreamArn");
|
||||
auto table = find_table(_proxy, request);
|
||||
auto db = _proxy.data_dictionary();
|
||||
auto cfs = db.get_tables();
|
||||
auto& db = _proxy.get_db().local();
|
||||
auto& cfs = db.get_column_families();
|
||||
auto i = cfs.begin();
|
||||
auto e = cfs.end();
|
||||
|
||||
@@ -157,10 +169,10 @@ future<alternator::executor::request_return_type> alternator::executor::list_str
|
||||
// between queries may or may not miss info. But that should be rare,
|
||||
// and we can probably expect this to be a single call.
|
||||
if (streams_start) {
|
||||
i = std::find_if(i, e, [&](data_dictionary::table t) {
|
||||
return t.schema()->id().uuid() == streams_start
|
||||
&& cdc::get_base_table(db.real_database(), *t.schema())
|
||||
&& is_alternator_keyspace(t.schema()->ks_name())
|
||||
i = std::find_if(i, e, [&](const std::pair<utils::UUID, lw_shared_ptr<column_family>>& p) {
|
||||
return p.first == streams_start
|
||||
&& cdc::get_base_table(db, *p.second->schema())
|
||||
&& is_alternator_keyspace(p.second->schema()->ks_name())
|
||||
;
|
||||
});
|
||||
if (i != e) {
|
||||
@@ -174,7 +186,7 @@ future<alternator::executor::request_return_type> alternator::executor::list_str
|
||||
std::optional<stream_arn> last;
|
||||
|
||||
for (;limit > 0 && i != e; ++i) {
|
||||
auto s = i->schema();
|
||||
auto s = i->second->schema();
|
||||
auto& ks_name = s->ks_name();
|
||||
auto& cf_name = s->cf_name();
|
||||
|
||||
@@ -184,14 +196,14 @@ future<alternator::executor::request_return_type> alternator::executor::list_str
|
||||
if (table && ks_name != table->ks_name()) {
|
||||
continue;
|
||||
}
|
||||
if (cdc::is_log_for_some_table(db.real_database(), ks_name, cf_name)) {
|
||||
if (table && table != cdc::get_base_table(db.real_database(), *s)) {
|
||||
if (cdc::is_log_for_some_table(db, ks_name, cf_name)) {
|
||||
if (table && table != cdc::get_base_table(db, *s)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
rjson::value new_entry = rjson::empty_object();
|
||||
|
||||
last = i->schema()->id();
|
||||
last = i->first;
|
||||
rjson::add(new_entry, "StreamArn", *last);
|
||||
rjson::add(new_entry, "StreamLabel", rjson::from_string(stream_label(*s)));
|
||||
rjson::add(new_entry, "TableName", rjson::from_string(cdc::base_name(table_name(*s))));
|
||||
@@ -412,7 +424,7 @@ using namespace std::string_literals;
|
||||
* This will be a partial overlap, but it is the best we can do.
|
||||
*/
|
||||
|
||||
static std::chrono::seconds confidence_interval(data_dictionary::database db) {
|
||||
static std::chrono::seconds confidence_interval(const database& db) {
|
||||
return std::chrono::seconds(db.get_config().alternator_streams_time_window_s());
|
||||
}
|
||||
|
||||
@@ -430,12 +442,12 @@ future<executor::request_return_type> executor::describe_stream(client_state& cl
|
||||
auto stream_arn = rjson::get<alternator::stream_arn>(request, "StreamArn");
|
||||
|
||||
schema_ptr schema, bs;
|
||||
auto db = _proxy.data_dictionary();
|
||||
auto& db = _proxy.get_db().local();
|
||||
|
||||
try {
|
||||
auto cf = db.find_column_family(table_id(stream_arn));
|
||||
auto& cf = db.find_column_family(stream_arn);
|
||||
schema = cf.schema();
|
||||
bs = cdc::get_base_table(db.real_database(), *schema);
|
||||
bs = cdc::get_base_table(_proxy.get_db().local(), *schema);
|
||||
} catch (...) {
|
||||
}
|
||||
|
||||
@@ -493,7 +505,7 @@ future<executor::request_return_type> executor::describe_stream(client_state& cl
|
||||
// filter out cdc generations older than the table or now() - cdc::ttl (typically dynamodb_streams_max_window - 24h)
|
||||
auto low_ts = std::max(as_timepoint(schema->id()), db_clock::now() - ttl);
|
||||
|
||||
return _sdks.cdc_get_versioned_streams(low_ts, { normal_token_owners }).then([this, db, shard_start, limit, ret = std::move(ret), stream_desc = std::move(stream_desc)] (std::map<db_clock::time_point, cdc::streams_version> topologies) mutable {
|
||||
return _sdks.cdc_get_versioned_streams(low_ts, { normal_token_owners }).then([this, &db, shard_start, limit, ret = std::move(ret), stream_desc = std::move(stream_desc)] (std::map<db_clock::time_point, cdc::streams_version> topologies) mutable {
|
||||
|
||||
auto e = topologies.end();
|
||||
auto prev = e;
|
||||
@@ -714,18 +726,18 @@ future<executor::request_return_type> executor::get_shard_iterator(client_state&
|
||||
}
|
||||
|
||||
auto stream_arn = rjson::get<alternator::stream_arn>(request, "StreamArn");
|
||||
auto db = _proxy.data_dictionary();
|
||||
auto& db = _proxy.get_db().local();
|
||||
|
||||
schema_ptr schema = nullptr;
|
||||
std::optional<shard_id> sid;
|
||||
|
||||
try {
|
||||
auto cf = db.find_column_family(table_id(stream_arn));
|
||||
auto& cf = db.find_column_family(stream_arn);
|
||||
schema = cf.schema();
|
||||
sid = rjson::get<shard_id>(request, "ShardId");
|
||||
} catch (...) {
|
||||
}
|
||||
if (!schema || !cdc::get_base_table(db.real_database(), *schema) || !is_alternator_keyspace(schema->ks_name())) {
|
||||
if (!schema || !cdc::get_base_table(db, *schema) || !is_alternator_keyspace(schema->ks_name())) {
|
||||
throw api_error::resource_not_found("Invalid StreamArn");
|
||||
}
|
||||
if (!sid) {
|
||||
@@ -802,12 +814,12 @@ future<executor::request_return_type> executor::get_records(client_state& client
|
||||
throw api_error::validation("Limit must be 1 or more");
|
||||
}
|
||||
|
||||
auto db = _proxy.data_dictionary();
|
||||
auto& db = _proxy.get_db().local();
|
||||
schema_ptr schema, base;
|
||||
try {
|
||||
auto log_table = db.find_column_family(table_id(iter.table));
|
||||
auto& log_table = db.find_column_family(iter.table);
|
||||
schema = log_table.schema();
|
||||
base = cdc::get_base_table(db.real_database(), *schema);
|
||||
base = cdc::get_base_table(db, *schema);
|
||||
} catch (...) {
|
||||
}
|
||||
|
||||
@@ -835,14 +847,14 @@ future<executor::request_return_type> executor::get_records(client_state& client
|
||||
static const bytes op_column_name = cdc::log_meta_column_name_bytes("operation");
|
||||
static const bytes eor_column_name = cdc::log_meta_column_name_bytes("end_of_batch");
|
||||
|
||||
std::optional<attrs_to_get> key_names = boost::copy_range<attrs_to_get>(
|
||||
auto key_names = boost::copy_range<attrs_to_get>(
|
||||
boost::range::join(std::move(base->partition_key_columns()), std::move(base->clustering_key_columns()))
|
||||
| boost::adaptors::transformed([&] (const column_definition& cdef) {
|
||||
return std::make_pair<std::string, attrs_to_get_node>(cdef.name_as_text(), {}); })
|
||||
);
|
||||
// Include all base table columns as values (in case pre or post is enabled).
|
||||
// This will include attributes not stored in the frozen map column
|
||||
std::optional<attrs_to_get> attr_names = boost::copy_range<attrs_to_get>(base->regular_columns()
|
||||
auto attr_names = boost::copy_range<attrs_to_get>(base->regular_columns()
|
||||
// this will include the :attrs column, which we will also force evaluating.
|
||||
// But not having this set empty forces out any cdc columns from actual result
|
||||
| boost::adaptors::transformed([] (const column_definition& cdef) {
|
||||
@@ -879,7 +891,7 @@ future<executor::request_return_type> executor::get_records(client_state& client
|
||||
++mul;
|
||||
}
|
||||
auto command = ::make_lw_shared<query::read_command>(schema->id(), schema->version(), partition_slice, _proxy.get_max_result_size(partition_slice),
|
||||
query::tombstone_limit(_proxy.get_tombstone_limit()), query::row_limit(limit * mul));
|
||||
query::row_limit(limit * mul));
|
||||
|
||||
return _proxy.query(schema, std::move(command), std::move(partition_ranges), cl, service::storage_proxy::coordinator_query_options(default_timeout(), std::move(permit), client_state)).then(
|
||||
[this, schema, partition_slice = std::move(partition_slice), selection = std::move(selection), start_time = std::move(start_time), limit, key_names = std::move(key_names), attr_names = std::move(attr_names), type, iter, high_ts] (service::storage_proxy::coordinator_query_result qr) mutable {
|
||||
@@ -1011,8 +1023,8 @@ future<executor::request_return_type> executor::get_records(client_state& client
|
||||
|
||||
// ugh. figure out if we are at end-of-shard
|
||||
auto normal_token_owners = _proxy.get_token_metadata_ptr()->count_normal_token_owners();
|
||||
|
||||
return _sdks.cdc_current_generation_timestamp({ normal_token_owners }).then([this, iter, high_ts, start_time, ret = std::move(ret), nrecords](db_clock::time_point ts) mutable {
|
||||
|
||||
return _sdks.cdc_current_generation_timestamp({ normal_token_owners }).then([this, iter, high_ts, start_time, ret = std::move(ret)](db_clock::time_point ts) mutable {
|
||||
auto& shard = iter.shard;
|
||||
|
||||
if (shard.time < ts && ts < high_ts) {
|
||||
@@ -1029,28 +1041,24 @@ future<executor::request_return_type> executor::get_records(client_state& client
|
||||
rjson::add(ret, "NextShardIterator", iter);
|
||||
}
|
||||
_stats.api_operations.get_records_latency.add(std::chrono::steady_clock::now() - start_time);
|
||||
// TODO: determine a better threshold...
|
||||
if (nrecords > 10) {
|
||||
return make_ready_future<executor::request_return_type>(make_streamed(std::move(ret)));
|
||||
}
|
||||
return make_ready_future<executor::request_return_type>(make_jsonable(std::move(ret)));
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
void executor::add_stream_options(const rjson::value& stream_specification, schema_builder& builder, service::storage_proxy& sp) {
|
||||
void executor::add_stream_options(const rjson::value& stream_specification, schema_builder& builder) const {
|
||||
auto stream_enabled = rjson::find(stream_specification, "StreamEnabled");
|
||||
if (!stream_enabled || !stream_enabled->IsBool()) {
|
||||
throw api_error::validation("StreamSpecification needs boolean StreamEnabled");
|
||||
}
|
||||
|
||||
if (stream_enabled->GetBool()) {
|
||||
auto db = sp.data_dictionary();
|
||||
auto& db = _proxy.get_db().local();
|
||||
|
||||
if (!db.features().cdc) {
|
||||
if (!db.features().cluster_supports_cdc()) {
|
||||
throw api_error::validation("StreamSpecification: streams (CDC) feature not enabled in cluster.");
|
||||
}
|
||||
if (!db.features().alternator_streams) {
|
||||
if (!db.features().cluster_supports_alternator_streams()) {
|
||||
throw api_error::validation("StreamSpecification: alternator streams feature not enabled in cluster.");
|
||||
}
|
||||
|
||||
@@ -1082,11 +1090,11 @@ void executor::add_stream_options(const rjson::value& stream_specification, sche
|
||||
}
|
||||
}
|
||||
|
||||
void executor::supplement_table_stream_info(rjson::value& descr, const schema& schema, service::storage_proxy& sp) {
|
||||
void executor::supplement_table_stream_info(rjson::value& descr, const schema& schema) const {
|
||||
auto& opts = schema.cdc_options();
|
||||
if (opts.enabled()) {
|
||||
auto db = sp.data_dictionary();
|
||||
auto cf = db.find_table(schema.ks_name(), cdc::log_name(schema.cf_name()));
|
||||
auto& db = _proxy.get_db().local();
|
||||
auto& cf = db.find_column_family(schema.ks_name(), cdc::log_name(schema.cf_name()));
|
||||
stream_arn arn(cf.schema()->id());
|
||||
rjson::add(descr, "LatestStreamArn", arn);
|
||||
rjson::add(descr, "LatestStreamLabel", rjson::from_string(stream_label(*cf.schema())));
|
||||
|
||||
@@ -3,7 +3,20 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
@@ -12,7 +25,7 @@
|
||||
#include "schema.hh"
|
||||
#include "db/extensions.hh"
|
||||
|
||||
namespace db {
|
||||
namespace alternator {
|
||||
|
||||
class tags_extension : public schema_extension {
|
||||
public:
|
||||
@@ -37,9 +50,4 @@ private:
|
||||
std::map<sstring, sstring> _tags;
|
||||
};
|
||||
|
||||
// Information whether the view updates are synchronous is stored using the
|
||||
// SYNCHRONOUS_VIEW_UPDATES_TAG_KEY tag. Value of this tag is a stored as a
|
||||
// serialized boolean value ("true" or "false")
|
||||
static const sstring SYNCHRONOUS_VIEW_UPDATES_TAG_KEY("system:synchronous_view_updates");
|
||||
|
||||
}
|
||||
@@ -3,54 +3,30 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <chrono>
|
||||
#include <cstdint>
|
||||
#include <optional>
|
||||
#include <seastar/core/sstring.hh>
|
||||
#include <seastar/core/coroutine.hh>
|
||||
#include <seastar/core/sleep.hh>
|
||||
#include <seastar/core/future.hh>
|
||||
#include <seastar/core/lowres_clock.hh>
|
||||
#include <seastar/coroutine/maybe_yield.hh>
|
||||
#include <boost/multiprecision/cpp_int.hpp>
|
||||
|
||||
#include "gms/gossiper.hh"
|
||||
#include "gms/inet_address.hh"
|
||||
#include "inet_address_vectors.hh"
|
||||
#include "locator/abstract_replication_strategy.hh"
|
||||
#include "log.hh"
|
||||
#include "gc_clock.hh"
|
||||
#include "replica/database.hh"
|
||||
#include "service_permit.hh"
|
||||
#include "timestamp.hh"
|
||||
#include "executor.hh"
|
||||
#include "service/storage_proxy.hh"
|
||||
#include "service/pager/paging_state.hh"
|
||||
#include "service/pager/query_pagers.hh"
|
||||
#include "gms/feature_service.hh"
|
||||
#include "sstables/types.hh"
|
||||
#include "mutation.hh"
|
||||
#include "types.hh"
|
||||
#include "types/map.hh"
|
||||
#include "database.hh"
|
||||
#include "utils/rjson.hh"
|
||||
#include "utils/big_decimal.hh"
|
||||
#include "utils/fb_utilities.hh"
|
||||
#include "cql3/selection/selection.hh"
|
||||
#include "cql3/values.hh"
|
||||
#include "cql3/query_options.hh"
|
||||
#include "cql3/column_identifier.hh"
|
||||
#include "alternator/executor.hh"
|
||||
#include "alternator/controller.hh"
|
||||
#include "alternator/serialization.hh"
|
||||
#include "dht/sharder.hh"
|
||||
#include "db/config.hh"
|
||||
#include "db/tags/utils.hh"
|
||||
|
||||
#include "ttl.hh"
|
||||
|
||||
static logging::logger tlogger("alternator_ttl");
|
||||
|
||||
namespace alternator {
|
||||
|
||||
@@ -65,8 +41,8 @@ static const sstring TTL_TAG_KEY("system:ttl_attribute");
|
||||
|
||||
future<executor::request_return_type> executor::update_time_to_live(client_state& client_state, service_permit permit, rjson::value request) {
|
||||
_stats.api_operations.update_time_to_live++;
|
||||
if (!_proxy.data_dictionary().features().alternator_ttl) {
|
||||
co_return api_error::unknown_operation("UpdateTimeToLive not yet supported. Experimental support is available if the 'alternator-ttl' experimental feature is enabled on all nodes.");
|
||||
if (!_proxy.get_db().local().features().cluster_supports_alternator_ttl()) {
|
||||
co_return api_error::unknown_operation("UpdateTimeToLive not yet supported. Experimental support is available if the 'alternator_ttl' experimental feature is enabled on all nodes.");
|
||||
}
|
||||
|
||||
schema_ptr schema = get_table(_proxy, request);
|
||||
@@ -92,7 +68,7 @@ future<executor::request_return_type> executor::update_time_to_live(client_state
|
||||
}
|
||||
sstring attribute_name(v->GetString(), v->GetStringLength());
|
||||
|
||||
std::map<sstring, sstring> tags_map = get_tags_of_table_or_throw(schema);
|
||||
std::map<sstring, sstring> tags_map = get_tags_of_table(schema);
|
||||
if (enabled) {
|
||||
if (tags_map.contains(TTL_TAG_KEY)) {
|
||||
co_return api_error::validation("TTL is already enabled");
|
||||
@@ -109,7 +85,7 @@ future<executor::request_return_type> executor::update_time_to_live(client_state
|
||||
}
|
||||
tags_map.erase(TTL_TAG_KEY);
|
||||
}
|
||||
co_await db::update_tags(_mm, schema, std::move(tags_map));
|
||||
co_await update_tags(_mm, schema, std::move(tags_map));
|
||||
// Prepare the response, which contains a TimeToLiveSpecification
|
||||
// basically identical to the request's
|
||||
rjson::value response = rjson::empty_object();
|
||||
@@ -120,7 +96,7 @@ future<executor::request_return_type> executor::update_time_to_live(client_state
|
||||
future<executor::request_return_type> executor::describe_time_to_live(client_state& client_state, service_permit permit, rjson::value request) {
|
||||
_stats.api_operations.describe_time_to_live++;
|
||||
schema_ptr schema = get_table(_proxy, request);
|
||||
std::map<sstring, sstring> tags_map = get_tags_of_table_or_throw(schema);
|
||||
std::map<sstring, sstring> tags_map = get_tags_of_table(schema);
|
||||
rjson::value desc = rjson::empty_object();
|
||||
auto i = tags_map.find(TTL_TAG_KEY);
|
||||
if (i == tags_map.end()) {
|
||||
@@ -134,692 +110,4 @@ future<executor::request_return_type> executor::describe_time_to_live(client_sta
|
||||
co_return make_jsonable(std::move(response));
|
||||
}
|
||||
|
||||
// expiration_service is a sharded service responsible for cleaning up expired
// items in all tables with per-item expiration enabled. Currently, this means
// Alternator tables with TTL configured via a UpdateTimeToLive request.
//
// Here is a brief overview of how the expiration service works:
//
// An expiration thread on each shard periodically scans the items (i.e.,
// rows) owned by this shard, looking for items whose chosen expiration-time
// attribute indicates they are expired, and deletes those items.
// The expiration-time "attribute" can be either an actual Scylla column
// (must be numeric) or an Alternator "attribute" - i.e., an element in
// the ATTRS_COLUMN_NAME map<utf8,bytes> column where the numeric expiration
// time is encoded in DynamoDB's JSON encoding inside the bytes value.
// To avoid scanning the same items RF times in RF replicas, only one node is
// responsible for scanning a token range at a time. Normally, this is the
// node owning this range as a "primary range" (the first node in the ring
// with this range), but when this node is down, the secondary owner (the
// second in the ring) may take over.
// An expiration thread is responsible for all tables which need expiration
// scans. Currently, the different tables are scanned sequentially (not in
// parallel).
// The expiration thread scans items using CL=QUORUM to ensure that it reads
// a consistent expiration-time attribute. This means that the items are read
// locally and in addition QUORUM-1 additional nodes (one additional node
// when RF=3) need to read the data and send digests.
// When the expiration thread decides that an item has expired and wants
// to delete it, it does it using a CL=QUORUM write. This allows this
// deletion to be visible for consistent (quorum) reads. The deletion,
// like user deletions, will also appear on the CDC log and therefore
// Alternator Streams if enabled - currently as ordinary deletes (the
// userIdentity flag is currently missing; this is issue #11523).
expiration_service::expiration_service(data_dictionary::database db, service::storage_proxy& proxy, gms::gossiper& g)
    : _db(db)
    , _proxy(proxy)
    , _gossiper(g)
{
}

// Convert the big_decimal used to represent expiration time to an integer.
// Any fractional part is dropped. If the number is negative or invalid,
// 0 is returned, and if it's too high, the maximum unsigned long is returned.
static unsigned long bigdecimal_to_ul(const big_decimal& bd) {
    // The big_decimal format has an integer mantissa of arbitrary length
    // "unscaled_value" and then a (power of 10) exponent "scale".
    if (bd.unscaled_value() <= 0) {
        return 0;
    }
    if (bd.scale() == 0) {
        // The fast path, when the expiration time is an integer, scale==0.
        return static_cast<unsigned long>(bd.unscaled_value());
    }
    // Because the mantissa can be of arbitrary length, we work on it
    // as a string. TODO: find a less ugly algorithm.
    auto str = bd.unscaled_value().str();
    if (bd.scale() > 0) {
        int len = str.length();
        if (len < bd.scale()) {
            return 0;
        }
        str = str.substr(0, len-bd.scale());
    } else {
        if (bd.scale() < -20) {
            return std::numeric_limits<unsigned long>::max();
        }
        for (int i = 0; i < -bd.scale(); i++) {
            str.push_back('0');
        }
    }
    // strtoul() returns ULONG_MAX if the number is too large, or 0 if not
    // a number.
    return strtoul(str.c_str(), nullptr, 10);
}

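As a worked example of the truncation above, using plain integers and strings rather than the real big_decimal type (the helper name is illustrative): an expiration value of 1234.56 is stored as unscaled_value 123456 with scale 2, so dropping the last two digits yields 1234; a negative scale appends zeros instead.

#include <cassert>
#include <cstdlib>
#include <limits>
#include <string>

// Standalone illustration of the truncation logic above, using a plain
// decimal-string mantissa instead of boost::multiprecision::cpp_int.
static unsigned long decimal_to_ul(std::string mantissa, int scale) {
    if (mantissa.empty() || mantissa[0] == '-') {
        return 0;                                   // negative or invalid -> 0
    }
    if (scale > 0) {
        if ((int)mantissa.size() < scale) {
            return 0;                               // pure fraction truncates to 0
        }
        mantissa = mantissa.substr(0, mantissa.size() - scale);
    } else if (scale < -20) {
        return std::numeric_limits<unsigned long>::max();
    } else {
        mantissa.append(-scale, '0');               // scale <= 0: multiply by 10^-scale
    }
    return std::strtoul(mantissa.c_str(), nullptr, 10);
}

int main() {
    assert(decimal_to_ul("123456", 2) == 1234);     // 1234.56 -> 1234
    assert(decimal_to_ul("17", -3) == 17000);       // 17 * 10^3 -> 17000
    assert(decimal_to_ul("5", 3) == 0);             // 0.005 -> 0
}
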
// The following is_expired() functions all check if an item with the given
// expiration time has expired, according to the DynamoDB API rules.
// The rules are:
// 1. If the expiration time attribute's value is not a number type,
//    the item is not expired.
// 2. The expiration time is measured in seconds since the UNIX epoch.
// 3. If the expiration time is more than 5 years in the past, it is assumed
//    to be malformed and ignored - and the item does not expire.
static bool is_expired(gc_clock::time_point expiration_time, gc_clock::time_point now) {
    return expiration_time <= now &&
           expiration_time > now - std::chrono::years(5);
}

static bool is_expired(const big_decimal& expiration_time, gc_clock::time_point now) {
    unsigned long t = bigdecimal_to_ul(expiration_time);
    // We assume - and the assumption turns out to be correct - that the
    // epoch of gc_clock::time_point and the one used by the DynamoDB protocol
    // are the same (the UNIX epoch in UTC). The resolution (seconds) is also
    // the same.
    return is_expired(gc_clock::time_point(gc_clock::duration(std::chrono::seconds(t))), now);
}
static bool is_expired(const rjson::value& expiration_time, gc_clock::time_point now) {
    std::optional<big_decimal> n = try_unwrap_number(expiration_time);
    return n && is_expired(*n, now);
}

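To make the five-year rule concrete, here is a minimal standalone restatement using std::chrono (C++20) system time; the real code uses gc_clock, which shares the UNIX epoch, so the arithmetic is the same.

#include <cassert>
#include <chrono>

// Standalone restatement of the expiry rule above: expired iff now-5y < t <= now.
static bool expired(std::chrono::sys_seconds t, std::chrono::sys_seconds now) {
    using namespace std::chrono;
    return t <= now && t > now - years(5);
}

int main() {
    using namespace std::chrono;
    const auto now = sys_seconds(seconds(1700000000));   // some "current" time
    assert(expired(now - hours(1), now));                 // one hour ago: expired
    assert(!expired(now + hours(1), now));                // in the future: not expired
    assert(!expired(now - years(6), now));                // too old: treated as malformed, never expires
}
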
// expire_item() expires an item - i.e., deletes it as appropriate for
|
||||
// expiration - with CL=QUORUM and (FIXME!) in a way Alternator Streams
|
||||
// understands it is an expiration event - not a user-initiated deletion.
|
||||
static future<> expire_item(service::storage_proxy& proxy,
|
||||
const service::query_state& qs,
|
||||
const std::vector<bytes_opt>& row,
|
||||
schema_ptr schema,
|
||||
api::timestamp_type ts) {
|
||||
// Prepare the row key to delete
|
||||
// NOTICE: the order of columns is guaranteed by the fact that selection::wildcard
|
||||
// is used, which indicates that columns appear in the order defined by
|
||||
// schema::all_columns_in_select_order() - partition key columns go first,
|
||||
// immediately followed by clustering key columns
|
||||
std::vector<bytes> exploded_pk;
|
||||
const unsigned pk_size = schema->partition_key_size();
|
||||
const unsigned ck_size = schema->clustering_key_size();
|
||||
for (unsigned c = 0; c < pk_size; ++c) {
|
||||
const auto& row_c = row[c];
|
||||
if (!row_c) {
|
||||
// This shouldn't happen - all key columns must have values.
|
||||
// But if it ever happens, let's just *not* expire the item.
|
||||
// FIXME: log or increment a metric if this happens.
|
||||
return make_ready_future<>();
|
||||
}
|
||||
exploded_pk.push_back(*row_c);
|
||||
}
|
||||
auto pk = partition_key::from_exploded(exploded_pk);
|
||||
mutation m(schema, pk);
|
||||
// If there's no clustering key, a tombstone should be created directly
|
||||
// on a partition, not on a clustering row - otherwise it will look like
|
||||
// an open-ended range tombstone, which will crash on KA/LA sstable format.
|
||||
// See issue #6035
|
||||
if (ck_size == 0) {
|
||||
m.partition().apply(tombstone(ts, gc_clock::now()));
|
||||
} else {
|
||||
std::vector<bytes> exploded_ck;
|
||||
for (unsigned c = pk_size; c < pk_size + ck_size; ++c) {
|
||||
const auto& row_c = row[c];
|
||||
if (!row_c) {
|
||||
// This shouldn't happen - all key columns must have values.
|
||||
// But if it ever happens, let's just *not* expire the item.
|
||||
// FIXME: log or increment a metric if this happens.
|
||||
return make_ready_future<>();
|
||||
}
|
||||
exploded_ck.push_back(*row_c);
|
||||
}
|
||||
auto ck = clustering_key::from_exploded(exploded_ck);
|
||||
m.partition().clustered_row(*schema, ck).apply(tombstone(ts, gc_clock::now()));
|
||||
}
|
||||
std::vector<mutation> mutations;
|
||||
mutations.push_back(std::move(m));
|
||||
return proxy.mutate(std::move(mutations),
|
||||
db::consistency_level::LOCAL_QUORUM,
|
||||
executor::default_timeout(), // FIXME - which timeout?
|
||||
qs.get_trace_state(), qs.get_permit(),
|
||||
db::allow_per_partition_rate_limit::no);
|
||||
}
|
||||
|
||||
static size_t random_offset(size_t min, size_t max) {
    static thread_local std::default_random_engine re{std::random_device{}()};
    std::uniform_int_distribution<size_t> dist(min, max);
    return dist(re);
}

// Get a list of secondary token ranges for the given node, and the primary
|
||||
// node responsible for each of these token ranges.
|
||||
// A "secondary range" is a range of tokens where for each token, the second
|
||||
// node (in ring order) out of the RF replicas that hold this token is the
|
||||
// given node.
|
||||
// In the expiration scanner, we want to scan a secondary range but only if
|
||||
// this range's primary node is down. For this we need to return not just
|
||||
// a list of this node's secondary ranges - but also the primary owner of
|
||||
// each of those ranges.
|
||||
static std::vector<std::pair<dht::token_range, gms::inet_address>> get_secondary_ranges(
|
||||
const locator::effective_replication_map_ptr& erm,
|
||||
gms::inet_address ep) {
|
||||
const auto& tm = *erm->get_token_metadata_ptr();
|
||||
const auto& sorted_tokens = tm.sorted_tokens();
|
||||
std::vector<std::pair<dht::token_range, gms::inet_address>> ret;
|
||||
if (sorted_tokens.empty()) {
|
||||
on_internal_error(tlogger, "Token metadata is empty");
|
||||
}
|
||||
auto prev_tok = sorted_tokens.back();
|
||||
for (const auto& tok : sorted_tokens) {
|
||||
inet_address_vector_replica_set eps = erm->get_natural_endpoints(tok);
|
||||
if (eps.size() <= 1 || eps[1] != ep) {
|
||||
prev_tok = tok;
|
||||
continue;
|
||||
}
|
||||
// Add the range (prev_tok, tok] to ret. However, if the range wraps
|
||||
// around, split it to two non-wrapping ranges.
|
||||
if (prev_tok < tok) {
|
||||
ret.emplace_back(
|
||||
dht::token_range{
|
||||
dht::token_range::bound(prev_tok, false),
|
||||
dht::token_range::bound(tok, true)},
|
||||
eps[0]);
|
||||
} else {
|
||||
ret.emplace_back(
|
||||
dht::token_range{
|
||||
dht::token_range::bound(prev_tok, false),
|
||||
std::nullopt},
|
||||
eps[0]);
|
||||
ret.emplace_back(
|
||||
dht::token_range{
|
||||
std::nullopt,
|
||||
dht::token_range::bound(tok, true)},
|
||||
eps[0]);
|
||||
}
|
||||
prev_tok = tok;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
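// Worked example for get_secondary_ranges() above (illustrative, with
// made-up tokens): given sorted tokens {10, 50, 90}, if this node is the
// second replica only for token 10, then prev_tok is 90 when tok is 10, so
// the wrapping range (90, 10] is emitted as the two non-wrapping ranges
// (90, +inf) and (-inf, 10], each paired with the primary owner of token 10.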
|
||||
|
||||
// A class for iterating over all the token ranges *owned* by this shard.
|
||||
// To avoid code duplication, it is a template with two distinct cases -
|
||||
// <primary> and <secondary>:
|
||||
//
|
||||
// In the <primary> case, we consider a token *owned* by this shard if:
|
||||
// 1. This node is a replica for this token.
|
||||
// 2. Moreover, this node is the *primary* replica of the token (i.e., the
|
||||
// first replica in the ring).
|
||||
// 3. In this node, this shard is responsible for this token.
|
||||
// We will use this definition of which shard in the cluster owns which tokens
|
||||
// to split the expiration scanner's work between all the shards of the
|
||||
// system.
|
||||
//
|
||||
// In the <secondary> case, we consider a token *owned* by this shard if:
|
||||
// 1. This node is the *secondary* replica for this token (i.e., the second
|
||||
// replica in the ring).
|
||||
// 2. The primary replica for this token is currently marked down.
|
||||
// 3. In this node, this shard is responsible for this token.
|
||||
// We use the <secondary> case to handle the possibility that some of the
|
||||
// nodes in the system are down. A dead node will not be expiring
|
||||
// the tokens owned by it, so we want the secondary owner to take over its
|
||||
// primary ranges.
|
||||
//
|
||||
// FIXME: need to decide how to choose primary ranges in multi-DC setup!
|
||||
// We could call get_primary_ranges_within_dc() below instead of get_primary_ranges().
|
||||
// NOTICE: Iteration currently starts from a random token range in order to improve
|
||||
// the chances of covering all ranges during a scan when restarts occur.
|
||||
// A more deterministic way would be to regularly persist the scanning state,
|
||||
// but that incurs overhead that we want to avoid if not needed.
|
||||
enum primary_or_secondary_t {primary, secondary};
|
||||
template<primary_or_secondary_t primary_or_secondary>
|
||||
class token_ranges_owned_by_this_shard {
|
||||
// ranges_holder_primary holds just the primary ranges themselves
|
||||
class ranges_holder_primary {
|
||||
const dht::token_range_vector _token_ranges;
|
||||
public:
|
||||
ranges_holder_primary(const locator::effective_replication_map_ptr& erm, gms::gossiper& g, gms::inet_address ep)
|
||||
: _token_ranges(erm->get_primary_ranges(ep)) {}
|
||||
std::size_t size() const { return _token_ranges.size(); }
|
||||
const dht::token_range& operator[](std::size_t i) const {
|
||||
return _token_ranges[i];
|
||||
}
|
||||
bool should_skip(std::size_t i) const {
|
||||
return false;
|
||||
}
|
||||
};
|
||||
    // ranges_holder_secondary holds the secondary token ranges plus each
|
||||
// range's primary owner, needed to implement should_skip().
|
||||
class ranges_holder_secondary {
|
||||
std::vector<std::pair<dht::token_range, gms::inet_address>> _token_ranges;
|
||||
gms::gossiper& _gossiper;
|
||||
public:
|
||||
ranges_holder_secondary(const locator::effective_replication_map_ptr& erm, gms::gossiper& g, gms::inet_address ep)
|
||||
: _token_ranges(get_secondary_ranges(erm, ep))
|
||||
, _gossiper(g) {}
|
||||
std::size_t size() const { return _token_ranges.size(); }
|
||||
const dht::token_range& operator[](std::size_t i) const {
|
||||
return _token_ranges[i].first;
|
||||
}
|
||||
// range i should be skipped if its primary owner is alive.
|
||||
bool should_skip(std::size_t i) const {
|
||||
return _gossiper.is_alive(_token_ranges[i].second);
|
||||
}
|
||||
};
|
||||
|
||||
schema_ptr _s;
|
||||
// _token_ranges will contain a list of token ranges owned by this node.
|
||||
    // We'll further need to split each such range into the pieces owned by
|
||||
// the current shard, using _intersecter.
|
||||
using ranges_holder = std::conditional_t<
|
||||
primary_or_secondary == primary_or_secondary_t::primary,
|
||||
ranges_holder_primary,
|
||||
ranges_holder_secondary>;
|
||||
const ranges_holder _token_ranges;
|
||||
// NOTICE: _range_idx is used modulo _token_ranges size when accessing
|
||||
// the data to ensure that it doesn't go out of bounds
|
||||
size_t _range_idx;
|
||||
size_t _end_idx;
|
||||
std::optional<dht::selective_token_range_sharder> _intersecter;
|
||||
public:
|
||||
token_ranges_owned_by_this_shard(replica::database& db, gms::gossiper& g, schema_ptr s)
|
||||
: _s(s)
|
||||
, _token_ranges(db.find_keyspace(s->ks_name()).get_effective_replication_map(),
|
||||
g, utils::fb_utilities::get_broadcast_address())
|
||||
, _range_idx(random_offset(0, _token_ranges.size() - 1))
|
||||
, _end_idx(_range_idx + _token_ranges.size())
|
||||
{
|
||||
tlogger.debug("Generating token ranges starting from base range {} of {}", _range_idx, _token_ranges.size());
|
||||
}
|
||||
|
||||
// Return the next token_range owned by this shard, or nullopt when the
|
||||
// iteration ends.
|
||||
std::optional<dht::token_range> next() {
|
||||
// We may need three or more iterations in the following loop if a
|
||||
// vnode doesn't intersect with the given shard at all (such a small
|
||||
// vnode is unlikely, but possible). The loop cannot be infinite
|
||||
// because each iteration of the loop advances _range_idx.
|
||||
for (;;) {
|
||||
if (_intersecter) {
|
||||
std::optional<dht::token_range> ret = _intersecter->next();
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
// done with this range, go to next one
|
||||
++_range_idx;
|
||||
_intersecter = std::nullopt;
|
||||
}
|
||||
if (_range_idx == _end_idx) {
|
||||
return std::nullopt;
|
||||
}
|
||||
// If should_skip(), the range should be skipped. This happens for
|
||||
// a secondary range whose primary owning node is still alive.
|
||||
while (_token_ranges.should_skip(_range_idx % _token_ranges.size())) {
|
||||
++_range_idx;
|
||||
if (_range_idx == _end_idx) {
|
||||
return std::nullopt;
|
||||
}
|
||||
}
|
||||
_intersecter.emplace(_s->get_sharder(), _token_ranges[_range_idx % _token_ranges.size()], this_shard_id());
|
||||
}
|
||||
}
|
||||
|
||||
// Same as next(), just return a partition_range instead of token_range
|
||||
std::optional<dht::partition_range> next_partition_range() {
|
||||
std::optional<dht::token_range> ret = next();
|
||||
if (ret) {
|
||||
return dht::to_partition_range(*ret);
|
||||
} else {
|
||||
return std::nullopt;
|
||||
}
|
||||
}
|
||||
};
|
||||
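// Hypothetical usage sketch for the class above (names mirror the
// surrounding code; scan_one_range is a stand-in, not a real function):
//
//   token_ranges_owned_by_this_shard<primary> ranges(db, gossiper, schema);
//   while (std::optional<dht::partition_range> r = ranges.next_partition_range()) {
//       co_await scan_one_range(std::move(*r));
//   }
//
// This is exactly the pattern scan_table() uses further below, once for the
// primary ranges and once for the secondary ranges.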
|
||||
// Precomputed information needed to perform a scan on partition ranges
|
||||
struct scan_ranges_context {
|
||||
schema_ptr s;
|
||||
bytes column_name;
|
||||
std::optional<std::string> member;
|
||||
|
||||
::shared_ptr<cql3::selection::selection> selection;
|
||||
std::unique_ptr<service::query_state> query_state_ptr;
|
||||
std::unique_ptr<cql3::query_options> query_options;
|
||||
::lw_shared_ptr<query::read_command> command;
|
||||
|
||||
scan_ranges_context(schema_ptr s, service::storage_proxy& proxy, bytes column_name, std::optional<std::string> member)
|
||||
: s(s)
|
||||
, column_name(column_name)
|
||||
, member(member)
|
||||
{
|
||||
        // FIXME: don't read the entire item - read only parts of it.
|
||||
// We must read the key columns (to be able to delete) and also
|
||||
// the requested attribute. If the requested attribute is a map's
|
||||
// member we may be forced to read the entire map - but it would
|
||||
// be good if we can read only the single item of the map - it
|
||||
// should be possible (and a must for issue #7751!).
|
||||
lw_shared_ptr<service::pager::paging_state> paging_state = nullptr;
|
||||
auto regular_columns = boost::copy_range<query::column_id_vector>(
|
||||
s->regular_columns() | boost::adaptors::transformed([] (const column_definition& cdef) { return cdef.id; }));
|
||||
selection = cql3::selection::selection::wildcard(s);
|
||||
query::partition_slice::option_set opts = selection->get_query_options();
|
||||
opts.set<query::partition_slice::option::allow_short_read>();
|
||||
        // It is important that the scan bypass the cache to avoid polluting it:
|
||||
opts.set<query::partition_slice::option::bypass_cache>();
|
||||
std::vector<query::clustering_range> ck_bounds{query::clustering_range::make_open_ended_both_sides()};
|
||||
auto partition_slice = query::partition_slice(std::move(ck_bounds), {}, std::move(regular_columns), opts);
|
||||
command = ::make_lw_shared<query::read_command>(s->id(), s->version(), partition_slice, proxy.get_max_result_size(partition_slice), query::tombstone_limit(proxy.get_tombstone_limit()));
|
||||
executor::client_state client_state{executor::client_state::internal_tag()};
|
||||
tracing::trace_state_ptr trace_state;
|
||||
// NOTICE: empty_service_permit is used because the TTL service has fixed parallelism
|
||||
query_state_ptr = std::make_unique<service::query_state>(client_state, trace_state, empty_service_permit());
|
||||
// FIXME: What should we do on multi-DC? Will we run the expiration on the same ranges on all
|
||||
// DCs or only once for each range? If the latter, we need to change the CLs in the
|
||||
// scanner and deleter.
|
||||
db::consistency_level cl = db::consistency_level::LOCAL_QUORUM;
|
||||
query_options = std::make_unique<cql3::query_options>(cl, std::vector<cql3::raw_value>{});
|
||||
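        // Wrap the options built above in an outer query_options object that
        // also carries the (still empty) paging state, which is the form the
        // query pager expects.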
query_options = std::make_unique<cql3::query_options>(std::move(query_options), std::move(paging_state));
|
||||
}
|
||||
};
|
||||
|
||||
// Scan data in a list of token ranges in one table, looking for expired
|
||||
// items and deleting them.
|
||||
// Because of issue #9167, partition_ranges must have a single partition
|
||||
// range for this code to work correctly.
|
||||
static future<> scan_table_ranges(
|
||||
service::storage_proxy& proxy,
|
||||
const scan_ranges_context& scan_ctx,
|
||||
dht::partition_range_vector&& partition_ranges,
|
||||
abort_source& abort_source,
|
||||
named_semaphore& page_sem,
|
||||
expiration_service::stats& expiration_stats)
|
||||
{
|
||||
const schema_ptr& s = scan_ctx.s;
|
||||
assert (partition_ranges.size() == 1); // otherwise issue #9167 will cause incorrect results.
|
||||
auto p = service::pager::query_pagers::pager(proxy, s, scan_ctx.selection, *scan_ctx.query_state_ptr,
|
||||
*scan_ctx.query_options, scan_ctx.command, std::move(partition_ranges), nullptr);
|
||||
while (!p->is_exhausted()) {
|
||||
if (abort_source.abort_requested()) {
|
||||
co_return;
|
||||
}
|
||||
auto units = co_await get_units(page_sem, 1);
|
||||
        // We don't need to limit the page size in number of rows because there is a
|
||||
// builtin limit of the page's size in bytes. Setting this limit to 1
|
||||
// is useful for debugging the paging code with moderate-size data.
|
||||
uint32_t limit = std::numeric_limits<uint32_t>::max();
|
||||
// FIXME: which timeout?
|
||||
// FIXME: if read times out, need to retry it.
|
||||
std::unique_ptr<cql3::result_set> rs = co_await p->fetch_page(limit, gc_clock::now(), executor::default_timeout());
|
||||
auto rows = rs->rows();
|
||||
auto meta = rs->get_metadata().get_names();
|
||||
std::optional<unsigned> expiration_column;
|
||||
for (unsigned i = 0; i < meta.size(); i++) {
|
||||
const cql3::column_specification& col = *meta[i];
|
||||
if (col.name->name() == scan_ctx.column_name) {
|
||||
expiration_column = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!expiration_column) {
|
||||
continue;
|
||||
}
|
||||
for (const auto& row : rows) {
|
||||
const bytes_opt& cell = row[*expiration_column];
|
||||
if (!cell) {
|
||||
continue;
|
||||
}
|
||||
auto v = meta[*expiration_column]->type->deserialize(*cell);
|
||||
bool expired = false;
|
||||
// FIXME: don't recalculate "now" all the time
|
||||
auto now = gc_clock::now();
|
||||
if (scan_ctx.member) {
|
||||
// In this case, the expiration-time attribute we're
|
||||
// looking for is a member in a map, saved serialized
|
||||
// into bytes using Alternator's serialization (basically
|
||||
// a JSON serialized into bytes)
|
||||
// FIXME: is it possible to find a specific member of a map
|
||||
// without iterating through it like we do here and compare
|
||||
// the key?
|
||||
for (const auto& entry : value_cast<map_type_impl::native_type>(v)) {
|
||||
                    std::string attr_name = value_cast<sstring>(entry.first);
|
||||
                    if (attr_name == *scan_ctx.member) {
|
||||
bytes value = value_cast<bytes>(entry.second);
|
||||
rjson::value json = deserialize_item(value);
|
||||
expired = is_expired(json, now);
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// For a real column to contain an expiration time, it
|
||||
// must be a numeric type.
|
||||
// FIXME: Currently we only support decimal_type (which is
|
||||
// what Alternator uses), but other numeric types can be
|
||||
// supported as well to make this feature more useful in CQL.
|
||||
// Note that kind::decimal is also checked above.
|
||||
big_decimal n = value_cast<big_decimal>(v);
|
||||
expired = is_expired(n, now);
|
||||
}
|
||||
if (expired) {
|
||||
expiration_stats.items_deleted++;
|
||||
// FIXME: maybe don't recalculate new_timestamp() all the time
|
||||
// FIXME: if expire_item() throws on timeout, we need to retry it.
|
||||
auto ts = api::new_timestamp();
|
||||
co_await expire_item(proxy, *scan_ctx.query_state_ptr, row, s, ts);
|
||||
}
|
||||
}
|
||||
// FIXME: once in a while, persist p->state(), so on reboot
|
||||
// we don't start from scratch.
|
||||
}
|
||||
}
|
||||
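// Note on the expiration check above (an assumption about the attribute's
// meaning, following DynamoDB-style TTL semantics): the attribute is taken
// to hold an absolute expiration time in seconds since the epoch, so e.g. a
// value of 1700000000 makes the item eligible for deletion once the
// scanner's gc_clock::now() passes that point in time.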
|
||||
// scan_table() scans, in one table, data "owned" by this shard, looking for
|
||||
// expired items and deleting them.
|
||||
// We consider each node to "own" its primary token ranges, i.e., the tokens
|
||||
// that this node is their first replica in the ring. Inside the node, each
|
||||
// shard "owns" subranges of the node's token ranges - according to the node's
|
||||
// sharding algorithm.
|
||||
// When a node goes down, the token ranges owned by it will not be scanned
|
||||
// and items in those token ranges will not expire, so in the future (FIXME)
|
||||
// this function should additionally work on token ranges whose primary owner
|
||||
// is down and this node is the range's secondary owner.
|
||||
// If the TTL (expiration-time scanning) feature is not enabled for this
|
||||
// table, scan_table() returns false without doing anything. Remember that the
|
||||
// TTL feature may be enabled later so this function will need to be called
|
||||
// again when the feature is enabled.
|
||||
// Currently this function scans the entire table (or, rather the parts owned
|
||||
// by this shard) at full rate, once. In the future (FIXME) we should consider
|
||||
// how to pace this scan, how and when to repeat it, how to interleave or
|
||||
// parallelize scanning of multiple tables, and how to continue scans after a
|
||||
// reboot.
|
||||
static future<bool> scan_table(
|
||||
service::storage_proxy& proxy,
|
||||
data_dictionary::database db,
|
||||
gms::gossiper& gossiper,
|
||||
schema_ptr s,
|
||||
abort_source& abort_source,
|
||||
named_semaphore& page_sem,
|
||||
expiration_service::stats& expiration_stats)
|
||||
{
|
||||
// Check if an expiration-time attribute is enabled for this table.
|
||||
// If not, just return false immediately.
|
||||
// FIXME: the setting of the TTL may change in the middle of a long scan!
|
||||
std::optional<std::string> attribute_name = db::find_tag(*s, TTL_TAG_KEY);
|
||||
if (!attribute_name) {
|
||||
co_return false;
|
||||
}
|
||||
// attribute_name may be one of the schema's columns (in Alternator, this
|
||||
// means it's a key column), or an element in Alternator's attrs map
|
||||
// encoded in Alternator's JSON encoding.
|
||||
    // FIXME: To make this less Alternator-specific, we should encode in the
|
||||
// single key's value three things:
|
||||
// 1. The name of a column
|
||||
// 2. Optionally if column is a map, a member in the map
|
||||
// 3. The deserializer for the value: CQL or Alternator (JSON).
|
||||
// The deserializer can be guessed: If the given column or map item is
|
||||
// numeric, it can be used directly. If it is a "bytes" type, it needs to
|
||||
// be deserialized using Alternator's deserializer.
|
||||
bytes column_name = to_bytes(*attribute_name);
|
||||
const column_definition *cd = s->get_column_definition(column_name);
|
||||
std::optional<std::string> member;
|
||||
if (!cd) {
|
||||
member = std::move(attribute_name);
|
||||
column_name = bytes(executor::ATTRS_COLUMN_NAME);
|
||||
cd = s->get_column_definition(column_name);
|
||||
tlogger.info("table {} TTL enabled with attribute {} in {}", s->cf_name(), *member, executor::ATTRS_COLUMN_NAME);
|
||||
} else {
|
||||
tlogger.info("table {} TTL enabled with attribute {}", s->cf_name(), *attribute_name);
|
||||
}
|
||||
if (!cd) {
|
||||
tlogger.info("table {} TTL column is missing, not scanning", s->cf_name());
|
||||
co_return false;
|
||||
}
|
||||
data_type column_type = cd->type;
|
||||
// Verify that the column has the right type: If "member" exists
|
||||
// the column must be a map, and if it doesn't, the column must
|
||||
// (currently) be a decimal_type. If the column has the wrong type
|
||||
// nothing can get expired in this table, and it's pointless to
|
||||
// scan it.
|
||||
if ((member && column_type->get_kind() != abstract_type::kind::map) ||
|
||||
(!member && column_type->get_kind() != abstract_type::kind::decimal)) {
|
||||
tlogger.info("table {} TTL column has unsupported type, not scanning", s->cf_name());
|
||||
co_return false;
|
||||
}
|
||||
expiration_stats.scan_table++;
|
||||
// FIXME: need to pace the scan, not do it all at once.
|
||||
scan_ranges_context scan_ctx{s, proxy, std::move(column_name), std::move(member)};
|
||||
token_ranges_owned_by_this_shard<primary> my_ranges(db.real_database(), gossiper, s);
|
||||
while (std::optional<dht::partition_range> range = my_ranges.next_partition_range()) {
|
||||
// Note that because of issue #9167 we need to run a separate
|
||||
// query on each partition range, and can't pass several of
|
||||
// them into one partition_range_vector.
|
||||
dht::partition_range_vector partition_ranges;
|
||||
partition_ranges.push_back(std::move(*range));
|
||||
// FIXME: if scanning a single range fails, including network errors,
|
||||
// we fail the entire scan (and rescan from the beginning). Need to
|
||||
// reconsider this. Saving the scan position might be a good enough
|
||||
// solution for this problem.
|
||||
co_await scan_table_ranges(proxy, scan_ctx, std::move(partition_ranges), abort_source, page_sem, expiration_stats);
|
||||
}
|
||||
// If each node only scans its own primary ranges, then when any node is
|
||||
// down part of the token range will not get scanned. This can be viewed
|
||||
    // as acceptable (when the node comes back online, it will resume its scan),
|
||||
// but as noted in issue #9787, we can allow more prompt expiration
|
||||
// by tasking another node to take over scanning of the dead node's primary
|
||||
// ranges. What we do here is that this node will also check expiration
|
||||
// on its *secondary* ranges - but only those whose primary owner is down.
|
||||
token_ranges_owned_by_this_shard<secondary> my_secondary_ranges(db.real_database(), gossiper, s);
|
||||
while (std::optional<dht::partition_range> range = my_secondary_ranges.next_partition_range()) {
|
||||
expiration_stats.secondary_ranges_scanned++;
|
||||
dht::partition_range_vector partition_ranges;
|
||||
partition_ranges.push_back(std::move(*range));
|
||||
co_await scan_table_ranges(proxy, scan_ctx, std::move(partition_ranges), abort_source, page_sem, expiration_stats);
|
||||
}
|
||||
co_return true;
|
||||
}
|
||||
|
||||
|
||||
future<> expiration_service::run() {
|
||||
// FIXME: don't just tight-loop, think about timing, pace, and
|
||||
// store position in durable storage, etc.
|
||||
// FIXME: think about working on different tables in parallel.
|
||||
// also need to notice when a new table is added, a table is
|
||||
// deleted or when ttl is enabled or disabled for a table!
|
||||
for (;;) {
|
||||
auto start = lowres_clock::now();
|
||||
// _db.tables() may change under our feet during a
|
||||
// long-living loop, so we must keep our own copy of the list of
|
||||
// schemas.
|
||||
std::vector<schema_ptr> schemas;
|
||||
for (auto cf : _db.get_tables()) {
|
||||
schemas.push_back(cf.schema());
|
||||
}
|
||||
for (schema_ptr s : schemas) {
|
||||
co_await coroutine::maybe_yield();
|
||||
if (shutting_down()) {
|
||||
co_return;
|
||||
}
|
||||
try {
|
||||
co_await scan_table(_proxy, _db, _gossiper, s, _abort_source, _page_sem, _expiration_stats);
|
||||
} catch (...) {
|
||||
// The scan of a table may fail in the middle for many
|
||||
// reasons, including network failure and even the table
|
||||
// being removed. We'll continue scanning this table later
|
||||
// (if it still exists). In any case it's important to catch
|
||||
// the exception and not let the scanning service die for
|
||||
// good.
|
||||
// If the table has been deleted, it is expected that the scan
|
||||
// will fail at some point, and even a warning is excessive.
|
||||
if (_db.has_schema(s->ks_name(), s->cf_name())) {
|
||||
tlogger.warn("table {}.{} expiration scan failed: {}",
|
||||
s->ks_name(), s->cf_name(), std::current_exception());
|
||||
} else {
|
||||
tlogger.info("expiration scan failed when table {}.{} was deleted",
|
||||
s->ks_name(), s->cf_name());
|
||||
}
|
||||
}
|
||||
}
|
||||
_expiration_stats.scan_passes++;
|
||||
// The TTL scanner runs above once over all tables, at full steam.
|
||||
        // After completing such a scan, we sleep until it's time to start
|
||||
// another scan. TODO: If the scan went too fast, we can slow it down
|
||||
// in the next iteration by reducing the scanner's scheduling-group
|
||||
// share (if using a separate scheduling group), or introduce
|
||||
// finer-grain sleeps into the scanning code.
|
||||
std::chrono::milliseconds scan_duration(std::chrono::duration_cast<std::chrono::milliseconds>(lowres_clock::now() - start));
|
||||
std::chrono::milliseconds period(long(_db.get_config().alternator_ttl_period_in_seconds() * 1000));
|
||||
if (scan_duration < period) {
|
||||
try {
|
||||
tlogger.info("sleeping {} seconds until next period", (period - scan_duration).count()/1000.0);
|
||||
co_await seastar::sleep_abortable(period - scan_duration, _abort_source);
|
||||
} catch(seastar::sleep_aborted&) {}
|
||||
} else {
|
||||
tlogger.warn("scan took {} seconds, longer than period - not sleeping", scan_duration.count()/1000.0);
|
||||
}
|
||||
}
|
||||
}
|
||||
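// Worked example for the pacing logic above (assumed configuration value,
// purely illustrative): with alternator_ttl_period_in_seconds = 86400 and a
// pass that took 600000 ms, period - scan_duration = 86400000 - 600000 =
// 85800000 ms, so the service sleeps roughly 23.8 hours before starting the
// next pass.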
|
||||
future<> expiration_service::start() {
|
||||
// Called by main() on each shard to start the expiration-service
|
||||
// thread. Just runs run() in the background and allows stop().
|
||||
if (_db.features().alternator_ttl) {
|
||||
if (!shutting_down()) {
|
||||
_end = run().handle_exception([] (std::exception_ptr ep) {
|
||||
tlogger.error("expiration_service failed: {}", ep);
|
||||
});
|
||||
}
|
||||
}
|
||||
return make_ready_future<>();
|
||||
}
|
||||
|
||||
future<> expiration_service::stop() {
|
||||
if (_abort_source.abort_requested()) {
|
||||
throw std::logic_error("expiration_service::stop() called a second time");
|
||||
}
|
||||
_abort_source.request_abort();
|
||||
if (!_end) {
|
||||
        // if _end was not set, start() was never called
|
||||
return make_ready_future<>();
|
||||
}
|
||||
return std::move(*_end);
|
||||
}
|
||||
|
||||
expiration_service::stats::stats() {
|
||||
_metrics.add_group("expiration", {
|
||||
seastar::metrics::make_total_operations("scan_passes", scan_passes,
|
||||
seastar::metrics::description("number of passes over the database")),
|
||||
seastar::metrics::make_total_operations("scan_table", scan_table,
|
||||
seastar::metrics::description("number of table scans (counting each scan of each table that enabled expiration)")),
|
||||
seastar::metrics::make_total_operations("items_deleted", items_deleted,
|
||||
seastar::metrics::description("number of items deleted after expiration")),
|
||||
seastar::metrics::make_total_operations("secondary_ranges_scanned", secondary_ranges_scanned,
|
||||
seastar::metrics::description("number of token ranges scanned by this node while their primary owner was down")),
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
} // namespace alternator
|
||||
|
||||
@@ -1,80 +0,0 @@
|
||||
/*
|
||||
* Copyright 2021-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "seastarx.hh"
|
||||
#include <seastar/core/sharded.hh>
|
||||
#include <seastar/core/abort_source.hh>
|
||||
#include <seastar/core/semaphore.hh>
|
||||
#include "data_dictionary/data_dictionary.hh"
|
||||
|
||||
namespace gms {
|
||||
class gossiper;
|
||||
}
|
||||
|
||||
namespace replica {
|
||||
class database;
|
||||
}
|
||||
|
||||
namespace service {
|
||||
class storage_proxy;
|
||||
}
|
||||
|
||||
namespace alternator {
|
||||
|
||||
// expiration_service is a sharded service responsible for cleaning up expired
|
||||
// items in all tables with per-item expiration enabled. Currently, this means
|
||||
// Alternator tables with TTL configured via an UpdateTimeToLive request.
|
||||
class expiration_service final : public seastar::peering_sharded_service<expiration_service> {
|
||||
public:
|
||||
// Object holding per-shard statistics related to the expiration service.
|
||||
// While this object is alive, these metrics are also registered to be
|
||||
// visible by the metrics REST API, with the "expiration_" prefix.
|
||||
class stats {
|
||||
public:
|
||||
stats();
|
||||
uint64_t scan_passes = 0;
|
||||
uint64_t scan_table = 0;
|
||||
uint64_t items_deleted = 0;
|
||||
uint64_t secondary_ranges_scanned = 0;
|
||||
private:
|
||||
// The metric_groups object holds this stat object's metrics registered
|
||||
// as long as the stats object is alive.
|
||||
seastar::metrics::metric_groups _metrics;
|
||||
};
|
||||
private:
|
||||
data_dictionary::database _db;
|
||||
service::storage_proxy& _proxy;
|
||||
gms::gossiper& _gossiper;
|
||||
    // _end is set by start(), and resolves when the background service
|
||||
// started by it ends. To ask the background service to end, _abort_source
|
||||
// should be triggered. stop() below uses both _abort_source and _end.
|
||||
std::optional<future<>> _end;
|
||||
abort_source _abort_source;
|
||||
// Ensures that at most 1 page of scan results at a time is processed by the TTL service
|
||||
named_semaphore _page_sem{1, named_semaphore_exception_factory{"alternator_ttl"}};
|
||||
bool shutting_down() { return _abort_source.abort_requested(); }
|
||||
stats _expiration_stats;
|
||||
public:
|
||||
// sharded_service<expiration_service>::start() creates this object on
|
||||
// all shards, so calls this constructor on each shard. Later, the
|
||||
// additional start() function should be invoked on all shards.
|
||||
expiration_service(data_dictionary::database, service::storage_proxy&, gms::gossiper&);
|
||||
future<> start();
|
||||
future<> run();
|
||||
// sharded_service<expiration_service>::stop() calls the following stop()
|
||||
// method on each shard. This stop() asks the service on this shard to
|
||||
// shut down as quickly as it can. The returned future indicates when the
|
||||
// service is no longer running.
|
||||
// stop() may be called even before start(), but may only be called once -
|
||||
// calling it twice will result in an exception.
|
||||
future<> stop();
|
||||
};
|
||||
|
||||
} // namespace alternator
|
||||
@@ -1,29 +0,0 @@
|
||||
{
|
||||
"apiVersion":"0.0.1",
|
||||
"swaggerVersion":"1.2",
|
||||
"basePath":"{{Protocol}}://{{Host}}",
|
||||
"resourcePath":"/authorization_cache",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"apis":[
|
||||
{
|
||||
"path":"/authorization_cache/reset",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Reset cache",
|
||||
"type":"void",
|
||||
"nickname":"authorization_cache_reset",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"models":{
|
||||
}
|
||||
}
|
||||
@@ -102,47 +102,7 @@
|
||||
"parameters":[
|
||||
{
|
||||
"name":"type",
|
||||
"description":"The type of compaction to stop. Can be one of: COMPACTION | CLEANUP | SCRUB | UPGRADE | RESHAPE",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/compaction_manager/stop_keyspace_compaction/{keyspace}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Stop all running compaction-like tasks in the given keyspace and tables having the provided type.",
|
||||
"type":"void",
|
||||
"nickname":"stop_keyspace_compaction",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"keyspace",
|
||||
"description":"The keyspace to stop compaction in",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
},
|
||||
{
|
||||
"name":"tables",
|
||||
"description":"Comma-separated tables to stop compaction in",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"type",
|
||||
"description":"The type of compaction to stop. Can be one of: COMPACTION | CLEANUP | SCRUB | UPGRADE | RESHAPE",
|
||||
"description":"the type of compaction to stop. Can be one of: - COMPACTION - VALIDATION - CLEANUP - SCRUB - INDEX_BUILD",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
|
||||
@@ -624,7 +624,7 @@
|
||||
},
|
||||
{
|
||||
"name":"kn",
|
||||
"description":"Keyspace(s) to snapshot. Multiple keyspaces can be provided using a comma-separated list. If omitted, snapshot all keyspaces.",
|
||||
"description":"Comma seperated keyspaces name to snapshot",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -632,7 +632,7 @@
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Table(s) to snapshot. Multiple tables (in a single keyspace) can be provided using a comma-separated list. If omitted, snapshot all tables in the given keyspace(s).",
|
||||
"description":"the column family to snapshot",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -667,7 +667,7 @@
|
||||
},
|
||||
{
|
||||
"name":"kn",
|
||||
"description":"Comma-separated keyspaces name that their snapshot will be deleted",
|
||||
"description":"Comma seperated keyspaces name that their snapshot will be deleted",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -723,7 +723,7 @@
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated column family names",
|
||||
"description":"Comma seperated column family names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -755,39 +755,7 @@
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated column family names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/storage_service/keyspace_offstrategy_compaction/{keyspace}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Perform offstrategy compaction, if needed, in a single keyspace",
|
||||
"type":"boolean",
|
||||
"nickname":"perform_keyspace_offstrategy_compaction",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"keyspace",
|
||||
"description":"The keyspace to operate on",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated table names",
|
||||
"description":"Comma seperated column family names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -839,19 +807,6 @@
|
||||
],
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"quarantine_mode",
|
||||
"description":"Controls whether to scrub quarantined sstables (default INCLUDE)",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"enum":[
|
||||
"INCLUDE",
|
||||
"EXCLUDE",
|
||||
"ONLY"
|
||||
],
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"keyspace",
|
||||
"description":"The keyspace to query about",
|
||||
@@ -862,7 +817,7 @@
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated column family names",
|
||||
"description":"Comma seperated column family names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -902,7 +857,7 @@
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated column family names",
|
||||
"description":"Comma seperated column family names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -934,7 +889,7 @@
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated column family names",
|
||||
"description":"Comma seperated column family names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -1228,7 +1183,7 @@
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Removes a node from the cluster. Replicated data that logically belonged to this node is redistributed among the remaining nodes.",
|
||||
"summary":"Removes token (and all data associated with enpoint that had it) from the ring",
|
||||
"type":"void",
|
||||
"nickname":"remove_node",
|
||||
"produces":[
|
||||
@@ -1245,7 +1200,7 @@
|
||||
},
|
||||
{
|
||||
"name":"ignore_nodes",
|
||||
"description":"Comma-separated list of dead nodes to ignore in removenode operation. Use the same method for all nodes to ignore: either Host IDs or ip addresses.",
|
||||
"description":"List of dead nodes to ingore in removenode operation",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -2073,7 +2028,7 @@
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated column family names",
|
||||
"description":"Comma seperated column family names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -2100,7 +2055,7 @@
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated column family names",
|
||||
"description":"Comma seperated column family names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -2641,7 +2596,7 @@
|
||||
"version":{
|
||||
"type":"string",
|
||||
"enum":[
|
||||
"ka", "la", "mc", "md", "me"
|
||||
"ka", "la", "mc", "md"
|
||||
],
|
||||
"description":"SSTable version"
|
||||
},
|
||||
|
||||
@@ -52,45 +52,6 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/system/log",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Write a message to the Scylla log",
|
||||
"type":"void",
|
||||
"nickname":"write_log_message",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"message",
|
||||
"description":"The message to write to the log",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"level",
|
||||
"description":"The logging level to use",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"enum":[
|
||||
"error",
|
||||
"warn",
|
||||
"info",
|
||||
"debug",
|
||||
"trace"
|
||||
],
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/system/drop_sstable_caches",
|
||||
"operations":[
|
||||
|
||||
@@ -1,251 +0,0 @@
|
||||
{
|
||||
"apiVersion":"0.0.1",
|
||||
"swaggerVersion":"1.2",
|
||||
"basePath":"{{Protocol}}://{{Host}}",
|
||||
"resourcePath":"/task_manager",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"apis":[
|
||||
{
|
||||
"path":"/task_manager/list_modules",
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get all modules names",
|
||||
"type":"array",
|
||||
"items":{
|
||||
"type":"string"
|
||||
},
|
||||
"nickname":"get_modules",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/task_manager/list_module_tasks/{module}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get a list of tasks",
|
||||
"type":"array",
|
||||
"items":{
|
||||
"type":"task_stats"
|
||||
},
|
||||
"nickname":"get_tasks",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"module",
|
||||
"description":"The module to query about",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
},
|
||||
{
|
||||
"name":"internal",
|
||||
"description":"Boolean flag indicating whether internal tasks should be shown (false by default)",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"boolean",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"keyspace",
|
||||
"description":"The keyspace to query about",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"table",
|
||||
"description":"The table to query about",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/task_manager/task_status/{task_id}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get task status",
|
||||
"type":"task_status",
|
||||
"nickname":"get_task_status",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"task_id",
|
||||
"description":"The uuid of a task to query about",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/task_manager/abort_task/{task_id}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Abort running task and its descendants",
|
||||
"type":"void",
|
||||
"nickname":"abort_task",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"task_id",
|
||||
"description":"The uuid of a task to abort",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/task_manager/wait_task/{task_id}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Wait for a task to complete",
|
||||
"type":"task_status",
|
||||
"nickname":"wait_task",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"task_id",
|
||||
"description":"The uuid of a task to wait for",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"models":{
|
||||
"task_stats" :{
|
||||
"id": "task_stats",
|
||||
"description":"A task statistics object",
|
||||
"properties":{
|
||||
"task_id":{
|
||||
"type":"string",
|
||||
"description":"The uuid of a task"
|
||||
},
|
||||
"state":{
|
||||
"type":"string",
|
||||
"enum":[
|
||||
"created",
|
||||
"running",
|
||||
"done",
|
||||
"failed"
|
||||
],
|
||||
"description":"The state of a task"
|
||||
}
|
||||
}
|
||||
},
|
||||
"task_status":{
|
||||
"id":"task_status",
|
||||
"description":"A task status object",
|
||||
"properties":{
|
||||
"id":{
|
||||
"type":"string",
|
||||
"description":"The uuid of the task"
|
||||
},
|
||||
"type":{
|
||||
"type":"string",
|
||||
"description":"The description of the task"
|
||||
},
|
||||
"state":{
|
||||
"type":"string",
|
||||
"enum":[
|
||||
"created",
|
||||
"running",
|
||||
"done",
|
||||
"failed"
|
||||
],
|
||||
"description":"The state of the task"
|
||||
},
|
||||
"is_abortable":{
|
||||
"type":"boolean",
|
||||
"description":"Boolean flag indicating whether the task can be aborted"
|
||||
},
|
||||
"start_time":{
|
||||
"type":"datetime",
|
||||
"description":"The start time of the task"
|
||||
},
|
||||
"end_time":{
|
||||
"type":"datetime",
|
||||
"description":"The end time of the task (unspecified when the task is not completed)"
|
||||
},
|
||||
"error":{
|
||||
"type":"string",
|
||||
"description":"Error string, if the task failed"
|
||||
},
|
||||
"parent_id":{
|
||||
"type":"string",
|
||||
"description":"The uuid of the parent task"
|
||||
},
|
||||
"sequence_number":{
|
||||
"type":"long",
|
||||
"description":"The running sequence number of the task"
|
||||
},
|
||||
"shard":{
|
||||
"type":"long",
|
||||
"description":"The number of a shard the task is running on"
|
||||
},
|
||||
"keyspace":{
|
||||
"type":"string",
|
||||
"description":"The keyspace the task is working on (if applicable)"
|
||||
},
|
||||
"table":{
|
||||
"type":"string",
|
||||
"description":"The table the task is working on (if applicable)"
|
||||
},
|
||||
"entity":{
|
||||
"type":"string",
|
||||
"description":"Task-specific entity description"
|
||||
},
|
||||
"progress_units":{
|
||||
"type":"string",
|
||||
"description":"A description of the progress units"
|
||||
},
|
||||
"progress_total":{
|
||||
"type":"double",
|
||||
"description":"The total number of units to complete for the task"
|
||||
},
|
||||
"progress_completed":{
|
||||
"type":"double",
|
||||
"description":"The number of units completed so far"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,185 +0,0 @@
|
||||
{
|
||||
"apiVersion":"0.0.1",
|
||||
"swaggerVersion":"1.2",
|
||||
"basePath":"{{Protocol}}://{{Host}}",
|
||||
"resourcePath":"/task_manager_test",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"apis":[
|
||||
{
|
||||
"path":"/task_manager_test/test_module",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Register test module in task manager",
|
||||
"type":"void",
|
||||
"nickname":"register_test_module",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
]
|
||||
},
|
||||
{
|
||||
"method":"DELETE",
|
||||
"summary":"Unregister test module in task manager",
|
||||
"type":"void",
|
||||
"nickname":"unregister_test_module",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/task_manager_test/test_task",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Register test task",
|
||||
"type":"string",
|
||||
"nickname":"register_test_task",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"task_id",
|
||||
"description":"The uuid of a task to register",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"shard",
|
||||
"description":"The shard of the task",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"long",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"parent_id",
|
||||
"description":"The uuid of a parent task",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"keyspace",
|
||||
"description":"The keyspace the task is working on",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"table",
|
||||
"description":"The table the task is working on",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"type",
|
||||
"description":"The type of the task",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"entity",
|
||||
"description":"Task-specific entity description",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"method":"DELETE",
|
||||
"summary":"Unregister test task",
|
||||
"type":"void",
|
||||
"nickname":"unregister_test_task",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"task_id",
|
||||
"description":"The uuid of a task to register",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/task_manager_test/finish_test_task/{task_id}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Finish test task",
|
||||
"type":"void",
|
||||
"nickname":"finish_test_task",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"task_id",
|
||||
"description":"The uuid of a task to finish",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
},
|
||||
{
|
||||
"name":"error",
|
||||
"description":"The error with which task fails (if it does)",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/task_manager_test/ttl",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Set ttl in seconds and get last value",
|
||||
"type":"long",
|
||||
"nickname":"get_and_update_ttl",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"ttl",
|
||||
"description":"The number of seconds for which the tasks will be kept in memory after it finishes",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"long",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
108
api/api.cc
108
api/api.cc
@@ -3,7 +3,20 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "api.hh"
|
||||
@@ -24,13 +37,10 @@
|
||||
#include "compaction_manager.hh"
|
||||
#include "hinted_handoff.hh"
|
||||
#include "error_injection.hh"
|
||||
#include "authorization_cache.hh"
|
||||
#include <seastar/http/exception.hh>
|
||||
#include "stream_manager.hh"
|
||||
#include "system.hh"
|
||||
#include "api/config.hh"
|
||||
#include "task_manager.hh"
|
||||
#include "task_manager_test.hh"
|
||||
|
||||
logging::logger apilog("api");
|
||||
|
||||
@@ -39,7 +49,7 @@ namespace api {
|
||||
static std::unique_ptr<reply> exception_reply(std::exception_ptr eptr) {
|
||||
try {
|
||||
std::rethrow_exception(eptr);
|
||||
} catch (const replica::no_such_keyspace& ex) {
|
||||
} catch (const no_such_keyspace& ex) {
|
||||
throw bad_param_exception(ex.what());
|
||||
}
|
||||
    // We are never going to get here
|
||||
@@ -99,9 +109,9 @@ future<> unset_rpc_controller(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_rpc_controller(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_storage_service(http_context& ctx, sharded<service::storage_service>& ss, sharded<gms::gossiper>& g, sharded<cdc::generation_service>& cdc_gs, sharded<db::system_keyspace>& sys_ks) {
|
||||
return register_api(ctx, "storage_service", "The storage service API", [&ss, &g, &cdc_gs, &sys_ks] (http_context& ctx, routes& r) {
|
||||
set_storage_service(ctx, r, ss, g.local(), cdc_gs, sys_ks);
|
||||
future<> set_server_storage_service(http_context& ctx, sharded<service::storage_service>& ss, sharded<gms::gossiper>& g, sharded<cdc::generation_service>& cdc_gs) {
|
||||
return register_api(ctx, "storage_service", "The storage service API", [&ss, &g, &cdc_gs] (http_context& ctx, routes& r) {
|
||||
set_storage_service(ctx, r, ss, g.local(), cdc_gs);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -129,17 +139,6 @@ future<> unset_server_repair(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_repair(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_authorization_cache(http_context &ctx, sharded<auth::service> &auth_service) {
|
||||
return register_api(ctx, "authorization_cache",
|
||||
"The authorization cache API", [&auth_service] (http_context &ctx, routes &r) {
|
||||
set_authorization_cache(ctx, r, auth_service);
|
||||
});
|
||||
}
|
||||
|
||||
future<> unset_server_authorization_cache(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_authorization_cache(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_snapshot(http_context& ctx, sharded<db::snapshot_ctl>& snap_ctl) {
|
||||
return ctx.http_server.set_routes([&ctx, &snap_ctl] (routes& r) { set_snapshot(ctx, r, snap_ctl); });
|
||||
}
|
||||
@@ -148,14 +147,8 @@ future<> unset_server_snapshot(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_snapshot(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_snitch(http_context& ctx, sharded<locator::snitch_ptr>& snitch) {
|
||||
return register_api(ctx, "endpoint_snitch_info", "The endpoint snitch info API", [&snitch] (http_context& ctx, routes& r) {
|
||||
set_endpoint_snitch(ctx, r, snitch);
|
||||
});
|
||||
}
|
||||
|
||||
future<> unset_server_snitch(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_endpoint_snitch(ctx, r); });
|
||||
future<> set_server_snitch(http_context& ctx) {
|
||||
return register_api(ctx, "endpoint_snitch_info", "The endpoint snitch info API", set_endpoint_snitch);
|
||||
}
|
||||
|
||||
future<> set_server_gossip(http_context& ctx, sharded<gms::gossiper>& g) {
|
||||
@@ -187,15 +180,9 @@ future<> set_server_storage_proxy(http_context& ctx, sharded<service::storage_se
|
||||
});
|
||||
}
|
||||
|
||||
future<> set_server_stream_manager(http_context& ctx, sharded<streaming::stream_manager>& sm) {
|
||||
future<> set_server_stream_manager(http_context& ctx) {
|
||||
return register_api(ctx, "stream_manager",
|
||||
"The stream manager API", [&sm] (http_context& ctx, routes& r) {
|
||||
set_stream_manager(ctx, r, sm);
|
||||
});
|
||||
}
|
||||
|
||||
future<> unset_server_stream_manager(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_stream_manager(ctx, r); });
|
||||
"The stream manager API", set_stream_manager);
|
||||
}
|
||||
|
||||
future<> set_server_cache(http_context& ctx) {
|
||||
@@ -253,56 +240,5 @@ future<> set_server_done(http_context& ctx) {
|
||||
});
|
||||
}
|
||||
|
||||
future<> set_server_task_manager(http_context& ctx) {
|
||||
auto rb = std::make_shared < api_registry_builder > (ctx.api_doc);
|
||||
|
||||
return ctx.http_server.set_routes([rb, &ctx](routes& r) {
|
||||
rb->register_function(r, "task_manager",
|
||||
"The task manager API");
|
||||
set_task_manager(ctx, r);
|
||||
});
|
||||
}
|
||||
|
||||
#ifndef SCYLLA_BUILD_MODE_RELEASE
|
||||
|
||||
future<> set_server_task_manager_test(http_context& ctx, lw_shared_ptr<db::config> cfg) {
|
||||
auto rb = std::make_shared < api_registry_builder > (ctx.api_doc);
|
||||
|
||||
return ctx.http_server.set_routes([rb, &ctx, &cfg = *cfg](routes& r) mutable {
|
||||
rb->register_function(r, "task_manager_test",
|
||||
"The task manager test API");
|
||||
set_task_manager_test(ctx, r, cfg);
|
||||
});
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
void req_params::process(const request& req) {
|
||||
// Process mandatory parameters
|
||||
for (auto& [name, ent] : params) {
|
||||
if (!ent.is_mandatory) {
|
||||
continue;
|
||||
}
|
||||
try {
|
||||
ent.value = req.param[name];
|
||||
} catch (std::out_of_range&) {
|
||||
throw httpd::bad_param_exception(fmt::format("Mandatory parameter '{}' was not provided", name));
|
||||
}
|
||||
}
|
||||
|
||||
// Process optional parameters
|
||||
for (auto& [name, value] : req.query_parameters) {
|
||||
try {
|
||||
auto& ent = params.at(name);
|
||||
if (ent.is_mandatory) {
|
||||
throw httpd::bad_param_exception(fmt::format("Parameter '{}' is expected to be provided as part of the request url", name));
|
||||
}
|
||||
ent.value = value;
|
||||
} catch (std::out_of_range&) {
|
||||
throw httpd::bad_param_exception(fmt::format("Unsupported optional parameter '{}'", name));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
91
api/api.hh
91
api/api.hh
@@ -3,7 +3,20 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
@@ -81,6 +94,13 @@ inline std::vector<sstring> split(const sstring& text, const char* separator) {
|
||||
return boost::split(tokens, text, boost::is_any_of(separator));
|
||||
}
|
||||
|
||||
/**
|
||||
* Split a column family parameter
|
||||
*/
|
||||
inline std::vector<sstring> split_cf(const sstring& cf) {
|
||||
return split(cf, ",");
|
||||
}
|
||||
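// For example (illustrative): split_cf("cf1,cf2") yields {"cf1", "cf2"},
// and a single table name without commas yields a one-element vector.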
|
||||
/**
|
||||
 * A helper function to sum values on a distributed object that
|
||||
* has a get_stats method.
|
||||
@@ -137,14 +157,6 @@ future<json::json_return_type> sum_timer_stats(distributed<T>& d, utils::timed_
|
||||
});
|
||||
}
|
||||
|
||||
template<class T, class F>
|
||||
future<json::json_return_type> sum_timer_stats(distributed<T>& d, utils::timed_rate_moving_average_summary_and_histogram F::*f) {
|
||||
return d.map_reduce0([f](const T& p) {return (p.get_stats().*f).rate();}, utils::rate_moving_average_and_histogram(),
|
||||
std::plus<utils::rate_moving_average_and_histogram>()).then([](const utils::rate_moving_average_and_histogram& val) {
|
||||
return make_ready_future<json::json_return_type>(timer_to_json(val));
|
||||
});
|
||||
}
|
||||
|
||||
inline int64_t min_int64(int64_t a, int64_t b) {
|
||||
return std::min(a,b);
|
||||
}
|
||||
@@ -245,67 +257,6 @@ public:
operator T() const { return value; }
};

using mandatory = bool_class<struct mandatory_tag>;

class req_params {
public:
struct def {
std::optional<sstring> value;
mandatory is_mandatory = mandatory::no;

def(std::optional<sstring> value_ = std::nullopt, mandatory is_mandatory_ = mandatory::no)
: value(std::move(value_))
, is_mandatory(is_mandatory_)
{ }

def(mandatory is_mandatory_)
: is_mandatory(is_mandatory_)
{ }
};

private:
std::unordered_map<sstring, def> params;

public:
req_params(std::initializer_list<std::pair<sstring, def>> l) {
for (const auto& [name, ent] : l) {
add(std::move(name), std::move(ent));
}
}

void add(sstring name, def ent) {
params.emplace(std::move(name), std::move(ent));
}

void process(const request& req);

const std::optional<sstring>& get(const char* name) const {
return params.at(name).value;
}

template <typename T = sstring>
const std::optional<T> get_as(const char* name) const {
return get(name);
}

template <typename T = sstring>
requires std::same_as<T, bool>
const std::optional<bool> get_as(const char* name) const {
auto value = get(name);
if (!value) {
return std::nullopt;
}
std::transform(value->begin(), value->end(), value->begin(), ::tolower);
if (value == "true" || value == "yes" || value == "1") {
return true;
}
if (value == "false" || value == "no" || value == "0") {
return false;
}
throw boost::bad_lexical_cast{};
}
};

utils_json::estimated_histogram time_to_json_histogram(const utils::time_estimated_histogram& val);

}
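The req_params hunk above shows how an optional boolean query parameter is interpreted: the raw value is lowercased and matched against a small set of accepted tokens, and anything else is rejected. A minimal standalone sketch of that rule using plain std types follows; parse_bool_param is an illustrative name, not the Scylla API.

// Standalone sketch (not Scylla code): the same lowercase-and-match rule the
// get_as<bool> overload above applies to an optional query-parameter value.
#include <algorithm>
#include <cctype>
#include <iostream>
#include <optional>
#include <stdexcept>
#include <string>

std::optional<bool> parse_bool_param(std::optional<std::string> value) {
    if (!value) {
        return std::nullopt;   // parameter was not supplied at all
    }
    std::transform(value->begin(), value->end(), value->begin(),
                   [](unsigned char c) { return std::tolower(c); });
    if (*value == "true" || *value == "yes" || *value == "1") {
        return true;
    }
    if (*value == "false" || *value == "no" || *value == "0") {
        return false;
    }
    throw std::invalid_argument("not a boolean: " + *value);
}

int main() {
    std::cout << *parse_bool_param("YES") << '\n';                    // 1
    std::cout << *parse_bool_param("0") << '\n';                      // 0
    std::cout << parse_bool_param(std::nullopt).has_value() << '\n';  // 0
}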
@@ -3,15 +3,27 @@
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once

#include <seastar/http/httpd.hh>
#include <seastar/core/future.hh>

#include "replica/database_fwd.hh"
#include "tasks/task_manager.hh"
#include "database_fwd.hh"
#include "seastarx.hh"

namespace service {
@@ -24,15 +36,10 @@ class storage_service;

class sstables_loader;

namespace streaming {
class stream_manager;
}

namespace locator {

class token_metadata;
class shared_token_metadata;
class snitch_ptr;

} // namespace locator

@@ -44,7 +51,6 @@ class config;
namespace view {
class view_builder;
}
class system_keyspace;
}
namespace netw { class messaging_service; }
class repair_service;
@@ -56,24 +62,21 @@ class gossiper;

}

namespace auth { class service; }

namespace api {

struct http_context {
sstring api_dir;
sstring api_doc;
httpd::http_server_control http_server;
distributed<replica::database>& db;
distributed<database>& db;
distributed<service::storage_proxy>& sp;
service::load_meter& lmeter;
const sharded<locator::shared_token_metadata>& shared_token_metadata;
sharded<tasks::task_manager>& tm;

http_context(distributed<replica::database>& _db,
http_context(distributed<database>& _db,
distributed<service::storage_proxy>& _sp,
service::load_meter& _lm, const sharded<locator::shared_token_metadata>& _stm, sharded<tasks::task_manager>& _tm)
: db(_db), sp(_sp), lmeter(_lm), shared_token_metadata(_stm), tm(_tm) {
service::load_meter& _lm, const sharded<locator::shared_token_metadata>& _stm)
: db(_db), sp(_sp), lmeter(_lm), shared_token_metadata(_stm) {
}

const locator::token_metadata& get_token_metadata();
@@ -81,9 +84,8 @@ struct http_context {

future<> set_server_init(http_context& ctx);
future<> set_server_config(http_context& ctx, const db::config& cfg);
future<> set_server_snitch(http_context& ctx, sharded<locator::snitch_ptr>& snitch);
future<> unset_server_snitch(http_context& ctx);
future<> set_server_storage_service(http_context& ctx, sharded<service::storage_service>& ss, sharded<gms::gossiper>& g, sharded<cdc::generation_service>& cdc_gs, sharded<db::system_keyspace>& sys_ks);
future<> set_server_snitch(http_context& ctx);
future<> set_server_storage_service(http_context& ctx, sharded<service::storage_service>& ss, sharded<gms::gossiper>& g, sharded<cdc::generation_service>& cdc_gs);
future<> set_server_sstables_loader(http_context& ctx, sharded<sstables_loader>& sst_loader);
future<> unset_server_sstables_loader(http_context& ctx);
future<> set_server_view_builder(http_context& ctx, sharded<db::view::view_builder>& vb);
@@ -94,8 +96,6 @@ future<> set_transport_controller(http_context& ctx, cql_transport::controller&
future<> unset_transport_controller(http_context& ctx);
future<> set_rpc_controller(http_context& ctx, thrift_controller& ctl);
future<> unset_rpc_controller(http_context& ctx);
future<> set_server_authorization_cache(http_context& ctx, sharded<auth::service> &auth_service);
future<> unset_server_authorization_cache(http_context& ctx);
future<> set_server_snapshot(http_context& ctx, sharded<db::snapshot_ctl>& snap_ctl);
future<> unset_server_snapshot(http_context& ctx);
future<> set_server_gossip(http_context& ctx, sharded<gms::gossiper>& g);
@@ -103,15 +103,12 @@ future<> set_server_load_sstable(http_context& ctx);
future<> set_server_messaging_service(http_context& ctx, sharded<netw::messaging_service>& ms);
future<> unset_server_messaging_service(http_context& ctx);
future<> set_server_storage_proxy(http_context& ctx, sharded<service::storage_service>& ss);
future<> set_server_stream_manager(http_context& ctx, sharded<streaming::stream_manager>& sm);
future<> unset_server_stream_manager(http_context& ctx);
future<> set_server_stream_manager(http_context& ctx);
future<> set_hinted_handoff(http_context& ctx, sharded<gms::gossiper>& g);
future<> unset_hinted_handoff(http_context& ctx);
future<> set_server_gossip_settle(http_context& ctx, sharded<gms::gossiper>& g);
future<> set_server_cache(http_context& ctx);
future<> set_server_compaction_manager(http_context& ctx);
future<> set_server_done(http_context& ctx);
future<> set_server_task_manager(http_context& ctx);
future<> set_server_task_manager_test(http_context& ctx, lw_shared_ptr<db::config> cfg);

}
@@ -1,33 +0,0 @@
/*
 * Copyright (C) 2022-present ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 */

#include "api/api-doc/authorization_cache.json.hh"

#include "api/authorization_cache.hh"
#include "api/api.hh"
#include "auth/common.hh"

namespace api {
using namespace json;

void set_authorization_cache(http_context& ctx, routes& r, sharded<auth::service> &auth_service) {
httpd::authorization_cache_json::authorization_cache_reset.set(r, [&auth_service] (std::unique_ptr<request> req) -> future<json::json_return_type> {
co_await auth_service.invoke_on_all([] (auth::service& auth) -> future<> {
auth.reset_authorization_cache();
return make_ready_future<>();
});

co_return json_void();
});
}

void unset_authorization_cache(http_context& ctx, routes& r) {
httpd::authorization_cache_json::authorization_cache_reset.unset(r);
}

}
@@ -1,18 +0,0 @@
/*
 * Copyright (C) 2022-present ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 */

#pragma once

#include "api.hh"

namespace api {

void set_authorization_cache(http_context& ctx, routes& r, sharded<auth::service> &auth_service);
void unset_authorization_cache(http_context& ctx, routes& r);

}
@@ -3,7 +3,20 @@
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

#include "cache_service.hh"
|
||||
@@ -195,7 +208,7 @@ void set_cache_service(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cs::get_row_capacity.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return ctx.db.map_reduce0([](replica::database& db) -> uint64_t {
|
||||
return ctx.db.map_reduce0([](database& db) -> uint64_t {
|
||||
return db.row_cache_tracker().region().occupancy().used_space();
|
||||
}, uint64_t(0), std::plus<uint64_t>()).then([](const int64_t& res) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
@@ -203,26 +216,26 @@ void set_cache_service(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cs::get_row_hits.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, uint64_t(0), [](const replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, uint64_t(0), [](const column_family& cf) {
|
||||
return cf.get_row_cache().stats().hits.count();
|
||||
}, std::plus<uint64_t>());
|
||||
});
|
||||
|
||||
cs::get_row_requests.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, uint64_t(0), [](const replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, uint64_t(0), [](const column_family& cf) {
|
||||
return cf.get_row_cache().stats().hits.count() + cf.get_row_cache().stats().misses.count();
|
||||
}, std::plus<uint64_t>());
|
||||
});
|
||||
|
||||
cs::get_row_hit_rate.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, ratio_holder(), [](const replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, ratio_holder(), [](const column_family& cf) {
|
||||
return ratio_holder(cf.get_row_cache().stats().hits.count() + cf.get_row_cache().stats().misses.count(),
|
||||
cf.get_row_cache().stats().hits.count());
|
||||
}, std::plus<ratio_holder>());
|
||||
});
|
||||
|
||||
cs::get_row_hits_moving_avrage.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf_raw(ctx, utils::rate_moving_average(), [](const replica::column_family& cf) {
|
||||
return map_reduce_cf_raw(ctx, utils::rate_moving_average(), [](const column_family& cf) {
|
||||
return cf.get_row_cache().stats().hits.rate();
|
||||
}, std::plus<utils::rate_moving_average>()).then([](const utils::rate_moving_average& m) {
|
||||
return make_ready_future<json::json_return_type>(meter_to_json(m));
|
||||
@@ -230,7 +243,7 @@ void set_cache_service(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cs::get_row_requests_moving_avrage.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf_raw(ctx, utils::rate_moving_average(), [](const replica::column_family& cf) {
|
||||
return map_reduce_cf_raw(ctx, utils::rate_moving_average(), [](const column_family& cf) {
|
||||
return cf.get_row_cache().stats().hits.rate() + cf.get_row_cache().stats().misses.rate();
|
||||
}, std::plus<utils::rate_moving_average>()).then([](const utils::rate_moving_average& m) {
|
||||
return make_ready_future<json::json_return_type>(meter_to_json(m));
|
||||
@@ -240,7 +253,7 @@ void set_cache_service(http_context& ctx, routes& r) {
|
||||
cs::get_row_size.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
// In origin row size is the weighted size.
|
||||
// We currently do not support weights, so we use num entries instead
|
||||
return ctx.db.map_reduce0([](replica::database& db) -> uint64_t {
|
||||
return ctx.db.map_reduce0([](database& db) -> uint64_t {
|
||||
return db.row_cache_tracker().partitions();
|
||||
}, uint64_t(0), std::plus<uint64_t>()).then([](const int64_t& res) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
@@ -248,7 +261,7 @@ void set_cache_service(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cs::get_row_entries.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return ctx.db.map_reduce0([](replica::database& db) -> uint64_t {
|
||||
return ctx.db.map_reduce0([](database& db) -> uint64_t {
|
||||
return db.row_cache_tracker().partitions();
|
||||
}, uint64_t(0), std::plus<uint64_t>()).then([](const int64_t& res) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
|
||||
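The get_row_hit_rate handler above folds per-table hits and requests into a ratio_holder and only divides after the per-shard sums are combined. A standalone sketch of that accumulate-then-divide pattern with plain std types follows; hit_ratio_acc is an illustrative name, not the Scylla ratio_holder.

// Standalone sketch (illustrative, not Scylla code): accumulate per-shard
// (requests, hits) pairs and divide once at the end, so empty caches add
// nothing instead of skewing the average.
#include <iostream>
#include <vector>

struct hit_ratio_acc {
    double total = 0;   // requests seen
    double sub = 0;     // hits seen
    hit_ratio_acc& operator+=(const hit_ratio_acc& o) {
        total += o.total;
        sub += o.sub;
        return *this;
    }
    double ratio() const { return total == 0 ? 0.0 : sub / total; }
};

int main() {
    std::vector<hit_ratio_acc> per_shard = {{100, 90}, {0, 0}, {50, 10}};
    hit_ratio_acc sum;
    for (const auto& s : per_shard) {
        sum += s;
    }
    std::cout << sum.ratio() << '\n';   // 100 hits / 150 requests ~ 0.667
}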
@@ -3,7 +3,20 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -3,7 +3,20 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "collectd.hh"
|
||||
@@ -29,11 +42,8 @@ static auto transformer(const std::vector<collectd_value>& values) {
|
||||
case scollectd::data_type::GAUGE:
|
||||
collected_value.values.push(v.d());
|
||||
break;
|
||||
case scollectd::data_type::COUNTER:
|
||||
collected_value.values.push(v.ui());
|
||||
break;
|
||||
case scollectd::data_type::REAL_COUNTER:
|
||||
collected_value.values.push(v.d());
|
||||
case scollectd::data_type::DERIVE:
|
||||
collected_value.values.push(v.i());
|
||||
break;
|
||||
default:
|
||||
collected_value.values.push(v.ui());
|
||||
|
||||
@@ -3,7 +3,20 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -3,7 +3,20 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "column_family.hh"
|
||||
@@ -14,7 +27,7 @@
|
||||
#include "sstables/metadata_collector.hh"
|
||||
#include "utils/estimated_histogram.hh"
|
||||
#include <algorithm>
|
||||
#include "db/system_keyspace.hh"
|
||||
#include "db/system_keyspace_view_types.hh"
|
||||
#include "db/data_listeners.hh"
|
||||
#include "storage_service.hh"
|
||||
#include "unimplemented.hh"
|
||||
@@ -43,57 +56,57 @@ std::tuple<sstring, sstring> parse_fully_qualified_cf_name(sstring name) {
|
||||
return std::make_tuple(name.substr(0, pos), name.substr(end));
|
||||
}
|
||||
|
||||
const table_id& get_uuid(const sstring& ks, const sstring& cf, const replica::database& db) {
|
||||
const utils::UUID& get_uuid(const sstring& ks, const sstring& cf, const database& db) {
|
||||
try {
|
||||
return db.find_uuid(ks, cf);
|
||||
} catch (replica::no_such_column_family& e) {
|
||||
throw bad_param_exception(e.what());
|
||||
} catch (std::out_of_range& e) {
|
||||
throw bad_param_exception(format("Column family '{}:{}' not found", ks, cf));
|
||||
}
|
||||
}
|
||||
|
||||
const table_id& get_uuid(const sstring& name, const replica::database& db) {
|
||||
const utils::UUID& get_uuid(const sstring& name, const database& db) {
|
||||
auto [ks, cf] = parse_fully_qualified_cf_name(name);
|
||||
return get_uuid(ks, cf, db);
|
||||
}
|
||||
|
||||
future<> foreach_column_family(http_context& ctx, const sstring& name, function<void(replica::column_family&)> f) {
|
||||
future<> foreach_column_family(http_context& ctx, const sstring& name, function<void(column_family&)> f) {
|
||||
auto uuid = get_uuid(name, ctx.db.local());
|
||||
|
||||
return ctx.db.invoke_on_all([f, uuid](replica::database& db) {
|
||||
return ctx.db.invoke_on_all([f, uuid](database& db) {
|
||||
f(db.find_column_family(uuid));
|
||||
});
|
||||
}
|
||||
|
||||
future<json::json_return_type> get_cf_stats(http_context& ctx, const sstring& name,
|
||||
int64_t replica::column_family_stats::*f) {
|
||||
return map_reduce_cf(ctx, name, int64_t(0), [f](const replica::column_family& cf) {
|
||||
int64_t column_family_stats::*f) {
|
||||
return map_reduce_cf(ctx, name, int64_t(0), [f](const column_family& cf) {
|
||||
return cf.get_stats().*f;
|
||||
}, std::plus<int64_t>());
|
||||
}
|
||||
|
||||
future<json::json_return_type> get_cf_stats(http_context& ctx,
|
||||
int64_t replica::column_family_stats::*f) {
|
||||
return map_reduce_cf(ctx, int64_t(0), [f](const replica::column_family& cf) {
|
||||
int64_t column_family_stats::*f) {
|
||||
return map_reduce_cf(ctx, int64_t(0), [f](const column_family& cf) {
|
||||
return cf.get_stats().*f;
|
||||
}, std::plus<int64_t>());
|
||||
}
|
||||
|
||||
static future<json::json_return_type> get_cf_stats_count(http_context& ctx, const sstring& name,
|
||||
utils::timed_rate_moving_average_summary_and_histogram replica::column_family_stats::*f) {
|
||||
return map_reduce_cf(ctx, name, int64_t(0), [f](const replica::column_family& cf) {
|
||||
utils::timed_rate_moving_average_and_histogram column_family_stats::*f) {
|
||||
return map_reduce_cf(ctx, name, int64_t(0), [f](const column_family& cf) {
|
||||
return (cf.get_stats().*f).hist.count;
|
||||
}, std::plus<int64_t>());
|
||||
}
|
||||
|
||||
static future<json::json_return_type> get_cf_stats_sum(http_context& ctx, const sstring& name,
|
||||
utils::timed_rate_moving_average_summary_and_histogram replica::column_family_stats::*f) {
|
||||
utils::timed_rate_moving_average_and_histogram column_family_stats::*f) {
|
||||
auto uuid = get_uuid(name, ctx.db.local());
|
||||
return ctx.db.map_reduce0([uuid, f](replica::database& db) {
|
||||
return ctx.db.map_reduce0([uuid, f](database& db) {
|
||||
// Histograms information is sample of the actual load
|
||||
// so to get an estimation of sum, we multiply the mean
|
||||
// with count. The information is gather in nano second,
|
||||
// but reported in micro
|
||||
replica::column_family& cf = db.find_column_family(uuid);
|
||||
column_family& cf = db.find_column_family(uuid);
|
||||
return ((cf.get_stats().*f).hist.count/1000.0) * (cf.get_stats().*f).hist.mean;
|
||||
}, 0.0, std::plus<double>()).then([](double res) {
|
||||
return make_ready_future<json::json_return_type>((int64_t)res);
|
||||
@@ -102,16 +115,16 @@ static future<json::json_return_type> get_cf_stats_sum(http_context& ctx, const
|
||||
|
||||
|
||||
static future<json::json_return_type> get_cf_stats_count(http_context& ctx,
|
||||
utils::timed_rate_moving_average_summary_and_histogram replica::column_family_stats::*f) {
|
||||
return map_reduce_cf(ctx, int64_t(0), [f](const replica::column_family& cf) {
|
||||
utils::timed_rate_moving_average_and_histogram column_family_stats::*f) {
|
||||
return map_reduce_cf(ctx, int64_t(0), [f](const column_family& cf) {
|
||||
return (cf.get_stats().*f).hist.count;
|
||||
}, std::plus<int64_t>());
|
||||
}
|
||||
|
||||
static future<json::json_return_type> get_cf_histogram(http_context& ctx, const sstring& name,
|
||||
utils::timed_rate_moving_average_and_histogram replica::column_family_stats::*f) {
|
||||
auto uuid = get_uuid(name, ctx.db.local());
|
||||
return ctx.db.map_reduce0([f, uuid](const replica::database& p) {
|
||||
utils::timed_rate_moving_average_and_histogram column_family_stats::*f) {
|
||||
utils::UUID uuid = get_uuid(name, ctx.db.local());
|
||||
return ctx.db.map_reduce0([f, uuid](const database& p) {
|
||||
return (p.find_column_family(uuid).get_stats().*f).hist;},
|
||||
utils::ihistogram(),
|
||||
std::plus<utils::ihistogram>())
|
||||
@@ -120,20 +133,8 @@ static future<json::json_return_type> get_cf_histogram(http_context& ctx, const
|
||||
});
|
||||
}
|
||||
|
||||
static future<json::json_return_type> get_cf_histogram(http_context& ctx, const sstring& name,
|
||||
utils::timed_rate_moving_average_summary_and_histogram replica::column_family_stats::*f) {
|
||||
auto uuid = get_uuid(name, ctx.db.local());
|
||||
return ctx.db.map_reduce0([f, uuid](const replica::database& p) {
|
||||
return (p.find_column_family(uuid).get_stats().*f).hist;},
|
||||
utils::ihistogram(),
|
||||
std::plus<utils::ihistogram>())
|
||||
.then([](const utils::ihistogram& val) {
|
||||
return make_ready_future<json::json_return_type>(to_json(val));
|
||||
});
|
||||
}
|
||||
|
||||
static future<json::json_return_type> get_cf_histogram(http_context& ctx, utils::timed_rate_moving_average_summary_and_histogram replica::column_family_stats::*f) {
|
||||
std::function<utils::ihistogram(const replica::database&)> fun = [f] (const replica::database& db) {
|
||||
static future<json::json_return_type> get_cf_histogram(http_context& ctx, utils::timed_rate_moving_average_and_histogram column_family_stats::*f) {
|
||||
std::function<utils::ihistogram(const database&)> fun = [f] (const database& db) {
|
||||
utils::ihistogram res;
|
||||
for (auto i : db.get_column_families()) {
|
||||
res += (i.second->get_stats().*f).hist;
|
||||
@@ -148,9 +149,9 @@ static future<json::json_return_type> get_cf_histogram(http_context& ctx, utils:
|
||||
}
|
||||
|
||||
static future<json::json_return_type> get_cf_rate_and_histogram(http_context& ctx, const sstring& name,
|
||||
utils::timed_rate_moving_average_summary_and_histogram replica::column_family_stats::*f) {
|
||||
auto uuid = get_uuid(name, ctx.db.local());
|
||||
return ctx.db.map_reduce0([f, uuid](const replica::database& p) {
|
||||
utils::timed_rate_moving_average_and_histogram column_family_stats::*f) {
|
||||
utils::UUID uuid = get_uuid(name, ctx.db.local());
|
||||
return ctx.db.map_reduce0([f, uuid](const database& p) {
|
||||
return (p.find_column_family(uuid).get_stats().*f).rate();},
|
||||
utils::rate_moving_average_and_histogram(),
|
||||
std::plus<utils::rate_moving_average_and_histogram>())
|
||||
@@ -159,8 +160,8 @@ static future<json::json_return_type> get_cf_rate_and_histogram(http_context& c
|
||||
});
|
||||
}
|
||||
|
||||
static future<json::json_return_type> get_cf_rate_and_histogram(http_context& ctx, utils::timed_rate_moving_average_summary_and_histogram replica::column_family_stats::*f) {
|
||||
std::function<utils::rate_moving_average_and_histogram(const replica::database&)> fun = [f] (const replica::database& db) {
|
||||
static future<json::json_return_type> get_cf_rate_and_histogram(http_context& ctx, utils::timed_rate_moving_average_and_histogram column_family_stats::*f) {
|
||||
std::function<utils::rate_moving_average_and_histogram(const database&)> fun = [f] (const database& db) {
|
||||
utils::rate_moving_average_and_histogram res;
|
||||
for (auto i : db.get_column_families()) {
|
||||
res += (i.second->get_stats().*f).rate();
|
||||
@@ -175,12 +176,12 @@ static future<json::json_return_type> get_cf_rate_and_histogram(http_context& ct
|
||||
}
|
||||
|
||||
static future<json::json_return_type> get_cf_unleveled_sstables(http_context& ctx, const sstring& name) {
|
||||
return map_reduce_cf(ctx, name, int64_t(0), [](const replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, name, int64_t(0), [](const column_family& cf) {
|
||||
return cf.get_unleveled_sstables();
|
||||
}, std::plus<int64_t>());
|
||||
}
|
||||
|
||||
static int64_t min_partition_size(replica::column_family& cf) {
|
||||
static int64_t min_partition_size(column_family& cf) {
|
||||
int64_t res = INT64_MAX;
|
||||
for (auto sstables = cf.get_sstables(); auto& i : *sstables) {
|
||||
res = std::min(res, i->get_stats_metadata().estimated_partition_size.min());
|
||||
@@ -188,7 +189,7 @@ static int64_t min_partition_size(replica::column_family& cf) {
|
||||
return (res == INT64_MAX) ? 0 : res;
|
||||
}
|
||||
|
||||
static int64_t max_partition_size(replica::column_family& cf) {
|
||||
static int64_t max_partition_size(column_family& cf) {
|
||||
int64_t res = 0;
|
||||
for (auto sstables = cf.get_sstables(); auto& i : *sstables) {
|
||||
res = std::max(i->get_stats_metadata().estimated_partition_size.max(), res);
|
||||
@@ -196,7 +197,7 @@ static int64_t max_partition_size(replica::column_family& cf) {
|
||||
return res;
|
||||
}
|
||||
|
||||
static integral_ratio_holder mean_partition_size(replica::column_family& cf) {
|
||||
static integral_ratio_holder mean_partition_size(column_family& cf) {
|
||||
integral_ratio_holder res;
|
||||
for (auto sstables = cf.get_sstables(); auto& i : *sstables) {
|
||||
auto c = i->get_stats_metadata().estimated_partition_size.count();
|
||||
@@ -222,7 +223,7 @@ static json::json_return_type sum_map(const std::unordered_map<sstring, uint64_t
|
||||
|
||||
static future<json::json_return_type> sum_sstable(http_context& ctx, const sstring name, bool total) {
|
||||
auto uuid = get_uuid(name, ctx.db.local());
|
||||
return ctx.db.map_reduce0([uuid, total](replica::database& db) {
|
||||
return ctx.db.map_reduce0([uuid, total](database& db) {
|
||||
std::unordered_map<sstring, uint64_t> m;
|
||||
auto sstables = (total) ? db.find_column_family(uuid).get_sstables_including_compacted_undeleted() :
|
||||
db.find_column_family(uuid).get_sstables();
|
||||
@@ -238,7 +239,7 @@ static future<json::json_return_type> sum_sstable(http_context& ctx, const sstr
|
||||
|
||||
|
||||
static future<json::json_return_type> sum_sstable(http_context& ctx, bool total) {
|
||||
return map_reduce_cf_raw(ctx, std::unordered_map<sstring, uint64_t>(), [total](replica::column_family& cf) {
|
||||
return map_reduce_cf_raw(ctx, std::unordered_map<sstring, uint64_t>(), [total](column_family& cf) {
|
||||
std::unordered_map<sstring, uint64_t> m;
|
||||
auto sstables = (total) ? cf.get_sstables_including_compacted_undeleted() :
|
||||
cf.get_sstables();
|
||||
@@ -251,7 +252,7 @@ static future<json::json_return_type> sum_sstable(http_context& ctx, bool total)
|
||||
});
|
||||
}
|
||||
|
||||
future<json::json_return_type> map_reduce_cf_time_histogram(http_context& ctx, const sstring& name, std::function<utils::time_estimated_histogram(const replica::column_family&)> f) {
|
||||
future<json::json_return_type> map_reduce_cf_time_histogram(http_context& ctx, const sstring& name, std::function<utils::time_estimated_histogram(const column_family&)> f) {
|
||||
return map_reduce_cf_raw(ctx, name, utils::time_estimated_histogram(), f, utils::time_estimated_histogram_merge).then([](const utils::time_estimated_histogram& res) {
|
||||
return make_ready_future<json::json_return_type>(time_to_json_histogram(res));
|
||||
});
|
||||
@@ -274,7 +275,7 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
static double get_compression_ratio(replica::column_family& cf) {
|
||||
static double get_compression_ratio(column_family& cf) {
|
||||
sum_ratio<double> result;
|
||||
for (auto sstables = cf.get_sstables(); auto& i : *sstables) {
|
||||
auto compression_ratio = i->get_compression_ratio();
|
||||
@@ -333,13 +334,13 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cf::get_memtable_columns_count.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, req->param["name"], uint64_t{0}, [](replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], uint64_t{0}, [](column_family& cf) {
|
||||
return cf.active_memtable().partition_count();
|
||||
}, std::plus<>());
|
||||
});
|
||||
|
||||
cf::get_all_memtable_columns_count.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, uint64_t{0}, [](replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, uint64_t{0}, [](column_family& cf) {
|
||||
return cf.active_memtable().partition_count();
|
||||
}, std::plus<>());
|
||||
});
|
||||
@@ -353,25 +354,25 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cf::get_memtable_off_heap_size.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, req->param["name"], int64_t(0), [](replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], int64_t(0), [](column_family& cf) {
|
||||
return cf.active_memtable().region().occupancy().total_space();
|
||||
}, std::plus<int64_t>());
|
||||
});
|
||||
|
||||
cf::get_all_memtable_off_heap_size.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, int64_t(0), [](replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, int64_t(0), [](column_family& cf) {
|
||||
return cf.active_memtable().region().occupancy().total_space();
|
||||
}, std::plus<int64_t>());
|
||||
});
|
||||
|
||||
cf::get_memtable_live_data_size.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, req->param["name"], int64_t(0), [](replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], int64_t(0), [](column_family& cf) {
|
||||
return cf.active_memtable().region().occupancy().used_space();
|
||||
}, std::plus<int64_t>());
|
||||
});
|
||||
|
||||
cf::get_all_memtable_live_data_size.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, int64_t(0), [](replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, int64_t(0), [](column_family& cf) {
|
||||
return cf.active_memtable().region().occupancy().used_space();
|
||||
}, std::plus<int64_t>());
|
||||
});
|
||||
@@ -386,15 +387,15 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
|
||||
cf::get_cf_all_memtables_off_heap_size.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
warn(unimplemented::cause::INDEXES);
|
||||
return map_reduce_cf(ctx, req->param["name"], int64_t(0), [](replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], int64_t(0), [](column_family& cf) {
|
||||
return cf.occupancy().total_space();
|
||||
}, std::plus<int64_t>());
|
||||
});
|
||||
|
||||
cf::get_all_cf_all_memtables_off_heap_size.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
warn(unimplemented::cause::INDEXES);
|
||||
return ctx.db.map_reduce0([](const replica::database& db){
|
||||
return db.dirty_memory_region_group().real_memory_used();
|
||||
return ctx.db.map_reduce0([](const database& db){
|
||||
return db.dirty_memory_region_group().memory_used();
|
||||
}, int64_t(0), std::plus<int64_t>()).then([](int res) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
@@ -402,29 +403,29 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
|
||||
cf::get_cf_all_memtables_live_data_size.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
warn(unimplemented::cause::INDEXES);
|
||||
return map_reduce_cf(ctx, req->param["name"], int64_t(0), [](replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], int64_t(0), [](column_family& cf) {
|
||||
return cf.occupancy().used_space();
|
||||
}, std::plus<int64_t>());
|
||||
});
|
||||
|
||||
cf::get_all_cf_all_memtables_live_data_size.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
warn(unimplemented::cause::INDEXES);
|
||||
return map_reduce_cf(ctx, int64_t(0), [](replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, int64_t(0), [](column_family& cf) {
|
||||
return cf.active_memtable().region().occupancy().used_space();
|
||||
}, std::plus<int64_t>());
|
||||
});
|
||||
|
||||
cf::get_memtable_switch_count.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_stats(ctx,req->param["name"] ,&replica::column_family_stats::memtable_switch_count);
|
||||
return get_cf_stats(ctx,req->param["name"] ,&column_family_stats::memtable_switch_count);
|
||||
});
|
||||
|
||||
cf::get_all_memtable_switch_count.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_stats(ctx, &replica::column_family_stats::memtable_switch_count);
|
||||
return get_cf_stats(ctx, &column_family_stats::memtable_switch_count);
|
||||
});
|
||||
|
||||
// FIXME: this refers to partitions, not rows.
|
||||
cf::get_estimated_row_size_histogram.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, req->param["name"], utils::estimated_histogram(0), [](replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], utils::estimated_histogram(0), [](column_family& cf) {
|
||||
utils::estimated_histogram res(0);
|
||||
for (auto sstables = cf.get_sstables(); auto& i : *sstables) {
|
||||
res.merge(i->get_stats_metadata().estimated_partition_size);
|
||||
@@ -436,7 +437,7 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
|
||||
// FIXME: this refers to partitions, not rows.
|
||||
cf::get_estimated_row_count.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, req->param["name"], int64_t(0), [](replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], int64_t(0), [](column_family& cf) {
|
||||
uint64_t res = 0;
|
||||
for (auto sstables = cf.get_sstables(); auto& i : *sstables) {
|
||||
res += i->get_stats_metadata().estimated_partition_size.count();
|
||||
@@ -447,7 +448,7 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cf::get_estimated_column_count_histogram.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, req->param["name"], utils::estimated_histogram(0), [](replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], utils::estimated_histogram(0), [](column_family& cf) {
|
||||
utils::estimated_histogram res(0);
|
||||
for (auto sstables = cf.get_sstables(); auto& i : *sstables) {
|
||||
res.merge(i->get_stats_metadata().estimated_cells_count);
|
||||
@@ -464,87 +465,87 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cf::get_pending_flushes.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_stats(ctx,req->param["name"] ,&replica::column_family_stats::pending_flushes);
|
||||
return get_cf_stats(ctx,req->param["name"] ,&column_family_stats::pending_flushes);
|
||||
});
|
||||
|
||||
cf::get_all_pending_flushes.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_stats(ctx, &replica::column_family_stats::pending_flushes);
|
||||
return get_cf_stats(ctx, &column_family_stats::pending_flushes);
|
||||
});
|
||||
|
||||
cf::get_read.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_stats_count(ctx,req->param["name"] ,&replica::column_family_stats::reads);
|
||||
return get_cf_stats_count(ctx,req->param["name"] ,&column_family_stats::reads);
|
||||
});
|
||||
|
||||
cf::get_all_read.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_stats_count(ctx, &replica::column_family_stats::reads);
|
||||
return get_cf_stats_count(ctx, &column_family_stats::reads);
|
||||
});
|
||||
|
||||
cf::get_write.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_stats_count(ctx, req->param["name"] ,&replica::column_family_stats::writes);
|
||||
return get_cf_stats_count(ctx, req->param["name"] ,&column_family_stats::writes);
|
||||
});
|
||||
|
||||
cf::get_all_write.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_stats_count(ctx, &replica::column_family_stats::writes);
|
||||
return get_cf_stats_count(ctx, &column_family_stats::writes);
|
||||
});
|
||||
|
||||
cf::get_read_latency_histogram_depricated.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_histogram(ctx, req->param["name"], &replica::column_family_stats::reads);
|
||||
return get_cf_histogram(ctx, req->param["name"], &column_family_stats::reads);
|
||||
});
|
||||
|
||||
cf::get_read_latency_histogram.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_rate_and_histogram(ctx, req->param["name"], &replica::column_family_stats::reads);
|
||||
return get_cf_rate_and_histogram(ctx, req->param["name"], &column_family_stats::reads);
|
||||
});
|
||||
|
||||
cf::get_read_latency.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_stats_sum(ctx,req->param["name"] ,&replica::column_family_stats::reads);
|
||||
return get_cf_stats_sum(ctx,req->param["name"] ,&column_family_stats::reads);
|
||||
});
|
||||
|
||||
cf::get_write_latency.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_stats_sum(ctx, req->param["name"] ,&replica::column_family_stats::writes);
|
||||
return get_cf_stats_sum(ctx, req->param["name"] ,&column_family_stats::writes);
|
||||
});
|
||||
|
||||
cf::get_all_read_latency_histogram_depricated.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_histogram(ctx, &replica::column_family_stats::writes);
|
||||
return get_cf_histogram(ctx, &column_family_stats::writes);
|
||||
});
|
||||
|
||||
cf::get_all_read_latency_histogram.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_rate_and_histogram(ctx, &replica::column_family_stats::writes);
|
||||
return get_cf_rate_and_histogram(ctx, &column_family_stats::writes);
|
||||
});
|
||||
|
||||
cf::get_write_latency_histogram_depricated.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_histogram(ctx, req->param["name"], &replica::column_family_stats::writes);
|
||||
return get_cf_histogram(ctx, req->param["name"], &column_family_stats::writes);
|
||||
});
|
||||
|
||||
cf::get_write_latency_histogram.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_rate_and_histogram(ctx, req->param["name"], &replica::column_family_stats::writes);
|
||||
return get_cf_rate_and_histogram(ctx, req->param["name"], &column_family_stats::writes);
|
||||
});
|
||||
|
||||
cf::get_all_write_latency_histogram_depricated.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_histogram(ctx, &replica::column_family_stats::writes);
|
||||
return get_cf_histogram(ctx, &column_family_stats::writes);
|
||||
});
|
||||
|
||||
cf::get_all_write_latency_histogram.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_rate_and_histogram(ctx, &replica::column_family_stats::writes);
|
||||
return get_cf_rate_and_histogram(ctx, &column_family_stats::writes);
|
||||
});
|
||||
|
||||
cf::get_pending_compactions.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, req->param["name"], int64_t(0), [](replica::column_family& cf) {
|
||||
return cf.get_compaction_strategy().estimated_pending_compactions(cf.as_table_state());
|
||||
return map_reduce_cf(ctx, req->param["name"], int64_t(0), [](column_family& cf) {
|
||||
return cf.get_compaction_strategy().estimated_pending_compactions(cf);
|
||||
}, std::plus<int64_t>());
|
||||
});
|
||||
|
||||
cf::get_all_pending_compactions.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, int64_t(0), [](replica::column_family& cf) {
|
||||
return cf.get_compaction_strategy().estimated_pending_compactions(cf.as_table_state());
|
||||
return map_reduce_cf(ctx, int64_t(0), [](column_family& cf) {
|
||||
return cf.get_compaction_strategy().estimated_pending_compactions(cf);
|
||||
}, std::plus<int64_t>());
|
||||
});
|
||||
|
||||
cf::get_live_ss_table_count.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_stats(ctx, req->param["name"], &replica::column_family_stats::live_sstable_count);
|
||||
return get_cf_stats(ctx, req->param["name"], &column_family_stats::live_sstable_count);
|
||||
});
|
||||
|
||||
cf::get_all_live_ss_table_count.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cf_stats(ctx, &replica::column_family_stats::live_sstable_count);
|
||||
return get_cf_stats(ctx, &column_family_stats::live_sstable_count);
|
||||
});
|
||||
|
||||
cf::get_unleveled_sstables.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
@@ -600,7 +601,7 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cf::get_bloom_filter_false_positives.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, req->param["name"], uint64_t(0), [] (replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], uint64_t(0), [] (column_family& cf) {
|
||||
auto sstables = cf.get_sstables();
|
||||
return std::accumulate(sstables->begin(), sstables->end(), uint64_t(0), [](uint64_t s, auto& sst) {
|
||||
return s + sst->filter_get_false_positive();
|
||||
@@ -609,7 +610,7 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cf::get_all_bloom_filter_false_positives.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, uint64_t(0), [] (replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, uint64_t(0), [] (column_family& cf) {
|
||||
auto sstables = cf.get_sstables();
|
||||
return std::accumulate(sstables->begin(), sstables->end(), uint64_t(0), [](uint64_t s, auto& sst) {
|
||||
return s + sst->filter_get_false_positive();
|
||||
@@ -618,7 +619,7 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cf::get_recent_bloom_filter_false_positives.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, req->param["name"], uint64_t(0), [] (replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], uint64_t(0), [] (column_family& cf) {
|
||||
auto sstables = cf.get_sstables();
|
||||
return std::accumulate(sstables->begin(), sstables->end(), uint64_t(0), [](uint64_t s, auto& sst) {
|
||||
return s + sst->filter_get_recent_false_positive();
|
||||
@@ -627,7 +628,7 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cf::get_all_recent_bloom_filter_false_positives.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, uint64_t(0), [] (replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, uint64_t(0), [] (column_family& cf) {
|
||||
auto sstables = cf.get_sstables();
|
||||
return std::accumulate(sstables->begin(), sstables->end(), uint64_t(0), [](uint64_t s, auto& sst) {
|
||||
return s + sst->filter_get_recent_false_positive();
|
||||
@@ -636,31 +637,31 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cf::get_bloom_filter_false_ratio.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, req->param["name"], ratio_holder(), [] (replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], ratio_holder(), [] (column_family& cf) {
|
||||
return boost::accumulate(*cf.get_sstables() | boost::adaptors::transformed(filter_false_positive_as_ratio_holder), ratio_holder());
|
||||
}, std::plus<>());
|
||||
});
|
||||
|
||||
cf::get_all_bloom_filter_false_ratio.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, ratio_holder(), [] (replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, ratio_holder(), [] (column_family& cf) {
|
||||
return boost::accumulate(*cf.get_sstables() | boost::adaptors::transformed(filter_false_positive_as_ratio_holder), ratio_holder());
|
||||
}, std::plus<>());
|
||||
});
|
||||
|
||||
cf::get_recent_bloom_filter_false_ratio.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, req->param["name"], ratio_holder(), [] (replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], ratio_holder(), [] (column_family& cf) {
|
||||
return boost::accumulate(*cf.get_sstables() | boost::adaptors::transformed(filter_recent_false_positive_as_ratio_holder), ratio_holder());
|
||||
}, std::plus<>());
|
||||
});
|
||||
|
||||
cf::get_all_recent_bloom_filter_false_ratio.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, ratio_holder(), [] (replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, ratio_holder(), [] (column_family& cf) {
|
||||
return boost::accumulate(*cf.get_sstables() | boost::adaptors::transformed(filter_recent_false_positive_as_ratio_holder), ratio_holder());
|
||||
}, std::plus<>());
|
||||
});
|
||||
|
||||
cf::get_bloom_filter_disk_space_used.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, req->param["name"], uint64_t(0), [] (replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], uint64_t(0), [] (column_family& cf) {
|
||||
auto sstables = cf.get_sstables();
|
||||
return std::accumulate(sstables->begin(), sstables->end(), uint64_t(0), [](uint64_t s, auto& sst) {
|
||||
return s + sst->filter_size();
|
||||
@@ -669,7 +670,7 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cf::get_all_bloom_filter_disk_space_used.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, uint64_t(0), [] (replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, uint64_t(0), [] (column_family& cf) {
|
||||
auto sstables = cf.get_sstables();
|
||||
return std::accumulate(sstables->begin(), sstables->end(), uint64_t(0), [](uint64_t s, auto& sst) {
|
||||
return s + sst->filter_size();
|
||||
@@ -678,7 +679,7 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cf::get_bloom_filter_off_heap_memory_used.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, req->param["name"], uint64_t(0), [] (replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], uint64_t(0), [] (column_family& cf) {
|
||||
auto sstables = cf.get_sstables();
|
||||
return std::accumulate(sstables->begin(), sstables->end(), uint64_t(0), [](uint64_t s, auto& sst) {
|
||||
return s + sst->filter_memory_size();
|
||||
@@ -687,7 +688,7 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cf::get_all_bloom_filter_off_heap_memory_used.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, uint64_t(0), [] (replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, uint64_t(0), [] (column_family& cf) {
|
||||
auto sstables = cf.get_sstables();
|
||||
return std::accumulate(sstables->begin(), sstables->end(), uint64_t(0), [](uint64_t s, auto& sst) {
|
||||
return s + sst->filter_memory_size();
|
||||
@@ -696,7 +697,7 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cf::get_index_summary_off_heap_memory_used.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, req->param["name"], uint64_t(0), [] (replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], uint64_t(0), [] (column_family& cf) {
|
||||
auto sstables = cf.get_sstables();
|
||||
return std::accumulate(sstables->begin(), sstables->end(), uint64_t(0), [](uint64_t s, auto& sst) {
|
||||
return s + sst->get_summary().memory_footprint();
|
||||
@@ -705,7 +706,7 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cf::get_all_index_summary_off_heap_memory_used.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, uint64_t(0), [] (replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, uint64_t(0), [] (column_family& cf) {
|
||||
auto sstables = cf.get_sstables();
|
||||
return std::accumulate(sstables->begin(), sstables->end(), uint64_t(0), [](uint64_t s, auto& sst) {
|
||||
return s + sst->get_summary().memory_footprint();
|
||||
@@ -752,7 +753,7 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
cf::get_true_snapshots_size.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
auto uuid = get_uuid(req->param["name"], ctx.db.local());
|
||||
return ctx.db.local().find_column_family(uuid).get_snapshot_details().then([](
|
||||
const std::unordered_map<sstring, replica::column_family::snapshot_details>& sd) {
|
||||
const std::unordered_map<sstring, column_family::snapshot_details>& sd) {
|
||||
int64_t res = 0;
|
||||
for (auto i : sd) {
|
||||
res += i.second.total;
|
||||
@@ -781,7 +782,7 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cf::get_row_cache_hit.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf_raw(ctx, req->param["name"], utils::rate_moving_average(), [](const replica::column_family& cf) {
|
||||
return map_reduce_cf_raw(ctx, req->param["name"], utils::rate_moving_average(), [](const column_family& cf) {
|
||||
return cf.get_row_cache().stats().hits.rate();
|
||||
}, std::plus<utils::rate_moving_average>()).then([](const utils::rate_moving_average& m) {
|
||||
return make_ready_future<json::json_return_type>(meter_to_json(m));
|
||||
@@ -789,7 +790,7 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cf::get_all_row_cache_hit.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf_raw(ctx, utils::rate_moving_average(), [](const replica::column_family& cf) {
|
||||
return map_reduce_cf_raw(ctx, utils::rate_moving_average(), [](const column_family& cf) {
|
||||
return cf.get_row_cache().stats().hits.rate();
|
||||
}, std::plus<utils::rate_moving_average>()).then([](const utils::rate_moving_average& m) {
|
||||
return make_ready_future<json::json_return_type>(meter_to_json(m));
|
||||
@@ -797,7 +798,7 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cf::get_row_cache_miss.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf_raw(ctx, req->param["name"], utils::rate_moving_average(), [](const replica::column_family& cf) {
|
||||
return map_reduce_cf_raw(ctx, req->param["name"], utils::rate_moving_average(), [](const column_family& cf) {
|
||||
return cf.get_row_cache().stats().misses.rate();
|
||||
}, std::plus<utils::rate_moving_average>()).then([](const utils::rate_moving_average& m) {
|
||||
return make_ready_future<json::json_return_type>(meter_to_json(m));
|
||||
@@ -805,7 +806,7 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cf::get_all_row_cache_miss.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf_raw(ctx, utils::rate_moving_average(), [](const replica::column_family& cf) {
|
||||
return map_reduce_cf_raw(ctx, utils::rate_moving_average(), [](const column_family& cf) {
|
||||
return cf.get_row_cache().stats().misses.rate();
}, std::plus<utils::rate_moving_average>()).then([](const utils::rate_moving_average& m) {
return make_ready_future<json::json_return_type>(meter_to_json(m));
@@ -814,36 +815,36 @@ void set_column_family(http_context& ctx, routes& r) {
});

cf::get_cas_prepare.set(r, [&ctx] (std::unique_ptr<request> req) {
return map_reduce_cf_time_histogram(ctx, req->param["name"], [](const replica::column_family& cf) {
return cf.get_stats().cas_prepare.histogram();
return map_reduce_cf_time_histogram(ctx, req->param["name"], [](const column_family& cf) {
return cf.get_stats().estimated_cas_prepare;
});
});

cf::get_cas_propose.set(r, [&ctx] (std::unique_ptr<request> req) {
return map_reduce_cf_time_histogram(ctx, req->param["name"], [](const replica::column_family& cf) {
return cf.get_stats().cas_accept.histogram();
return map_reduce_cf_time_histogram(ctx, req->param["name"], [](const column_family& cf) {
return cf.get_stats().estimated_cas_accept;
});
});

cf::get_cas_commit.set(r, [&ctx] (std::unique_ptr<request> req) {
return map_reduce_cf_time_histogram(ctx, req->param["name"], [](const replica::column_family& cf) {
return cf.get_stats().cas_learn.histogram();
return map_reduce_cf_time_histogram(ctx, req->param["name"], [](const column_family& cf) {
return cf.get_stats().estimated_cas_learn;
});
});

cf::get_sstables_per_read_histogram.set(r, [&ctx] (std::unique_ptr<request> req) {
return map_reduce_cf(ctx, req->param["name"], utils::estimated_histogram(0), [](replica::column_family& cf) {
return map_reduce_cf(ctx, req->param["name"], utils::estimated_histogram(0), [](column_family& cf) {
return cf.get_stats().estimated_sstable_per_read;
},
utils::estimated_histogram_merge, utils_json::estimated_histogram());
});

cf::get_tombstone_scanned_histogram.set(r, [&ctx] (std::unique_ptr<request> req) {
return get_cf_histogram(ctx, req->param["name"], &replica::column_family_stats::tombstone_scanned);
return get_cf_histogram(ctx, req->param["name"], &column_family_stats::tombstone_scanned);
});

cf::get_live_scanned_histogram.set(r, [&ctx] (std::unique_ptr<request> req) {
return get_cf_histogram(ctx, req->param["name"], &replica::column_family_stats::live_scanned);
return get_cf_histogram(ctx, req->param["name"], &column_family_stats::live_scanned);
});

cf::get_col_update_time_delta_histogram.set(r, [] (std::unique_ptr<request> req) {
@@ -855,15 +856,15 @@ void set_column_family(http_context& ctx, routes& r) {
});

cf::get_auto_compaction.set(r, [&ctx] (const_req req) {
auto uuid = get_uuid(req.param["name"], ctx.db.local());
replica::column_family& cf = ctx.db.local().find_column_family(uuid);
const utils::UUID& uuid = get_uuid(req.param["name"], ctx.db.local());
column_family& cf = ctx.db.local().find_column_family(uuid);
return !cf.is_auto_compaction_disabled_by_user();
});

cf::enable_auto_compaction.set(r, [&ctx](std::unique_ptr<request> req) {
return ctx.db.invoke_on(0, [&ctx, req = std::move(req)] (replica::database& db) {
auto g = replica::database::autocompaction_toggle_guard(db);
return foreach_column_family(ctx, req->param["name"], [](replica::column_family &cf) {
return ctx.db.invoke_on(0, [&ctx, req = std::move(req)] (database& db) {
auto g = database::autocompaction_toggle_guard(db);
return foreach_column_family(ctx, req->param["name"], [](column_family &cf) {
cf.enable_auto_compaction();
}).then([g = std::move(g)] {
return make_ready_future<json::json_return_type>(json_void());
@@ -872,10 +873,10 @@ void set_column_family(http_context& ctx, routes& r) {
});

cf::disable_auto_compaction.set(r, [&ctx](std::unique_ptr<request> req) {
return ctx.db.invoke_on(0, [&ctx, req = std::move(req)] (replica::database& db) {
auto g = replica::database::autocompaction_toggle_guard(db);
return foreach_column_family(ctx, req->param["name"], [](replica::column_family &cf) {
return cf.disable_auto_compaction();
return ctx.db.invoke_on(0, [&ctx, req = std::move(req)] (database& db) {
auto g = database::autocompaction_toggle_guard(db);
return foreach_column_family(ctx, req->param["name"], [](column_family &cf) {
cf.disable_auto_compaction();
}).then([g = std::move(g)] {
return make_ready_future<json::json_return_type>(json_void());
});
@@ -895,7 +896,7 @@ void set_column_family(http_context& ctx, routes& r) {
}
std::vector<sstring> res;
auto uuid = get_uuid(ks, cf_name, ctx.db.local());
replica::column_family& cf = ctx.db.local().find_column_family(uuid);
column_family& cf = ctx.db.local().find_column_family(uuid);
res.reserve(cf.get_index_manager().list_indexes().size());
for (auto&& i : cf.get_index_manager().list_indexes()) {
if (!vp.contains(secondary_index::index_table_name(i.metadata().name()))) {
@@ -923,8 +924,8 @@ void set_column_family(http_context& ctx, routes& r) {
cf::get_compression_ratio.set(r, [&ctx](std::unique_ptr<request> req) {
auto uuid = get_uuid(req->param["name"], ctx.db.local());

return ctx.db.map_reduce(sum_ratio<double>(), [uuid](replica::database& db) {
replica::column_family& cf = db.find_column_family(uuid);
return ctx.db.map_reduce(sum_ratio<double>(), [uuid](database& db) {
column_family& cf = db.find_column_family(uuid);
return make_ready_future<double>(get_compression_ratio(cf));
}).then([] (const double& result) {
return make_ready_future<json::json_return_type>(result);
@@ -932,20 +933,20 @@ void set_column_family(http_context& ctx, routes& r) {
});

cf::get_read_latency_estimated_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
return map_reduce_cf_time_histogram(ctx, req->param["name"], [](const replica::column_family& cf) {
return cf.get_stats().reads.histogram();
return map_reduce_cf_time_histogram(ctx, req->param["name"], [](const column_family& cf) {
return cf.get_stats().estimated_read;
});
});

cf::get_write_latency_estimated_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
return map_reduce_cf_time_histogram(ctx, req->param["name"], [](const replica::column_family& cf) {
return cf.get_stats().writes.histogram();
return map_reduce_cf_time_histogram(ctx, req->param["name"], [](const column_family& cf) {
return cf.get_stats().estimated_write;
});
});

cf::set_compaction_strategy_class.set(r, [&ctx](std::unique_ptr<request> req) {
sstring strategy = req->get_query_param("class_name");
return foreach_column_family(ctx, req->param["name"], [strategy](replica::column_family& cf) {
return foreach_column_family(ctx, req->param["name"], [strategy](column_family& cf) {
cf.set_compaction_strategy(sstables::compaction_strategy::type(strategy));
}).then([] {
return make_ready_future<json::json_return_type>(json_void());
@@ -969,7 +970,7 @@ void set_column_family(http_context& ctx, routes& r) {
});

cf::get_sstable_count_per_level.set(r, [&ctx](std::unique_ptr<request> req) {
return map_reduce_cf_raw(ctx, req->param["name"], std::vector<uint64_t>(), [](const replica::column_family& cf) {
return map_reduce_cf_raw(ctx, req->param["name"], std::vector<uint64_t>(), [](const column_family& cf) {
return cf.sstable_count_per_level();
}, concat_sstable_count_per_level).then([](const std::vector<uint64_t>& res) {
return make_ready_future<json::json_return_type>(res);
@@ -980,7 +981,7 @@ void set_column_family(http_context& ctx, routes& r) {
auto key = req->get_query_param("key");
auto uuid = get_uuid(req->param["name"], ctx.db.local());

return ctx.db.map_reduce0([key, uuid] (replica::database& db) {
return ctx.db.map_reduce0([key, uuid] (database& db) {
return db.find_column_family(uuid).get_sstables_by_partition_key(key);
}, std::unordered_set<sstring>(),
[](std::unordered_set<sstring> a, std::unordered_set<sstring>&& b) mutable {
@@ -1012,7 +1013,7 @@ void set_column_family(http_context& ctx, routes& r) {
if (req->get_query_param("split_output") != "") {
fail(unimplemented::cause::API);
}
return foreach_column_family(ctx, req->param["name"], [](replica::column_family &cf) {
return foreach_column_family(ctx, req->param["name"], [](column_family &cf) {
return cf.compact_all_sstables();
}).then([] {
return make_ready_future<json::json_return_type>(json_void());

@@ -3,14 +3,27 @@
*/

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/

#pragma once

#include "api.hh"
#include "api/api-doc/column_family.json.hh"
#include "replica/database.hh"
#include "database.hh"
#include <seastar/core/future-util.hh>
#include <any>

@@ -18,17 +31,17 @@ namespace api {

void set_column_family(http_context& ctx, routes& r);

const table_id& get_uuid(const sstring& name, const replica::database& db);
future<> foreach_column_family(http_context& ctx, const sstring& name, std::function<void(replica::column_family&)> f);
const utils::UUID& get_uuid(const sstring& name, const database& db);
future<> foreach_column_family(http_context& ctx, const sstring& name, std::function<void(column_family&)> f);


template<class Mapper, class I, class Reducer>
future<I> map_reduce_cf_raw(http_context& ctx, const sstring& name, I init,
Mapper mapper, Reducer reducer) {
auto uuid = get_uuid(name, ctx.db.local());
using mapper_type = std::function<std::unique_ptr<std::any>(replica::database&)>;
using mapper_type = std::function<std::unique_ptr<std::any>(database&)>;
using reducer_type = std::function<std::unique_ptr<std::any>(std::unique_ptr<std::any>, std::unique_ptr<std::any>)>;
return ctx.db.map_reduce0(mapper_type([mapper, uuid](replica::database& db) {
return ctx.db.map_reduce0(mapper_type([mapper, uuid](database& db) {
return std::make_unique<std::any>(I(mapper(db.find_column_family(uuid))));
}), std::make_unique<std::any>(std::move(init)), reducer_type([reducer = std::move(reducer)] (std::unique_ptr<std::any> a, std::unique_ptr<std::any> b) mutable {
return std::make_unique<std::any>(I(reducer(std::any_cast<I>(std::move(*a)), std::any_cast<I>(std::move(*b)))));
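The std::any round-trip in map_reduce_cf_raw above is easier to see in isolation. Below is a minimal, Seastar-free sketch of the same type-erasure pattern; the container, the `map_reduce_erased` name, and the `int` inputs are illustrative only, not part of the patch:

```cpp
#include <any>
#include <functional>
#include <memory>
#include <vector>

// Sketch of the wrap/unwrap pattern above: values of the concrete type I
// travel through a type-erased interface as std::unique_ptr<std::any>,
// and std::any_cast<I> recovers them at each reduction step.
template <class I, class Mapper, class Reducer>
I map_reduce_erased(const std::vector<int>& inputs, I init, Mapper mapper, Reducer reducer) {
    using boxed = std::unique_ptr<std::any>;
    std::function<boxed(int)> wrapped_mapper = [mapper](int v) {
        return std::make_unique<std::any>(I(mapper(v)));
    };
    boxed acc = std::make_unique<std::any>(std::move(init));
    for (int v : inputs) {
        acc = std::make_unique<std::any>(I(reducer(
            std::any_cast<I>(std::move(*acc)),
            std::any_cast<I>(std::move(*wrapped_mapper(v))))));
    }
    return std::any_cast<I>(std::move(*acc));
}
// e.g. map_reduce_erased<long>({1, 2, 3}, 0L, [](int v) { return v; }, std::plus<long>()) == 6
```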
@@ -55,15 +68,15 @@ future<json::json_return_type> map_reduce_cf(http_context& ctx, const sstring& n
});
}

future<json::json_return_type> map_reduce_cf_time_histogram(http_context& ctx, const sstring& name, std::function<utils::time_estimated_histogram(const replica::column_family&)> f);
future<json::json_return_type> map_reduce_cf_time_histogram(http_context& ctx, const sstring& name, std::function<utils::time_estimated_histogram(const column_family&)> f);

struct map_reduce_column_families_locally {
std::any init;
std::function<std::unique_ptr<std::any>(replica::column_family&)> mapper;
std::function<std::unique_ptr<std::any>(column_family&)> mapper;
std::function<std::unique_ptr<std::any>(std::unique_ptr<std::any>, std::unique_ptr<std::any>)> reducer;
future<std::unique_ptr<std::any>> operator()(replica::database& db) const {
future<std::unique_ptr<std::any>> operator()(database& db) const {
auto res = seastar::make_lw_shared<std::unique_ptr<std::any>>(std::make_unique<std::any>(init));
return do_for_each(db.get_column_families(), [res, this](const std::pair<table_id, seastar::lw_shared_ptr<replica::table>>& i) {
return do_for_each(db.get_column_families(), [res, this](const std::pair<utils::UUID, seastar::lw_shared_ptr<table>>& i) {
*res = reducer(std::move(*res), mapper(*i.second.get()));
}).then([res] {
return std::move(*res);
@@ -74,9 +87,9 @@ struct map_reduce_column_families_locally {
template<class Mapper, class I, class Reducer>
future<I> map_reduce_cf_raw(http_context& ctx, I init,
Mapper mapper, Reducer reducer) {
using mapper_type = std::function<std::unique_ptr<std::any>(replica::column_family&)>;
using mapper_type = std::function<std::unique_ptr<std::any>(column_family&)>;
using reducer_type = std::function<std::unique_ptr<std::any>(std::unique_ptr<std::any>, std::unique_ptr<std::any>)>;
auto wrapped_mapper = mapper_type([mapper = std::move(mapper)] (replica::column_family& cf) mutable {
auto wrapped_mapper = mapper_type([mapper = std::move(mapper)] (column_family& cf) mutable {
return std::make_unique<std::any>(I(mapper(cf)));
});
auto wrapped_reducer = reducer_type([reducer = std::move(reducer)] (std::unique_ptr<std::any> a, std::unique_ptr<std::any> b) mutable {
@@ -98,10 +111,10 @@ future<json::json_return_type> map_reduce_cf(http_context& ctx, I init,
}

future<json::json_return_type> get_cf_stats(http_context& ctx, const sstring& name,
int64_t replica::column_family_stats::*f);
int64_t column_family_stats::*f);

future<json::json_return_type> get_cf_stats(http_context& ctx,
int64_t replica::column_family_stats::*f);
int64_t column_family_stats::*f);


std::tuple<sstring, sstring> parse_fully_qualified_cf_name(sstring name);

@@ -3,13 +3,26 @@
*/

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/

#include "commitlog.hh"
|
||||
#include "db/commitlog/commitlog.hh"
|
||||
#include "api/api-doc/commitlog.json.hh"
|
||||
#include "replica/database.hh"
|
||||
#include "database.hh"
|
||||
#include <vector>
|
||||
|
||||
namespace api {
|
||||
@@ -18,7 +31,7 @@ template<typename T>
|
||||
static auto acquire_cl_metric(http_context& ctx, std::function<T (db::commitlog*)> func) {
|
||||
typedef T ret_type;
|
||||
|
||||
return ctx.db.map_reduce0([func = std::move(func)](replica::database& db) {
|
||||
return ctx.db.map_reduce0([func = std::move(func)](database& db) {
|
||||
if (db.commitlog() == nullptr) {
|
||||
return make_ready_future<ret_type>();
|
||||
}
|
||||
@@ -34,7 +47,7 @@ void set_commitlog(http_context& ctx, routes& r) {
|
||||
auto res = make_shared<std::vector<sstring>>();
|
||||
return ctx.db.map_reduce([res](std::vector<sstring> names) {
|
||||
res->insert(res->end(), names.begin(), names.end());
|
||||
}, [](replica::database& db) {
|
||||
}, [](database& db) {
|
||||
if (db.commitlog() == nullptr) {
|
||||
return make_ready_future<std::vector<sstring>>(std::vector<sstring>());
|
||||
}
|
||||
|
||||
@@ -3,7 +3,20 @@
*/

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/

#pragma once

@@ -3,19 +3,28 @@
*/

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/

#include <seastar/core/coroutine.hh>

#include "compaction_manager.hh"
#include "compaction/compaction_manager.hh"
#include "api/api-doc/compaction_manager.json.hh"
#include "db/system_keyspace.hh"
#include "column_family.hh"
#include "unimplemented.hh"
#include "storage_service.hh"

#include <utility>

namespace api {
@@ -25,7 +34,7 @@ using namespace json;

static future<json::json_return_type> get_cm_stats(http_context& ctx,
int64_t compaction_manager::stats::*f) {
return ctx.db.map_reduce0([f](replica::database& db) {
return ctx.db.map_reduce0([f](database& db) {
return db.get_compaction_manager().get_stats().*f;
}, int64_t(0), std::plus<int64_t>()).then([](const int64_t& res) {
return make_ready_future<json::json_return_type>(res);
@@ -44,7 +53,7 @@ static std::unordered_map<std::pair<sstring, sstring>, uint64_t, utils::tuple_ha

void set_compaction_manager(http_context& ctx, routes& r) {
cm::get_compactions.set(r, [&ctx] (std::unique_ptr<request> req) {
return ctx.db.map_reduce0([](replica::database& db) {
return ctx.db.map_reduce0([](database& db) {
std::vector<cm::summary> summaries;
const compaction_manager& cm = db.get_compaction_manager();

@@ -66,11 +75,11 @@ void set_compaction_manager(http_context& ctx, routes& r) {
});

cm::get_pending_tasks_by_table.set(r, [&ctx] (std::unique_ptr<request> req) {
return ctx.db.map_reduce0([&ctx](replica::database& db) {
return ctx.db.map_reduce0([&ctx](database& db) {
return do_with(std::unordered_map<std::pair<sstring, sstring>, uint64_t, utils::tuple_hash>(), [&ctx, &db](std::unordered_map<std::pair<sstring, sstring>, uint64_t, utils::tuple_hash>& tasks) {
return do_for_each(db.get_column_families(), [&tasks](const std::pair<table_id, seastar::lw_shared_ptr<replica::table>>& i) {
replica::table& cf = *i.second.get();
tasks[std::make_pair(cf.schema()->ks_name(), cf.schema()->cf_name())] = cf.get_compaction_strategy().estimated_pending_compactions(cf.as_table_state());
return do_for_each(db.get_column_families(), [&tasks](const std::pair<utils::UUID, seastar::lw_shared_ptr<table>>& i) {
table& cf = *i.second.get();
tasks[std::make_pair(cf.schema()->ks_name(), cf.schema()->cf_name())] = cf.get_compaction_strategy().estimated_pending_compactions(cf);
return make_ready_future<>();
}).then([&tasks] {
return std::move(tasks);
@@ -100,34 +109,17 @@ void set_compaction_manager(http_context& ctx, routes& r) {

cm::stop_compaction.set(r, [&ctx] (std::unique_ptr<request> req) {
auto type = req->get_query_param("type");
return ctx.db.invoke_on_all([type] (replica::database& db) {
return ctx.db.invoke_on_all([type] (database& db) {
auto& cm = db.get_compaction_manager();
return cm.stop_compaction(type);
cm.stop_compaction(type);
}).then([] {
return make_ready_future<json::json_return_type>(json_void());
});
});

cm::stop_keyspace_compaction.set(r, [&ctx] (std::unique_ptr<request> req) -> future<json::json_return_type> {
auto ks_name = validate_keyspace(ctx, req->param);
auto table_names = parse_tables(ks_name, ctx, req->query_parameters, "tables");
if (table_names.empty()) {
table_names = map_keys(ctx.db.local().find_keyspace(ks_name).metadata().get()->cf_meta_data());
}
auto type = req->get_query_param("type");
co_await ctx.db.invoke_on_all([&ks_name, &table_names, type] (replica::database& db) {
auto& cm = db.get_compaction_manager();
return parallel_for_each(table_names, [&db, &cm, &ks_name, type] (sstring& table_name) {
auto& t = db.find_column_family(ks_name, table_name);
return cm.stop_compaction(type, &t.as_table_state());
});
});
co_return json_void();
});

cm::get_pending_tasks.set(r, [&ctx] (std::unique_ptr<request> req) {
return map_reduce_cf(ctx, int64_t(0), [](replica::column_family& cf) {
return cf.get_compaction_strategy().estimated_pending_compactions(cf.as_table_state());
return map_reduce_cf(ctx, int64_t(0), [](column_family& cf) {
return cf.get_compaction_strategy().estimated_pending_compactions(cf);
}, std::plus<int64_t>());
});


@@ -3,7 +3,20 @@
*/

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/

#pragma once

@@ -3,7 +3,20 @@
*/

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/

#include "api/config.hh"

@@ -3,7 +3,20 @@
*/

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/

#pragma once

@@ -3,66 +3,46 @@
*/

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/

#include "locator/token_metadata.hh"
|
||||
#include "locator/snitch_base.hh"
|
||||
#include "locator/production_snitch_base.hh"
|
||||
#include "endpoint_snitch.hh"
|
||||
#include "api/api-doc/endpoint_snitch_info.json.hh"
|
||||
#include "api/api-doc/storage_service.json.hh"
|
||||
#include "utils/fb_utilities.hh"
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_endpoint_snitch(http_context& ctx, routes& r, sharded<locator::snitch_ptr>& snitch) {
|
||||
void set_endpoint_snitch(http_context& ctx, routes& r) {
|
||||
static auto host_or_broadcast = [](const_req req) {
|
||||
auto host = req.get_query_param("host");
|
||||
return host.empty() ? gms::inet_address(utils::fb_utilities::get_broadcast_address()) : gms::inet_address(host);
|
||||
};
|
||||
|
||||
httpd::endpoint_snitch_info_json::get_datacenter.set(r, [&ctx](const_req req) {
|
||||
auto& topology = ctx.shared_token_metadata.local().get()->get_topology();
|
||||
auto ep = host_or_broadcast(req);
|
||||
if (!topology.has_endpoint(ep, locator::topology::pending::yes)) {
|
||||
// Cannot return error here, nodetool status can race, request
|
||||
// info about just-left node and not handle it nicely
|
||||
return sstring(locator::production_snitch_base::default_dc);
|
||||
}
|
||||
return topology.get_datacenter(ep);
|
||||
httpd::endpoint_snitch_info_json::get_datacenter.set(r, [](const_req req) {
|
||||
return locator::i_endpoint_snitch::get_local_snitch_ptr()->get_datacenter(host_or_broadcast(req));
|
||||
});
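The comment above explains why an unknown endpoint yields a default rather than an error: nodetool can race with a node that has just left the ring. A minimal sketch of that lookup-with-fallback pattern follows; the map, function name, and parameters are purely illustrative stand-ins for the topology lookup:

```cpp
#include <string>
#include <unordered_map>

// Resolve an endpoint's datacenter, returning a default instead of failing
// when the endpoint is unknown -- e.g. a node that just left the ring.
// dc_by_endpoint and default_dc are stand-ins, not ScyllaDB APIs.
std::string datacenter_or_default(const std::unordered_map<std::string, std::string>& dc_by_endpoint,
                                  const std::string& endpoint,
                                  const std::string& default_dc) {
    auto it = dc_by_endpoint.find(endpoint);
    return it != dc_by_endpoint.end() ? it->second : default_dc;
}
```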

httpd::endpoint_snitch_info_json::get_rack.set(r, [&ctx](const_req req) {
auto& topology = ctx.shared_token_metadata.local().get()->get_topology();
auto ep = host_or_broadcast(req);
if (!topology.has_endpoint(ep, locator::topology::pending::yes)) {
// Cannot return an error here: nodetool status can race, requesting
// info about a just-left node, and does not handle errors nicely
return sstring(locator::production_snitch_base::default_rack);
}
return topology.get_rack(ep);
httpd::endpoint_snitch_info_json::get_rack.set(r, [](const_req req) {
return locator::i_endpoint_snitch::get_local_snitch_ptr()->get_rack(host_or_broadcast(req));
});

httpd::endpoint_snitch_info_json::get_snitch_name.set(r, [&snitch] (const_req req) {
return snitch.local()->get_name();
httpd::endpoint_snitch_info_json::get_snitch_name.set(r, [] (const_req req) {
return locator::i_endpoint_snitch::get_local_snitch_ptr()->get_name();
});

httpd::storage_service_json::update_snitch.set(r, [&snitch](std::unique_ptr<request> req) {
locator::snitch_config cfg;
cfg.name = req->get_query_param("ep_snitch_class_name");
return locator::i_endpoint_snitch::reset_snitch(snitch, cfg).then([] {
return make_ready_future<json::json_return_type>(json::json_void());
});
});

}

void unset_endpoint_snitch(http_context& ctx, routes& r) {
httpd::endpoint_snitch_info_json::get_datacenter.unset(r);
httpd::endpoint_snitch_info_json::get_rack.unset(r);
httpd::endpoint_snitch_info_json::get_snitch_name.unset(r);
httpd::storage_service_json::update_snitch.unset(r);
}

}

@@ -3,20 +3,28 @@
*/

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/

#pragma once

#include "api.hh"

namespace locator {
class snitch_ptr;
}

namespace api {

void set_endpoint_snitch(http_context& ctx, routes& r, sharded<locator::snitch_ptr>&);
void unset_endpoint_snitch(http_context& ctx, routes& r);
void set_endpoint_snitch(http_context& ctx, routes& r);

}

@@ -3,7 +3,20 @@
*/

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/

#include "api/api-doc/error_injection.json.hh"
|
||||
@@ -12,7 +25,7 @@
|
||||
#include <seastar/http/exception.hh>
|
||||
#include "log.hh"
|
||||
#include "utils/error_injection.hh"
|
||||
#include <seastar/core/future-util.hh>
|
||||
#include "seastar/core/future-util.hh"
|
||||
|
||||
namespace api {
|
||||
|
||||
|
||||
@@ -3,7 +3,20 @@
*/

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/

#pragma once

@@ -3,7 +3,20 @@
*/

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/

#include "failure_detector.hh"
|
||||
@@ -18,7 +31,7 @@ namespace fd = httpd::failure_detector_json;
|
||||
void set_failure_detector(http_context& ctx, routes& r, gms::gossiper& g) {
|
||||
fd::get_all_endpoint_states.set(r, [&g](std::unique_ptr<request> req) {
|
||||
std::vector<fd::endpoint_state> res;
|
||||
for (auto i : g.get_endpoint_states()) {
|
||||
for (auto i : g.endpoint_state_map) {
|
||||
fd::endpoint_state val;
|
||||
val.addrs = boost::lexical_cast<std::string>(i.first);
|
||||
val.is_alive = i.second.is_alive();
|
||||
@@ -40,53 +53,54 @@ void set_failure_detector(http_context& ctx, routes& r, gms::gossiper& g) {
|
||||
});
|
||||
|
||||
fd::get_up_endpoint_count.set(r, [&g](std::unique_ptr<request> req) {
|
||||
int res = g.get_up_endpoint_count();
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
return gms::get_up_endpoint_count(g).then([](int res) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
});
|
||||
|
||||
fd::get_down_endpoint_count.set(r, [&g](std::unique_ptr<request> req) {
|
||||
int res = g.get_down_endpoint_count();
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
return gms::get_down_endpoint_count(g).then([](int res) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
});
|
||||
|
||||
fd::get_phi_convict_threshold.set(r, [] (std::unique_ptr<request> req) {
|
||||
return make_ready_future<json::json_return_type>(8);
|
||||
return gms::get_phi_convict_threshold().then([](double res) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
});
|
||||
|
||||
fd::get_simple_states.set(r, [&g] (std::unique_ptr<request> req) {
|
||||
std::map<sstring, sstring> nodes_status;
|
||||
for (auto& entry : g.get_endpoint_states()) {
|
||||
nodes_status.emplace(entry.first.to_sstring(), entry.second.is_alive() ? "UP" : "DOWN");
|
||||
}
|
||||
return make_ready_future<json::json_return_type>(map_to_key_value<fd::mapper>(nodes_status));
|
||||
return gms::get_simple_states(g).then([](const std::map<sstring, sstring>& map) {
|
||||
return make_ready_future<json::json_return_type>(map_to_key_value<fd::mapper>(map));
|
||||
});
|
||||
});
|
||||
|
||||
fd::set_phi_convict_threshold.set(r, [](std::unique_ptr<request> req) {
|
||||
double phi = atof(req->get_query_param("phi").c_str());
|
||||
return make_ready_future<json::json_return_type>("");
|
||||
return gms::set_phi_convict_threshold(phi).then([]() {
|
||||
return make_ready_future<json::json_return_type>("");
|
||||
});
|
||||
});
|
||||
|
||||
fd::get_endpoint_state.set(r, [&g] (std::unique_ptr<request> req) {
|
||||
auto* state = g.get_endpoint_state_for_endpoint_ptr(gms::inet_address(req->param["addr"]));
|
||||
if (!state) {
|
||||
return make_ready_future<json::json_return_type>(format("unknown endpoint {}", req->param["addr"]));
|
||||
}
|
||||
std::stringstream ss;
|
||||
g.append_endpoint_state(ss, *state);
|
||||
return make_ready_future<json::json_return_type>(sstring(ss.str()));
|
||||
return get_endpoint_state(g, req->param["addr"]).then([](const sstring& state) {
|
||||
return make_ready_future<json::json_return_type>(state);
|
||||
});
|
||||
});
|
||||
|
||||
fd::get_endpoint_phi_values.set(r, [](std::unique_ptr<request> req) {
|
||||
std::map<gms::inet_address, gms::arrival_window> map;
|
||||
std::vector<fd::endpoint_phi_value> res;
|
||||
auto now = gms::arrival_window::clk::now();
|
||||
for (auto& p : map) {
|
||||
fd::endpoint_phi_value val;
|
||||
val.endpoint = p.first.to_sstring();
|
||||
val.phi = p.second.phi(now);
|
||||
res.emplace_back(std::move(val));
|
||||
}
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
return gms::get_arrival_samples().then([](std::map<gms::inet_address, gms::arrival_window> map) {
|
||||
std::vector<fd::endpoint_phi_value> res;
|
||||
auto now = gms::arrival_window::clk::now();
|
||||
for (auto& p : map) {
|
||||
fd::endpoint_phi_value val;
|
||||
val.endpoint = p.first.to_sstring();
|
||||
val.phi = p.second.phi(now);
|
||||
res.emplace_back(std::move(val));
|
||||
}
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -3,7 +3,20 @@
*/

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/

#pragma once

@@ -3,7 +3,20 @@
*/

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/

#include "gossiper.hh"

@@ -3,7 +3,20 @@
*/

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/

#pragma once

@@ -3,7 +3,20 @@
*/

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/

#include <algorithm>

@@ -3,7 +3,20 @@
*/

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/

#pragma once

19 api/lsa.cc
@@ -3,7 +3,20 @@
*/

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/

#include "api/api-doc/lsa.json.hh"
|
||||
@@ -13,7 +26,7 @@
|
||||
#include <seastar/http/exception.hh>
|
||||
#include "utils/logalloc.hh"
|
||||
#include "log.hh"
|
||||
#include "replica/database.hh"
|
||||
#include "database.hh"
|
||||
|
||||
namespace api {
|
||||
|
||||
@@ -22,7 +35,7 @@ static logging::logger alogger("lsa-api");
|
||||
void set_lsa(http_context& ctx, routes& r) {
|
||||
httpd::lsa_json::lsa_compact.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
alogger.info("Triggering compaction");
|
||||
return ctx.db.invoke_on_all([] (replica::database&) {
|
||||
return ctx.db.invoke_on_all([] (database&) {
|
||||
logalloc::shard_tracker().reclaim(std::numeric_limits<size_t>::max());
|
||||
}).then([] {
|
||||
return json::json_return_type(json::json_void());
|
||||
|
||||
15 api/lsa.hh
@@ -3,7 +3,20 @@
*/

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/

#pragma once

@@ -3,7 +3,20 @@
*/

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/

#include "messaging_service.hh"

@@ -3,7 +3,20 @@
*/

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/

#pragma once

@@ -3,7 +3,20 @@
*/

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/

#include "storage_proxy.hh"
|
||||
@@ -13,8 +26,8 @@
|
||||
#include "service/storage_service.hh"
|
||||
#include "db/config.hh"
|
||||
#include "utils/histogram.hh"
|
||||
#include "replica/database.hh"
|
||||
#include <seastar/core/scheduling_specific.hh>
|
||||
#include "database.hh"
|
||||
#include "seastar/core/scheduling_specific.hh"
|
||||
|
||||
namespace api {
|
||||
|
||||
@@ -22,9 +35,6 @@ namespace sp = httpd::storage_proxy_json;
|
||||
using proxy = service::storage_proxy;
|
||||
using namespace json;
|
||||
|
||||
utils::time_estimated_histogram timed_rate_moving_average_summary_merge(utils::time_estimated_histogram a, const utils::timed_rate_moving_average_summary_and_histogram& b) {
|
||||
return a.merge(b.histogram());
|
||||
}
|
||||
|
||||
/**
|
||||
* This function implement a two dimentional map reduce where
|
||||
@@ -58,10 +68,10 @@ future<V> two_dimensional_map_reduce(distributed<service::storage_proxy>& d,
|
||||
* @param initial_value - the initial value to use for both aggregations* @return
|
||||
* @return A future that resolves to the result of the aggregation.
|
||||
*/
|
||||
template<typename V, typename Reducer, typename F, typename C>
|
||||
template<typename V, typename Reducer, typename F>
|
||||
future<V> two_dimensional_map_reduce(distributed<service::storage_proxy>& d,
|
||||
C F::*f, Reducer reducer, V initial_value) {
|
||||
return two_dimensional_map_reduce(d, [f] (F& stats) -> V {
|
||||
V F::*f, Reducer reducer, V initial_value) {
|
||||
return two_dimensional_map_reduce(d, [f] (F& stats) {
|
||||
return stats.*f;
|
||||
}, reducer, initial_value);
|
||||
}
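The overload change above (a member pointer `V F::*f` instead of the generic `C F::*f`) delegates to the callable form by wrapping the pointer-to-member in a lambda. A Seastar-free sketch of that delegation, with a hypothetical per-shard stats struct standing in for storage_proxy_stats:

```cpp
#include <functional>
#include <vector>

struct stats { double reads = 0; };  // hypothetical per-shard stats

// Callable form: apply an accessor to each shard's stats and reduce.
template <typename V, typename Reducer, typename Fn>
V two_dim_reduce(std::vector<stats>& shards, Fn accessor, Reducer reducer, V init) {
    for (auto& s : shards) {
        init = reducer(init, accessor(s));
    }
    return init;
}

// Member-pointer form: forwards to the callable form by wrapping the
// pointer-to-member in a lambda, exactly as the patched overload does.
template <typename V, typename Reducer, typename F>
V two_dim_reduce(std::vector<stats>& shards, V F::*f, Reducer reducer, V init) {
    return two_dim_reduce(shards, [f](F& s) { return s.*f; }, reducer, init);
}
```

Overload resolution picks the member-pointer form for `&stats::reads` because it is more specialized, while lambdas still bind to the generic form.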
@@ -115,10 +125,10 @@ utils_json::estimated_histogram time_to_json_histogram(const utils::time_estimat
return res;
}

static future<json::json_return_type> sum_estimated_histogram(http_context& ctx, utils::timed_rate_moving_average_summary_and_histogram service::storage_proxy_stats::stats::*f) {
return two_dimensional_map_reduce(ctx.sp, [f] (service::storage_proxy_stats::stats& stats) {
return (stats.*f).histogram();
}, utils::time_estimated_histogram_merge, utils::time_estimated_histogram()).then([](const utils::time_estimated_histogram& val) {
static future<json::json_return_type> sum_estimated_histogram(http_context& ctx, utils::time_estimated_histogram service::storage_proxy_stats::stats::*f) {

return two_dimensional_map_reduce(ctx.sp, f, utils::time_estimated_histogram_merge,
utils::time_estimated_histogram()).then([](const utils::time_estimated_histogram& val) {
return make_ready_future<json::json_return_type>(time_to_json_histogram(val));
});
}
@@ -133,7 +143,7 @@ static future<json::json_return_type> sum_estimated_histogram(http_context& ctx
});
}

static future<json::json_return_type> total_latency(http_context& ctx, utils::timed_rate_moving_average_summary_and_histogram service::storage_proxy_stats::stats::*f) {
static future<json::json_return_type> total_latency(http_context& ctx, utils::timed_rate_moving_average_and_histogram service::storage_proxy_stats::stats::*f) {
return two_dimensional_map_reduce(ctx.sp, [f] (service::storage_proxy_stats::stats& stats) {
return (stats.*f).hist.mean * (stats.*f).hist.count;
}, std::plus<double>(), 0.0).then([](double val) {
@@ -153,7 +163,7 @@ static future<json::json_return_type> total_latency(http_context& ctx, utils::t
template<typename F>
future<json::json_return_type>
sum_histogram_stats_storage_proxy(distributed<proxy>& d,
utils::timed_rate_moving_average_summary_and_histogram F::*f) {
utils::timed_rate_moving_average_and_histogram F::*f) {
return two_dimensional_map_reduce(d, [f] (service::storage_proxy_stats::stats& stats) {
return (stats.*f).hist;
}, std::plus<utils::ihistogram>(), utils::ihistogram()).
@@ -173,7 +183,7 @@ sum_histogram_stats_storage_proxy(distributed<proxy>& d,
template<typename F>
future<json::json_return_type>
sum_timer_stats_storage_proxy(distributed<proxy>& d,
utils::timed_rate_moving_average_summary_and_histogram F::*f) {
utils::timed_rate_moving_average_and_histogram F::*f) {

return two_dimensional_map_reduce(d, [f] (service::storage_proxy_stats::stats& stats) {
return (stats.*f).rate();
@@ -494,14 +504,14 @@ void set_storage_proxy(http_context& ctx, routes& r, sharded<service::storage_se
});

sp::get_read_estimated_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
return sum_estimated_histogram(ctx, &service::storage_proxy_stats::stats::read);
return sum_estimated_histogram(ctx, &service::storage_proxy_stats::stats::estimated_read);
});

sp::get_read_latency.set(r, [&ctx](std::unique_ptr<request> req) {
return total_latency(ctx, &service::storage_proxy_stats::stats::read);
});
sp::get_write_estimated_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
return sum_estimated_histogram(ctx, &service::storage_proxy_stats::stats::write);
return sum_estimated_histogram(ctx, &service::storage_proxy_stats::stats::estimated_write);
});

sp::get_write_latency.set(r, [&ctx](std::unique_ptr<request> req) {

@@ -3,7 +3,20 @@
*/

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/

#pragma once

@@ -3,7 +3,20 @@
*/

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/

#include "storage_service.hh"
|
||||
@@ -11,7 +24,6 @@
|
||||
#include "db/config.hh"
|
||||
#include "db/schema_tables.hh"
|
||||
#include "utils/hash.hh"
|
||||
#include <optional>
|
||||
#include <sstream>
|
||||
#include <time.h>
|
||||
#include <algorithm>
|
||||
@@ -25,17 +37,16 @@
|
||||
#include "db/commitlog/commitlog.hh"
|
||||
#include "gms/gossiper.hh"
|
||||
#include "db/system_keyspace.hh"
|
||||
#include <seastar/http/exception.hh>
|
||||
#include "seastar/http/exception.hh"
|
||||
#include <seastar/core/coroutine.hh>
|
||||
#include <seastar/coroutine/parallel_for_each.hh>
|
||||
#include "repair/row_level.hh"
|
||||
#include "repair/repair.hh"
|
||||
#include "locator/snitch_base.hh"
|
||||
#include "column_family.hh"
|
||||
#include "log.hh"
|
||||
#include "release.hh"
|
||||
#include "compaction/compaction_manager.hh"
|
||||
#include "sstables/sstables.hh"
|
||||
#include "replica/database.hh"
|
||||
#include "database.hh"
|
||||
#include "db/extensions.hh"
|
||||
#include "db/snapshot-ctl.hh"
|
||||
#include "transport/controller.hh"
|
||||
@@ -58,46 +69,11 @@ const locator::token_metadata& http_context::get_token_metadata() {
|
||||
namespace ss = httpd::storage_service_json;
|
||||
using namespace json;
|
||||
|
||||
sstring validate_keyspace(http_context& ctx, sstring ks_name) {
|
||||
if (ctx.db.local().has_keyspace(ks_name)) {
|
||||
return ks_name;
|
||||
static sstring validate_keyspace(http_context& ctx, const parameters& param) {
|
||||
if (ctx.db.local().has_keyspace(param["keyspace"])) {
|
||||
return param["keyspace"];
|
||||
}
|
||||
throw bad_param_exception(replica::no_such_keyspace(ks_name).what());
|
||||
}
|
||||
|
||||
sstring validate_keyspace(http_context& ctx, const parameters& param) {
|
||||
return validate_keyspace(ctx, param["keyspace"]);
|
||||
}
|
||||
|
||||
locator::host_id validate_host_id(const sstring& param) {
|
||||
auto hoep = locator::host_id_or_endpoint(param, locator::host_id_or_endpoint::param_type::host_id);
|
||||
return hoep.id;
|
||||
}
|
||||
|
||||
// splits a request parameter assumed to hold a comma-separated list of table names
|
||||
// verify that the tables are found, otherwise a bad_param_exception exception is thrown
|
||||
// containing the description of the respective no_such_column_family error.
|
||||
std::vector<sstring> parse_tables(const sstring& ks_name, http_context& ctx, sstring value) {
|
||||
if (value.empty()) {
|
||||
return map_keys(ctx.db.local().find_keyspace(ks_name).metadata().get()->cf_meta_data());
|
||||
}
|
||||
std::vector<sstring> names = split(value, ",");
|
||||
try {
|
||||
for (const auto& table_name : names) {
|
||||
ctx.db.local().find_column_family(ks_name, table_name);
|
||||
}
|
||||
} catch (const replica::no_such_column_family& e) {
|
||||
throw bad_param_exception(e.what());
|
||||
}
|
||||
return names;
|
||||
}
|
||||
|
||||
std::vector<sstring> parse_tables(const sstring& ks_name, http_context& ctx, const std::unordered_map<sstring, sstring>& query_params, sstring param_name) {
|
||||
auto it = query_params.find(param_name);
|
||||
if (it == query_params.end()) {
|
||||
return {};
|
||||
}
|
||||
return parse_tables(ks_name, ctx, it->second);
|
||||
throw bad_param_exception("Keyspace " + param["keyspace"] + " Does not exist");
|
||||
}
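A Seastar-free sketch of the split-and-validate behavior the new parse_tables helpers add; the known-names set and the standard exception type are stand-ins for the database lookup and bad_param_exception:

```cpp
#include <sstream>
#include <stdexcept>
#include <string>
#include <unordered_set>
#include <vector>

// Split a comma-separated list and verify every name is known,
// throwing on the first unknown name, as parse_tables does above.
std::vector<std::string> parse_names(const std::string& value,
                                     const std::unordered_set<std::string>& known) {
    std::vector<std::string> names;
    std::stringstream ss(value);
    for (std::string name; std::getline(ss, name, ',');) {
        if (!known.count(name)) {
            throw std::invalid_argument("no such table: " + name);
        }
        names.push_back(name);
    }
    return names;
}
```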
|
||||
|
||||
static ss::token_range token_range_endpoints_to_json(const dht::token_range_endpoints& d) {
|
||||
@@ -123,7 +99,7 @@ using ks_cf_func = std::function<future<json::json_return_type>(http_context&, s
|
||||
static auto wrap_ks_cf(http_context &ctx, ks_cf_func f) {
|
||||
return [&ctx, f = std::move(f)](std::unique_ptr<request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto column_families = parse_tables(keyspace, ctx, req->query_parameters, "cf");
|
||||
auto column_families = split_cf(req->get_query_param("cf"));
|
||||
if (column_families.empty()) {
|
||||
column_families = map_keys(ctx.db.local().find_keyspace(keyspace).metadata().get()->cf_meta_data());
|
||||
}
|
||||
@@ -162,22 +138,22 @@ seastar::future<json::json_return_type> run_toppartitions_query(db::toppartition
    });
}

future<json::json_return_type> set_tables_autocompaction(http_context& ctx, const sstring &keyspace, std::vector<sstring> tables, bool enabled) {
future<json::json_return_type> set_tables_autocompaction(http_context& ctx, service::storage_service& ss, const sstring &keyspace, std::vector<sstring> tables, bool enabled) {
    if (tables.empty()) {
        tables = map_keys(ctx.db.local().find_keyspace(keyspace).metadata().get()->cf_meta_data());
    }

    apilog.info("set_tables_autocompaction: enabled={} keyspace={} tables={}", enabled, keyspace, tables);
    return do_with(keyspace, std::move(tables), [&ctx, enabled] (const sstring &keyspace, const std::vector<sstring>& tables) {
        return ctx.db.invoke_on(0, [&ctx, &keyspace, &tables, enabled] (replica::database& db) {
            auto g = replica::database::autocompaction_toggle_guard(db);
            return ctx.db.invoke_on_all([&keyspace, &tables, enabled] (replica::database& db) {
        return ctx.db.invoke_on(0, [&ctx, &keyspace, &tables, enabled] (database& db) {
            auto g = database::autocompaction_toggle_guard(db);
            return ctx.db.invoke_on_all([&keyspace, &tables, enabled] (database& db) {
                return parallel_for_each(tables, [&db, &keyspace, enabled] (const sstring& table) {
                    replica::column_family& cf = db.find_column_family(keyspace, table);
                    column_family& cf = db.find_column_family(keyspace, table);
                    if (enabled) {
                        cf.enable_auto_compaction();
                    } else {
                        return cf.disable_auto_compaction();
                        cf.disable_auto_compaction();
                    }
                    return make_ready_future<>();
                });
@@ -190,25 +166,19 @@ future<json::json_return_type> set_tables_autocompaction(http_context& ctx, cons

void set_transport_controller(http_context& ctx, routes& r, cql_transport::controller& ctl) {
    ss::start_native_transport.set(r, [&ctl](std::unique_ptr<request> req) {
        return smp::submit_to(0, [&] {
            return ctl.start_server();
        }).then([] {
        return ctl.start_server().then([] {
            return make_ready_future<json::json_return_type>(json_void());
        });
    });

    ss::stop_native_transport.set(r, [&ctl](std::unique_ptr<request> req) {
        return smp::submit_to(0, [&] {
            return ctl.request_stop_server();
        }).then([] {
        return ctl.stop_server().then([] {
            return make_ready_future<json::json_return_type>(json_void());
        });
    });

    ss::is_native_transport_running.set(r, [&ctl] (std::unique_ptr<request> req) {
        return smp::submit_to(0, [&] {
            return !ctl.listen_addresses().empty();
        }).then([] (bool running) {
        return ctl.is_server_running().then([] (bool running) {
            return make_ready_future<json::json_return_type>(running);
        });
    });
@@ -222,25 +192,19 @@ void unset_transport_controller(http_context& ctx, routes& r) {

void set_rpc_controller(http_context& ctx, routes& r, thrift_controller& ctl) {
    ss::stop_rpc_server.set(r, [&ctl](std::unique_ptr<request> req) {
        return smp::submit_to(0, [&] {
            return ctl.request_stop_server();
        }).then([] {
        return ctl.stop_server().then([] {
            return make_ready_future<json::json_return_type>(json_void());
        });
    });

    ss::start_rpc_server.set(r, [&ctl](std::unique_ptr<request> req) {
        return smp::submit_to(0, [&] {
            return ctl.start_server();
        }).then([] {
        return ctl.start_server().then([] {
            return make_ready_future<json::json_return_type>(json_void());
        });
    });

    ss::is_rpc_server_running.set(r, [&ctl] (std::unique_ptr<request> req) {
        return smp::submit_to(0, [&] {
            return !ctl.listen_addresses().empty();
        }).then([] (bool running) {
        return ctl.is_server_running().then([] (bool running) {
            return make_ready_future<json::json_return_type>(running);
        });
    });
@@ -275,14 +239,14 @@ void set_repair(http_context& ctx, routes& r, sharded<repair_service>& repair) {
|
||||
});
|
||||
});
|
||||
|
||||
ss::get_active_repair_async.set(r, [&repair] (std::unique_ptr<request> req) {
|
||||
return repair.local().get_active_repairs().then([] (std::vector<int> res) {
|
||||
ss::get_active_repair_async.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return get_active_repairs(ctx.db).then([] (std::vector<int> res){
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
});
|
||||
|
||||
ss::repair_async_status.set(r, [&repair] (std::unique_ptr<request> req) {
|
||||
return repair.local().get_status(boost::lexical_cast<int>( req->get_query_param("id")))
|
||||
ss::repair_async_status.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return repair_get_status(ctx.db, boost::lexical_cast<int>( req->get_query_param("id")))
|
||||
.then_wrapped([] (future<repair_status>&& fut) {
|
||||
ss::ns_repair_async_status::return_type_wrapper res;
|
||||
try {
|
||||
@@ -294,7 +258,7 @@ void set_repair(http_context& ctx, routes& r, sharded<repair_service>& repair) {
|
||||
});
|
||||
});
|
||||
|
||||
ss::repair_await_completion.set(r, [&repair] (std::unique_ptr<request> req) {
|
||||
ss::repair_await_completion.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
int id;
|
||||
using clock = std::chrono::steady_clock;
|
||||
clock::time_point expire;
|
||||
@@ -315,7 +279,7 @@ void set_repair(http_context& ctx, routes& r, sharded<repair_service>& repair) {
|
||||
} catch (std::exception& e) {
|
||||
return make_exception_future<json::json_return_type>(httpd::bad_param_exception(e.what()));
|
||||
}
|
||||
return repair.local().await_completion(id, expire)
|
||||
return repair_await_completion(ctx.db, id, expire)
|
||||
.then_wrapped([] (future<repair_status>&& fut) {
|
||||
ss::ns_repair_async_status::return_type_wrapper res;
|
||||
try {
|
||||
@@ -327,14 +291,14 @@ void set_repair(http_context& ctx, routes& r, sharded<repair_service>& repair) {
|
||||
});
|
||||
});
|
||||
|
||||
ss::force_terminate_all_repair_sessions.set(r, [&repair] (std::unique_ptr<request> req) {
|
||||
return repair.local().abort_all().then([] {
|
||||
ss::force_terminate_all_repair_sessions.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return repair_abort_all(ctx.db).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
ss::force_terminate_all_repair_sessions_new.set(r, [&repair] (std::unique_ptr<request> req) {
|
||||
return repair.local().abort_all().then([] {
|
||||
ss::force_terminate_all_repair_sessions_new.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return repair_abort_all(ctx.db).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
@@ -401,10 +365,11 @@ static future<json::json_return_type> describe_ring_as_json(sharded<service::sto
|
||||
co_return json::json_return_type(stream_range_as_array(co_await ss.local().describe_ring(keyspace), token_range_endpoints_to_json));
|
||||
}
|
||||
|
||||
void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_service>& ss, gms::gossiper& g, sharded<cdc::generation_service>& cdc_gs, sharded<db::system_keyspace>& sys_ks) {
|
||||
ss::local_hostid.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
auto id = ctx.db.local().get_config().host_id;
|
||||
return make_ready_future<json::json_return_type>(id.to_sstring());
|
||||
void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_service>& ss, gms::gossiper& g, sharded<cdc::generation_service>& cdc_gs) {
|
||||
ss::local_hostid.set(r, [](std::unique_ptr<request> req) {
|
||||
return db::system_keyspace::load_local_host_id().then([](const utils::UUID& id) {
|
||||
return make_ready_future<json::json_return_type>(id.to_sstring());
|
||||
});
|
||||
});
|
||||
|
||||
ss::get_tokens.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
@@ -520,10 +485,10 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
return ctx.db.local().get_config().saved_caches_directory();
|
||||
});
|
||||
|
||||
ss::get_range_to_endpoint_map.set(r, [&ctx, &ss](std::unique_ptr<request> req) -> future<json::json_return_type> {
|
||||
ss::get_range_to_endpoint_map.set(r, [&ctx, &ss](std::unique_ptr<request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
std::vector<ss::maplist_mapper> res;
|
||||
co_return stream_range_as_array(co_await ss.local().get_range_to_address_map(keyspace),
|
||||
return make_ready_future<json::json_return_type>(stream_range_as_array(ss.local().get_range_to_address_map(keyspace),
|
||||
[](const std::pair<dht::token_range, inet_address_vector_replica_set>& entry){
|
||||
ss::maplist_mapper m;
|
||||
if (entry.first.start()) {
|
||||
@@ -540,7 +505,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
m.value.push(address.to_sstring());
|
||||
}
|
||||
return m;
|
||||
});
|
||||
}));
|
||||
});
|
||||
|
||||
ss::get_pending_range_to_endpoint_map.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
@@ -552,13 +517,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
});
|
||||
|
||||
ss::describe_any_ring.set(r, [&ctx, &ss](std::unique_ptr<request> req) {
|
||||
// Find an arbitrary non-system keyspace.
|
||||
auto keyspaces = ctx.db.local().get_non_local_strategy_keyspaces();
|
||||
if (keyspaces.empty()) {
|
||||
throw std::runtime_error("No keyspace provided and no non system kespace exist");
|
||||
}
|
||||
auto ks = keyspaces[0];
|
||||
return describe_ring_as_json(ss, ks);
|
||||
return describe_ring_as_json(ss, "");
|
||||
});
|
||||
|
||||
ss::describe_ring.set(r, [&ctx, &ss](std::unique_ptr<request> req) {
|
||||
@@ -571,7 +530,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
});
|
||||
|
||||
ss::get_load.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return get_cf_stats(ctx, &replica::column_family_stats::live_disk_space_used);
|
||||
return get_cf_stats(ctx, &column_family_stats::live_disk_space_used);
|
||||
});
|
||||
|
||||
ss::get_load_map.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
@@ -611,16 +570,16 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
|
||||
ss::force_keyspace_compaction.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto column_families = parse_tables(keyspace, ctx, req->query_parameters, "cf");
|
||||
auto column_families = split_cf(req->get_query_param("cf"));
|
||||
if (column_families.empty()) {
|
||||
column_families = map_keys(ctx.db.local().find_keyspace(keyspace).metadata().get()->cf_meta_data());
|
||||
}
|
||||
return ctx.db.invoke_on_all([keyspace, column_families] (replica::database& db) -> future<> {
|
||||
auto table_ids = boost::copy_range<std::vector<table_id>>(column_families | boost::adaptors::transformed([&] (auto& cf_name) {
|
||||
return ctx.db.invoke_on_all([keyspace, column_families] (database& db) -> future<> {
|
||||
auto table_ids = boost::copy_range<std::vector<utils::UUID>>(column_families | boost::adaptors::transformed([&] (auto& cf_name) {
|
||||
return db.find_uuid(keyspace, cf_name);
|
||||
}));
|
||||
// major compact smaller tables first, to increase chances of success if low on space.
|
||||
std::ranges::sort(table_ids, std::less<>(), [&] (const table_id& id) {
|
||||
std::ranges::sort(table_ids, std::less<>(), [&] (const utils::UUID& id) {
|
||||
return db.find_column_family(id).get_stats().live_disk_space_used;
|
||||
});
|
||||
// as a table can be dropped during loop below, let's find it before issuing major compaction request.
|
||||
@@ -635,7 +594,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
|
||||
ss::force_keyspace_cleanup.set(r, [&ctx, &ss](std::unique_ptr<request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto column_families = parse_tables(keyspace, ctx, req->query_parameters, "cf");
|
||||
auto column_families = split_cf(req->get_query_param("cf"));
|
||||
if (column_families.empty()) {
|
||||
column_families = map_keys(ctx.db.local().find_keyspace(keyspace).metadata().get()->cf_meta_data());
|
||||
}
|
||||
@@ -645,20 +604,19 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
return make_exception_future<json::json_return_type>(
|
||||
std::runtime_error("Can not perform cleanup operation when topology changes"));
|
||||
}
|
||||
return ctx.db.invoke_on_all([keyspace, column_families] (replica::database& db) -> future<> {
|
||||
auto table_ids = boost::copy_range<std::vector<table_id>>(column_families | boost::adaptors::transformed([&] (auto& table_name) {
|
||||
return ctx.db.invoke_on_all([keyspace, column_families] (database& db) -> future<> {
|
||||
auto table_ids = boost::copy_range<std::vector<utils::UUID>>(column_families | boost::adaptors::transformed([&] (auto& table_name) {
|
||||
return db.find_uuid(keyspace, table_name);
|
||||
}));
|
||||
// cleanup smaller tables first, to increase chances of success if low on space.
|
||||
std::ranges::sort(table_ids, std::less<>(), [&] (const table_id& id) {
|
||||
std::ranges::sort(table_ids, std::less<>(), [&] (const utils::UUID& id) {
|
||||
return db.find_column_family(id).get_stats().live_disk_space_used;
|
||||
});
|
||||
auto& cm = db.get_compaction_manager();
|
||||
auto owned_ranges_ptr = compaction::make_owned_ranges_ptr(db.get_keyspace_local_ranges(keyspace));
|
||||
// as a table can be dropped during loop below, let's find it before issuing the cleanup request.
|
||||
for (auto& id : table_ids) {
|
||||
replica::table& t = db.find_column_family(id);
|
||||
co_await cm.perform_cleanup(owned_ranges_ptr, t.as_table_state());
|
||||
table& t = db.find_column_family(id);
|
||||
co_await cm.perform_cleanup(db, &t);
|
||||
}
|
||||
co_return;
|
||||
}).then([]{
|
||||
@@ -667,42 +625,33 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
});
|
||||
});
|
||||
|
||||
ss::perform_keyspace_offstrategy_compaction.set(r, wrap_ks_cf(ctx, [] (http_context& ctx, std::unique_ptr<request> req, sstring keyspace, std::vector<sstring> tables) -> future<json::json_return_type> {
|
||||
co_return co_await ctx.db.map_reduce0([&keyspace, &tables] (replica::database& db) -> future<bool> {
|
||||
bool needed = false;
|
||||
for (const auto& table : tables) {
|
||||
auto& t = db.find_column_family(keyspace, table);
|
||||
needed |= co_await t.perform_offstrategy_compaction();
|
||||
}
|
||||
co_return needed;
|
||||
}, false, std::plus<bool>());
|
||||
}));
|
||||
|
||||
ss::upgrade_sstables.set(r, wrap_ks_cf(ctx, [] (http_context& ctx, std::unique_ptr<request> req, sstring keyspace, std::vector<sstring> column_families) {
|
||||
bool exclude_current_version = req_param<bool>(*req, "exclude_current_version", false);
|
||||
|
||||
return ctx.db.invoke_on_all([=] (replica::database& db) {
|
||||
auto owned_ranges_ptr = compaction::make_owned_ranges_ptr(db.get_keyspace_local_ranges(keyspace));
|
||||
return ctx.db.invoke_on_all([=] (database& db) {
|
||||
return do_for_each(column_families, [=, &db](sstring cfname) {
|
||||
auto& cm = db.get_compaction_manager();
|
||||
auto& cf = db.find_column_family(keyspace, cfname);
|
||||
return cm.perform_sstable_upgrade(owned_ranges_ptr, cf.as_table_state(), exclude_current_version);
|
||||
return cm.perform_sstable_upgrade(db, &cf, exclude_current_version);
|
||||
});
|
||||
}).then([]{
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
}));
|
||||
|
||||
ss::force_keyspace_flush.set(r, [&ctx](std::unique_ptr<request> req) -> future<json::json_return_type> {
|
||||
ss::force_keyspace_flush.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto column_families = parse_tables(keyspace, ctx, req->query_parameters, "cf");
|
||||
auto& db = ctx.db;
|
||||
auto column_families = split_cf(req->get_query_param("cf"));
|
||||
if (column_families.empty()) {
|
||||
co_await replica::database::flush_keyspace_on_all_shards(db, keyspace);
|
||||
} else {
|
||||
co_await replica::database::flush_tables_on_all_shards(db, keyspace, std::move(column_families));
|
||||
column_families = map_keys(ctx.db.local().find_keyspace(keyspace).metadata().get()->cf_meta_data());
|
||||
}
|
||||
co_return json_void();
|
||||
return ctx.db.invoke_on_all([keyspace, column_families] (database& db) {
|
||||
return parallel_for_each(column_families, [&db, keyspace](const sstring& cf) mutable {
|
||||
return db.find_column_family(keyspace, cf).flush();
|
||||
});
|
||||
}).then([]{
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
@@ -720,23 +669,20 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
});
|
||||
|
||||
ss::remove_node.set(r, [&ss](std::unique_ptr<request> req) {
|
||||
auto host_id = validate_host_id(req->get_query_param("host_id"));
|
||||
auto host_id = req->get_query_param("host_id");
|
||||
std::vector<sstring> ignore_nodes_strs= split(req->get_query_param("ignore_nodes"), ",");
|
||||
auto ignore_nodes = std::list<locator::host_id_or_endpoint>();
|
||||
auto ignore_nodes = std::list<gms::inet_address>();
|
||||
for (std::string n : ignore_nodes_strs) {
|
||||
try {
|
||||
std::replace(n.begin(), n.end(), '\"', ' ');
|
||||
std::replace(n.begin(), n.end(), '\'', ' ');
|
||||
boost::trim_all(n);
|
||||
if (!n.empty()) {
|
||||
auto hoep = locator::host_id_or_endpoint(n);
|
||||
if (!ignore_nodes.empty() && hoep.has_host_id() != ignore_nodes.front().has_host_id()) {
|
||||
throw std::runtime_error("All nodes should be identified using the same method: either Host IDs or ip addresses.");
|
||||
}
|
||||
ignore_nodes.push_back(std::move(hoep));
|
||||
auto node = gms::inet_address(n);
|
||||
ignore_nodes.push_back(node);
|
||||
}
|
||||
} catch (...) {
|
||||
throw std::runtime_error(format("Failed to parse ignore_nodes parameter: ignore_nodes={}, node={}: {}", ignore_nodes_strs, n, std::current_exception()));
|
||||
throw std::runtime_error(format("Failed to parse ignore_nodes parameter: ignore_nodes={}, node={}", ignore_nodes_strs, n));
|
||||
}
|
||||
}
|
||||
return ss.local().removenode(host_id, std::move(ignore_nodes)).then([] {
|
||||
@@ -777,19 +723,19 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
|
||||
ss::get_operation_mode.set(r, [&ss](std::unique_ptr<request> req) {
|
||||
return ss.local().get_operation_mode().then([] (auto mode) {
|
||||
return make_ready_future<json::json_return_type>(format("{}", mode));
|
||||
return make_ready_future<json::json_return_type>(mode);
|
||||
});
|
||||
});
|
||||
|
||||
ss::is_starting.set(r, [&ss](std::unique_ptr<request> req) {
|
||||
return ss.local().get_operation_mode().then([] (auto mode) {
|
||||
return make_ready_future<json::json_return_type>(mode <= service::storage_service::mode::STARTING);
|
||||
return ss.local().is_starting().then([] (auto starting) {
|
||||
return make_ready_future<json::json_return_type>(starting);
|
||||
});
|
||||
});
|
||||
|
||||
ss::get_drain_progress.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return ctx.db.map_reduce(adder<replica::database::drain_progress>(), [] (auto& db) {
|
||||
return db.get_drain_progress();
|
||||
ss::get_drain_progress.set(r, [&ss](std::unique_ptr<request> req) {
|
||||
return ss.map_reduce(adder<service::storage_service::drain_progress>(), [] (auto& ss) {
|
||||
return ss.get_drain_progress();
|
||||
}).then([] (auto&& progress) {
|
||||
auto progress_str = format("Drained {}/{} ColumnFamilies", progress.remaining_cfs, progress.total_cfs);
|
||||
return make_ready_future<json::json_return_type>(std::move(progress_str));
|
||||
@@ -812,13 +758,22 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
ss::get_keyspaces.set(r, [&ctx](const_req req) {
|
||||
auto type = req.get_query_param("type");
|
||||
if (type == "user") {
|
||||
return ctx.db.local().get_user_keyspaces();
|
||||
return ctx.db.local().get_non_system_keyspaces();
|
||||
} else if (type == "non_local_strategy") {
|
||||
return ctx.db.local().get_non_local_strategy_keyspaces();
|
||||
return map_keys(ctx.db.local().get_keyspaces() | boost::adaptors::filtered([](const auto& p) {
|
||||
return p.second.get_replication_strategy().get_type() != locator::replication_strategy_type::local;
|
||||
}));
|
||||
}
|
||||
return map_keys(ctx.db.local().get_keyspaces());
|
||||
});
|
||||
|
||||
ss::update_snitch.set(r, [](std::unique_ptr<request> req) {
|
||||
auto ep_snitch_class_name = req->get_query_param("ep_snitch_class_name");
|
||||
return locator::i_endpoint_snitch::reset_snitch(ep_snitch_class_name).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
ss::stop_gossiping.set(r, [&ss](std::unique_ptr<request> req) {
|
||||
return ss.local().stop_gossiping().then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
@@ -844,13 +799,9 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
ss::is_initialized.set(r, [&ss, &g](std::unique_ptr<request> req) {
|
||||
return ss.local().get_operation_mode().then([&g] (auto mode) {
|
||||
bool is_initialized = mode >= service::storage_service::mode::STARTING;
|
||||
if (mode == service::storage_service::mode::NORMAL) {
|
||||
is_initialized = g.is_enabled();
|
||||
}
|
||||
return make_ready_future<json::json_return_type>(is_initialized);
|
||||
ss::is_initialized.set(r, [&ss](std::unique_ptr<request> req) {
|
||||
return ss.local().is_initialized().then([] (bool initialized) {
|
||||
return make_ready_future<json::json_return_type>(initialized);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -859,9 +810,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
});
|
||||
|
||||
ss::is_joined.set(r, [&ss] (std::unique_ptr<request> req) {
|
||||
return ss.local().get_operation_mode().then([] (auto mode) {
|
||||
return make_ready_future<json::json_return_type>(mode >= service::storage_service::mode::JOINING);
|
||||
});
|
||||
return make_ready_future<json::json_return_type>(ss.local().is_joined());
|
||||
});
|
||||
|
||||
ss::set_stream_throughput_mb_per_sec.set(r, [](std::unique_ptr<request> req) {
|
||||
@@ -892,7 +841,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
ss::is_incremental_backups_enabled.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
// If this is issued in parallel with an ongoing change, we may see values not agreeing.
|
||||
// Reissuing is asking for trouble, so we will just return true upon seeing any true value.
|
||||
return ctx.db.map_reduce(adder<bool>(), [] (replica::database& db) {
|
||||
return ctx.db.map_reduce(adder<bool>(), [] (database& db) {
|
||||
for (auto& pair: db.get_keyspaces()) {
|
||||
auto& ks = pair.second;
|
||||
if (ks.incremental_backups_enabled()) {
|
||||
@@ -908,7 +857,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
ss::set_incremental_backups_enabled.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
auto val_str = req->get_query_param("value");
|
||||
bool value = (val_str == "True") || (val_str == "true") || (val_str == "1");
|
||||
return ctx.db.invoke_on_all([value] (replica::database& db) {
|
||||
return ctx.db.invoke_on_all([value] (database& db) {
|
||||
db.set_enable_incremental_backups(value);
|
||||
|
||||
// Change both KS and CF, so they are in sync
|
||||
@@ -960,11 +909,11 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
|
||||
ss::reset_local_schema.set(r, [&sys_ks](std::unique_ptr<request> req) {
|
||||
ss::reset_local_schema.set(r, [](std::unique_ptr<request> req) {
|
||||
// FIXME: We should truncate schema tables if more than one node in the cluster.
|
||||
auto& sp = service::get_storage_proxy();
|
||||
auto& fs = sp.local().features();
|
||||
return db::schema_tables::recalculate_schema_version(sys_ks, sp, fs).then([] {
|
||||
return db::schema_tables::recalculate_schema_version(sp, fs).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
@@ -1030,18 +979,18 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
}
|
||||
});
|
||||
|
||||
ss::enable_auto_compaction.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
ss::enable_auto_compaction.set(r, [&ctx, &ss](std::unique_ptr<request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto tables = parse_tables(keyspace, ctx, req->query_parameters, "cf");
|
||||
auto tables = split_cf(req->get_query_param("cf"));
|
||||
|
||||
return set_tables_autocompaction(ctx, keyspace, tables, true);
|
||||
return set_tables_autocompaction(ctx, ss.local(), keyspace, tables, true);
|
||||
});
|
||||
|
||||
ss::disable_auto_compaction.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
ss::disable_auto_compaction.set(r, [&ctx, &ss](std::unique_ptr<request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto tables = parse_tables(keyspace, ctx, req->query_parameters, "cf");
|
||||
auto tables = split_cf(req->get_query_param("cf"));
|
||||
|
||||
return set_tables_autocompaction(ctx, keyspace, tables, false);
|
||||
return set_tables_autocompaction(ctx, ss.local(), keyspace, tables, false);
|
||||
});
|
||||
|
||||
ss::deliver_hints.set(r, [](std::unique_ptr<request> req) {
|
||||
@@ -1106,7 +1055,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
});
|
||||
|
||||
ss::get_metrics_load.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return get_cf_stats(ctx, &replica::column_family_stats::live_disk_space_used);
|
||||
return get_cf_stats(ctx, &column_family_stats::live_disk_space_used);
|
||||
});
|
||||
|
||||
ss::get_exceptions.set(r, [&ss](const_req req) {
|
||||
@@ -1168,7 +1117,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
}
|
||||
}
|
||||
}
|
||||
}, [ks, cf](const replica::database& db) {
|
||||
}, [ks, cf](const database& db) {
|
||||
// see above
|
||||
table_sstables_list res;
|
||||
|
||||
@@ -1190,7 +1139,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
ss::sstable info;
|
||||
|
||||
info.timestamp = t;
|
||||
info.generation = sstables::generation_value(sstable->generation());
|
||||
info.generation = sstable->generation();
|
||||
info.level = sstable->get_sstable_level();
|
||||
info.size = sstable->bytes_on_disk();
|
||||
info.data_size = sstable->ondisk_data_size();
|
||||
@@ -1267,13 +1216,6 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
});
|
||||
}
|
||||
|
||||
enum class scrub_status {
    successful = 0,
    aborted,
    unable_to_cancel, // Not used in Scylla, included to ensure compatibility with the nodetool API.
    validation_errors,
};

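A short sketch of how a handler maps a completed scrub onto these codes for the nodetool-compatible response (the helper name is hypothetical; the scrub handler further down performs the equivalent mapping inline):

    // Translate a scrub outcome into the integer returned by the REST endpoint.
    static int scrub_result_to_status(bool aborted, bool validation_errors) {
        if (aborted) {
            return static_cast<int>(scrub_status::aborted);
        }
        if (validation_errors) {
            return static_cast<int>(scrub_status::validation_errors);
        }
        return static_cast<int>(scrub_status::successful);
    }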
|
||||
void set_snapshot(http_context& ctx, routes& r, sharded<db::snapshot_ctl>& snap_ctl) {
|
||||
ss::get_snapshot_details.set(r, [&snap_ctl](std::unique_ptr<request> req) {
|
||||
return snap_ctl.local().get_snapshot_details().then([] (std::unordered_map<sstring, std::vector<db::snapshot_ctl::snapshot_details>>&& result) {
|
||||
@@ -1312,52 +1254,40 @@ void set_snapshot(http_context& ctx, routes& r, sharded<db::snapshot_ctl>& snap_
|
||||
});
|
||||
});
|
||||
|
||||
ss::take_snapshot.set(r, [&ctx, &snap_ctl](std::unique_ptr<request> req) -> future<json::json_return_type> {
|
||||
apilog.info("take_snapshot: {}", req->query_parameters);
|
||||
ss::take_snapshot.set(r, [&snap_ctl](std::unique_ptr<request> req) {
|
||||
apilog.debug("take_snapshot: {}", req->query_parameters);
|
||||
auto tag = req->get_query_param("tag");
|
||||
auto column_families = split(req->get_query_param("cf"), ",");
|
||||
auto sfopt = req->get_query_param("sf");
|
||||
auto sf = db::snapshot_ctl::skip_flush(strcasecmp(sfopt.c_str(), "true") == 0);
|
||||
|
||||
std::vector<sstring> keynames = split(req->get_query_param("kn"), ",");
|
||||
try {
|
||||
if (column_families.empty()) {
|
||||
co_await snap_ctl.local().take_snapshot(tag, keynames, sf);
|
||||
} else {
|
||||
if (keynames.empty()) {
|
||||
throw httpd::bad_param_exception("The keyspace of column families must be specified");
|
||||
}
|
||||
if (keynames.size() > 1) {
|
||||
throw httpd::bad_param_exception("Only one keyspace allowed when specifying a column family");
|
||||
}
|
||||
for (const auto& table_name : column_families) {
|
||||
auto& t = ctx.db.local().find_column_family(keynames[0], table_name);
|
||||
if (t.schema()->is_view()) {
|
||||
throw std::invalid_argument("Do not take a snapshot of a materialized view or a secondary index by itself. Run snapshot on the base table instead.");
|
||||
}
|
||||
}
|
||||
co_await snap_ctl.local().take_column_family_snapshot(keynames[0], column_families, tag, db::snapshot_ctl::snap_views::yes, sf);
|
||||
|
||||
auto resp = make_ready_future<>();
|
||||
if (column_families.empty()) {
|
||||
resp = snap_ctl.local().take_snapshot(tag, keynames, sf);
|
||||
} else {
|
||||
if (keynames.empty()) {
|
||||
throw httpd::bad_param_exception("The keyspace of column families must be specified");
|
||||
}
|
||||
co_return json_void();
|
||||
} catch (...) {
|
||||
apilog.error("take_snapshot failed: {}", std::current_exception());
|
||||
throw;
|
||||
if (keynames.size() > 1) {
|
||||
throw httpd::bad_param_exception("Only one keyspace allowed when specifying a column family");
|
||||
}
|
||||
resp = snap_ctl.local().take_column_family_snapshot(keynames[0], column_families, tag, sf);
|
||||
}
|
||||
return resp.then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
ss::del_snapshot.set(r, [&snap_ctl](std::unique_ptr<request> req) -> future<json::json_return_type> {
|
||||
apilog.info("del_snapshot: {}", req->query_parameters);
|
||||
ss::del_snapshot.set(r, [&snap_ctl](std::unique_ptr<request> req) {
|
||||
auto tag = req->get_query_param("tag");
|
||||
auto column_family = req->get_query_param("cf");
|
||||
|
||||
std::vector<sstring> keynames = split(req->get_query_param("kn"), ",");
|
||||
try {
|
||||
co_await snap_ctl.local().clear_snapshot(tag, keynames, column_family);
|
||||
co_return json_void();
|
||||
} catch (...) {
|
||||
apilog.error("del_snapshot failed: {}", std::current_exception());
|
||||
throw;
|
||||
}
|
||||
return snap_ctl.local().clear_snapshot(tag, keynames, column_family).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
ss::true_snapshots_size.set(r, [&snap_ctl](std::unique_ptr<request> req) {
|
||||
@@ -1366,29 +1296,17 @@ void set_snapshot(http_context& ctx, routes& r, sharded<db::snapshot_ctl>& snap_
|
||||
});
|
||||
});
|
||||
|
||||
ss::scrub.set(r, [&ctx, &snap_ctl] (std::unique_ptr<request> req) {
|
||||
auto rp = req_params({
|
||||
{"keyspace", {mandatory::yes}},
|
||||
{"cf", {""}},
|
||||
{"scrub_mode", {}},
|
||||
{"skip_corrupted", {}},
|
||||
{"disable_snapshot", {}},
|
||||
{"quarantine_mode", {}},
|
||||
});
|
||||
rp.process(*req);
|
||||
auto keyspace = validate_keyspace(ctx, *rp.get("keyspace"));
|
||||
auto column_families = parse_tables(keyspace, ctx, *rp.get("cf"));
|
||||
auto scrub_mode_opt = rp.get("scrub_mode");
|
||||
ss::scrub.set(r, wrap_ks_cf(ctx, [&snap_ctl] (http_context& ctx, std::unique_ptr<request> req, sstring keyspace, std::vector<sstring> column_families) {
|
||||
auto scrub_mode = sstables::compaction_type_options::scrub::mode::abort;
|
||||
|
||||
if (!scrub_mode_opt) {
|
||||
const auto skip_corrupted = rp.get_as<bool>("skip_corrupted").value_or(false);
|
||||
const sstring scrub_mode_str = req_param<sstring>(*req, "scrub_mode", "");
|
||||
if (scrub_mode_str == "") {
|
||||
const auto skip_corrupted = req_param<bool>(*req, "skip_corrupted", false);
|
||||
|
||||
if (skip_corrupted) {
|
||||
scrub_mode = sstables::compaction_type_options::scrub::mode::skip;
|
||||
}
|
||||
} else {
|
||||
auto scrub_mode_str = *scrub_mode_opt;
|
||||
if (scrub_mode_str == "ABORT") {
|
||||
scrub_mode = sstables::compaction_type_options::scrub::mode::abort;
|
||||
} else if (scrub_mode_str == "SKIP") {
|
||||
@@ -1398,7 +1316,7 @@ void set_snapshot(http_context& ctx, routes& r, sharded<db::snapshot_ctl>& snap_
|
||||
} else if (scrub_mode_str == "VALIDATE") {
|
||||
scrub_mode = sstables::compaction_type_options::scrub::mode::validate;
|
||||
} else {
|
||||
throw httpd::bad_param_exception(fmt::format("Unknown argument for 'scrub_mode' parameter: {}", scrub_mode_str));
|
||||
throw std::invalid_argument(fmt::format("Unknown argument for 'scrub_mode' parameter: {}", scrub_mode_str));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1406,57 +1324,22 @@ void set_snapshot(http_context& ctx, routes& r, sharded<db::snapshot_ctl>& snap_
|
||||
if (!req_param<bool>(*req, "disable_snapshot", false)) {
|
||||
auto tag = format("pre-scrub-{:d}", db_clock::now().time_since_epoch().count());
|
||||
f = parallel_for_each(column_families, [&snap_ctl, keyspace, tag](sstring cf) {
|
||||
// We always pass here db::snapshot_ctl::snap_views::no since:
|
||||
// 1. When scrubbing particular tables, there's no need to auto-snapshot their views.
|
||||
// 2. When scrubbing the whole keyspace, column_families will contain both base tables and views.
|
||||
return snap_ctl.local().take_column_family_snapshot(keyspace, cf, tag, db::snapshot_ctl::snap_views::no, db::snapshot_ctl::skip_flush::no);
|
||||
return snap_ctl.local().take_column_family_snapshot(keyspace, cf, tag);
|
||||
});
|
||||
}
|
||||
|
||||
sstables::compaction_type_options::scrub opts = {
|
||||
.operation_mode = scrub_mode,
|
||||
};
|
||||
const sstring quarantine_mode_str = req_param<sstring>(*req, "quarantine_mode", "INCLUDE");
|
||||
if (quarantine_mode_str == "INCLUDE") {
|
||||
opts.quarantine_operation_mode = sstables::compaction_type_options::scrub::quarantine_mode::include;
|
||||
} else if (quarantine_mode_str == "EXCLUDE") {
|
||||
opts.quarantine_operation_mode = sstables::compaction_type_options::scrub::quarantine_mode::exclude;
|
||||
} else if (quarantine_mode_str == "ONLY") {
|
||||
opts.quarantine_operation_mode = sstables::compaction_type_options::scrub::quarantine_mode::only;
|
||||
} else {
|
||||
throw httpd::bad_param_exception(fmt::format("Unknown argument for 'quarantine_mode' parameter: {}", quarantine_mode_str));
|
||||
}
|
||||
|
||||
const auto& reduce_compaction_stats = [] (const compaction_manager::compaction_stats_opt& lhs, const compaction_manager::compaction_stats_opt& rhs) {
|
||||
sstables::compaction_stats stats{};
|
||||
stats += lhs.value();
|
||||
stats += rhs.value();
|
||||
return stats;
|
||||
};
|
||||
|
||||
return f.then([&ctx, keyspace, column_families, opts, &reduce_compaction_stats] {
|
||||
return ctx.db.map_reduce0([=] (replica::database& db) {
|
||||
return map_reduce(column_families, [=, &db] (sstring cfname) {
|
||||
return f.then([&ctx, keyspace, column_families, scrub_mode] {
|
||||
return ctx.db.invoke_on_all([=] (database& db) {
|
||||
return do_for_each(column_families, [=, &db](sstring cfname) {
|
||||
auto& cm = db.get_compaction_manager();
|
||||
auto& cf = db.find_column_family(keyspace, cfname);
|
||||
return cm.perform_sstable_scrub(cf.as_table_state(), opts);
|
||||
}, std::make_optional(sstables::compaction_stats{}), reduce_compaction_stats);
|
||||
}, std::make_optional(sstables::compaction_stats{}), reduce_compaction_stats);
|
||||
}).then_wrapped([] (auto f) {
|
||||
if (f.failed()) {
|
||||
auto ex = f.get_exception();
|
||||
if (try_catch<sstables::compaction_aborted_exception>(ex)) {
|
||||
return make_ready_future<json::json_return_type>(static_cast<int>(scrub_status::aborted));
|
||||
} else {
|
||||
return make_exception_future<json::json_return_type>(std::move(ex));
|
||||
}
|
||||
} else if (f.get()->validation_errors) {
|
||||
return make_ready_future<json::json_return_type>(static_cast<int>(scrub_status::validation_errors));
|
||||
} else {
|
||||
return make_ready_future<json::json_return_type>(static_cast<int>(scrub_status::successful));
|
||||
}
|
||||
return cm.perform_sstable_scrub(&cf, scrub_mode);
|
||||
});
|
||||
});
|
||||
}).then([]{
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
});
|
||||
}));
|
||||
}
|
||||
|
||||
void unset_snapshot(http_context& ctx, routes& r) {
|
||||
|
||||
@@ -3,7 +3,20 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
@@ -19,7 +32,6 @@ class snapshot_ctl;
|
||||
namespace view {
|
||||
class view_builder;
|
||||
}
|
||||
class system_keyspace;
|
||||
}
|
||||
namespace netw { class messaging_service; }
|
||||
class repair_service;
|
||||
@@ -34,16 +46,7 @@ class gossiper;
|
||||
|
||||
namespace api {
|
||||
|
||||
// verify that the keyspace parameter is found, otherwise a bad_param_exception exception is thrown
|
||||
// containing the description of the respective keyspace error.
|
||||
sstring validate_keyspace(http_context& ctx, const parameters& param);
|
||||
|
||||
// splits a request parameter assumed to hold a comma-separated list of table names
|
||||
// verify that the tables are found, otherwise a bad_param_exception exception is thrown
|
||||
// containing the description of the respective no_such_column_family error.
|
||||
std::vector<sstring> parse_tables(const sstring& ks_name, http_context& ctx, const std::unordered_map<sstring, sstring>& query_params, sstring param_name);
|
||||
|
||||
void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_service>& ss, gms::gossiper& g, sharded<cdc::generation_service>& cdc_gs, sharded<db::system_keyspace>& sys_ls);
|
||||
void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_service>& ss, gms::gossiper& g, sharded<cdc::generation_service>& cdc_gs);
|
||||
void set_sstables_loader(http_context& ctx, routes& r, sharded<sstables_loader>& sst_loader);
|
||||
void unset_sstables_loader(http_context& ctx, routes& r);
|
||||
void set_view_builder(http_context& ctx, routes& r, sharded<db::view::view_builder>& vb);
|
||||
|
||||
@@ -3,7 +3,20 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "stream_manager.hh"
|
||||
@@ -74,13 +87,13 @@ static hs::stream_state get_state(
|
||||
return state;
|
||||
}
|
||||
|
||||
void set_stream_manager(http_context& ctx, routes& r, sharded<streaming::stream_manager>& sm) {
|
||||
void set_stream_manager(http_context& ctx, routes& r) {
|
||||
hs::get_current_streams.set(r,
|
||||
[&sm] (std::unique_ptr<request> req) {
|
||||
return sm.invoke_on_all([] (auto& sm) {
|
||||
[] (std::unique_ptr<request> req) {
|
||||
return streaming::get_stream_manager().invoke_on_all([] (auto& sm) {
|
||||
return sm.update_all_progress_info();
|
||||
}).then([&sm] {
|
||||
return sm.map_reduce0([](streaming::stream_manager& stream) {
|
||||
}).then([] {
|
||||
return streaming::get_stream_manager().map_reduce0([](streaming::stream_manager& stream) {
|
||||
std::vector<hs::stream_state> res;
|
||||
for (auto i : stream.get_initiated_streams()) {
|
||||
res.push_back(get_state(*i.second.get()));
|
||||
@@ -96,17 +109,17 @@ void set_stream_manager(http_context& ctx, routes& r, sharded<streaming::stream_
|
||||
});
|
||||
});
|
||||
|
||||
hs::get_all_active_streams_outbound.set(r, [&sm](std::unique_ptr<request> req) {
|
||||
return sm.map_reduce0([](streaming::stream_manager& stream) {
|
||||
hs::get_all_active_streams_outbound.set(r, [](std::unique_ptr<request> req) {
|
||||
return streaming::get_stream_manager().map_reduce0([](streaming::stream_manager& stream) {
|
||||
return stream.get_initiated_streams().size();
|
||||
}, 0, std::plus<int64_t>()).then([](int64_t res) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
});
|
||||
|
||||
hs::get_total_incoming_bytes.set(r, [&sm](std::unique_ptr<request> req) {
|
||||
hs::get_total_incoming_bytes.set(r, [](std::unique_ptr<request> req) {
|
||||
gms::inet_address peer(req->param["peer"]);
|
||||
return sm.map_reduce0([peer](streaming::stream_manager& sm) {
|
||||
return streaming::get_stream_manager().map_reduce0([peer](streaming::stream_manager& sm) {
|
||||
return sm.get_progress_on_all_shards(peer).then([] (auto sbytes) {
|
||||
return sbytes.bytes_received;
|
||||
});
|
||||
@@ -115,8 +128,8 @@ void set_stream_manager(http_context& ctx, routes& r, sharded<streaming::stream_
|
||||
});
|
||||
});
|
||||
|
||||
hs::get_all_total_incoming_bytes.set(r, [&sm](std::unique_ptr<request> req) {
|
||||
return sm.map_reduce0([](streaming::stream_manager& sm) {
|
||||
hs::get_all_total_incoming_bytes.set(r, [](std::unique_ptr<request> req) {
|
||||
return streaming::get_stream_manager().map_reduce0([](streaming::stream_manager& sm) {
|
||||
return sm.get_progress_on_all_shards().then([] (auto sbytes) {
|
||||
return sbytes.bytes_received;
|
||||
});
|
||||
@@ -125,9 +138,9 @@ void set_stream_manager(http_context& ctx, routes& r, sharded<streaming::stream_
|
||||
});
|
||||
});
|
||||
|
||||
hs::get_total_outgoing_bytes.set(r, [&sm](std::unique_ptr<request> req) {
|
||||
hs::get_total_outgoing_bytes.set(r, [](std::unique_ptr<request> req) {
|
||||
gms::inet_address peer(req->param["peer"]);
|
||||
return sm.map_reduce0([peer] (streaming::stream_manager& sm) {
|
||||
return streaming::get_stream_manager().map_reduce0([peer] (streaming::stream_manager& sm) {
|
||||
return sm.get_progress_on_all_shards(peer).then([] (auto sbytes) {
|
||||
return sbytes.bytes_sent;
|
||||
});
|
||||
@@ -136,8 +149,8 @@ void set_stream_manager(http_context& ctx, routes& r, sharded<streaming::stream_
|
||||
});
|
||||
});
|
||||
|
||||
hs::get_all_total_outgoing_bytes.set(r, [&sm](std::unique_ptr<request> req) {
|
||||
return sm.map_reduce0([](streaming::stream_manager& sm) {
|
||||
hs::get_all_total_outgoing_bytes.set(r, [](std::unique_ptr<request> req) {
|
||||
return streaming::get_stream_manager().map_reduce0([](streaming::stream_manager& sm) {
|
||||
return sm.get_progress_on_all_shards().then([] (auto sbytes) {
|
||||
return sbytes.bytes_sent;
|
||||
});
|
||||
@@ -147,13 +160,4 @@ void set_stream_manager(http_context& ctx, routes& r, sharded<streaming::stream_
|
||||
});
|
||||
}
|
||||
|
||||
void unset_stream_manager(http_context& ctx, routes& r) {
|
||||
hs::get_current_streams.unset(r);
|
||||
hs::get_all_active_streams_outbound.unset(r);
|
||||
hs::get_total_incoming_bytes.unset(r);
|
||||
hs::get_all_total_incoming_bytes.unset(r);
|
||||
hs::get_total_outgoing_bytes.unset(r);
|
||||
hs::get_all_total_outgoing_bytes.unset(r);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -3,7 +3,20 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
@@ -12,7 +25,6 @@
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_stream_manager(http_context& ctx, routes& r, sharded<streaming::stream_manager>& sm);
|
||||
void unset_stream_manager(http_context& ctx, routes& r);
|
||||
void set_stream_manager(http_context& ctx, routes& r);
|
||||
|
||||
}
|
||||
|
||||
@@ -3,7 +3,20 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "api/api-doc/system.json.hh"
|
||||
@@ -12,7 +25,7 @@
|
||||
#include <seastar/core/reactor.hh>
|
||||
#include <seastar/http/exception.hh>
|
||||
#include "log.hh"
|
||||
#include "replica/database.hh"
|
||||
#include "database.hh"
|
||||
|
||||
extern logging::logger apilog;
|
||||
|
||||
@@ -61,19 +74,9 @@ void set_system(http_context& ctx, routes& r) {
|
||||
return json::json_void();
|
||||
});
|
||||
|
||||
hs::write_log_message.set(r, [](const_req req) {
|
||||
try {
|
||||
logging::log_level level = boost::lexical_cast<logging::log_level>(std::string(req.get_query_param("level")));
|
||||
apilog.log(level, "/system/log: {}", std::string(req.get_query_param("message")));
|
||||
} catch (boost::bad_lexical_cast& e) {
|
||||
throw bad_param_exception("Unknown logging level " + req.get_query_param("level"));
|
||||
}
|
||||
return json::json_void();
|
||||
});
|
||||
|
||||
hs::drop_sstable_caches.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
apilog.info("Dropping sstable caches");
|
||||
return ctx.db.invoke_on_all([] (replica::database& db) {
|
||||
return ctx.db.invoke_on_all([] (database& db) {
|
||||
return db.drop_caches();
|
||||
}).then([] {
|
||||
apilog.info("Caches dropped");
|
||||
|
||||
@@ -3,7 +3,20 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -1,164 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2022-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include <seastar/core/coroutine.hh>
|
||||
|
||||
#include "task_manager.hh"
|
||||
#include "api/api-doc/task_manager.json.hh"
|
||||
#include "db/system_keyspace.hh"
|
||||
#include "column_family.hh"
|
||||
#include "unimplemented.hh"
|
||||
#include "storage_service.hh"
|
||||
|
||||
#include <utility>
|
||||
#include <boost/range/adaptors.hpp>
|
||||
|
||||
namespace api {
|
||||
|
||||
namespace tm = httpd::task_manager_json;
|
||||
using namespace json;
|
||||
|
||||
inline bool filter_tasks(tasks::task_manager::task_ptr task, std::unordered_map<sstring, sstring>& query_params) {
|
||||
return (!query_params.contains("keyspace") || query_params["keyspace"] == task->get_status().keyspace) &&
|
||||
(!query_params.contains("table") || query_params["table"] == task->get_status().table);
|
||||
}
|
||||
|
||||
struct full_task_status {
|
||||
tasks::task_manager::task::status task_status;
|
||||
tasks::task_manager::task::progress progress;
|
||||
std::string module;
|
||||
tasks::task_id parent_id;
|
||||
tasks::is_abortable abortable;
|
||||
};
|
||||
|
||||
struct task_stats {
|
||||
task_stats(tasks::task_manager::task_ptr task) : task_id(task->id().to_sstring()), state(task->get_status().state) {}
|
||||
|
||||
sstring task_id;
|
||||
tasks::task_manager::task_state state;
|
||||
};
|
||||
|
||||
tm::task_status make_status(full_task_status status) {
|
||||
auto start_time = db_clock::to_time_t(status.task_status.start_time);
|
||||
auto end_time = db_clock::to_time_t(status.task_status.end_time);
|
||||
::tm st, et;
|
||||
::gmtime_r(&end_time, &et);
|
||||
::gmtime_r(&start_time, &st);
|
||||
|
||||
tm::task_status res{};
|
||||
res.id = status.task_status.id.to_sstring();
|
||||
res.type = status.task_status.type;
|
||||
res.state = status.task_status.state;
|
||||
res.is_abortable = bool(status.abortable);
|
||||
res.start_time = st;
|
||||
res.end_time = et;
|
||||
res.error = status.task_status.error;
|
||||
res.parent_id = status.parent_id.to_sstring();
|
||||
res.sequence_number = status.task_status.sequence_number;
|
||||
res.shard = status.task_status.shard;
|
||||
res.keyspace = status.task_status.keyspace;
|
||||
res.table = status.task_status.table;
|
||||
res.entity = status.task_status.entity;
|
||||
res.progress_units = status.task_status.progress_units;
|
||||
res.progress_total = status.progress.total;
|
||||
res.progress_completed = status.progress.completed;
|
||||
return res;
|
||||
}
|
||||
|
||||
future<json::json_return_type> retrieve_status(tasks::task_manager::foreign_task_ptr task) {
|
||||
if (task.get() == nullptr) {
|
||||
co_return coroutine::return_exception(httpd::bad_param_exception("Task not found"));
|
||||
}
|
||||
auto progress = co_await task->get_progress();
|
||||
full_task_status s;
|
||||
s.task_status = task->get_status();
|
||||
s.parent_id = task->get_parent_id();
|
||||
s.abortable = task->is_abortable();
|
||||
s.module = task->get_module_name();
|
||||
s.progress.completed = progress.completed;
|
||||
s.progress.total = progress.total;
|
||||
co_return make_status(s);
|
||||
}
|
||||
|
||||
void set_task_manager(http_context& ctx, routes& r) {
|
||||
tm::get_modules.set(r, [&ctx] (std::unique_ptr<request> req) -> future<json::json_return_type> {
|
||||
std::vector<std::string> v = boost::copy_range<std::vector<std::string>>(ctx.tm.local().get_modules() | boost::adaptors::map_keys);
|
||||
co_return v;
|
||||
});
|
||||
|
||||
tm::get_tasks.set(r, [&ctx] (std::unique_ptr<request> req) -> future<json::json_return_type> {
|
||||
using chunked_stats = utils::chunked_vector<task_stats>;
|
||||
auto internal = tasks::is_internal{req_param<bool>(*req, "internal", false)};
|
||||
std::vector<chunked_stats> res = co_await ctx.tm.map([&req, internal] (tasks::task_manager& tm) {
|
||||
chunked_stats local_res;
|
||||
auto module = tm.find_module(req->param["module"]);
|
||||
const auto& filtered_tasks = module->get_tasks() | boost::adaptors::filtered([¶ms = req->query_parameters, internal] (const auto& task) {
|
||||
return (internal || !task.second->is_internal()) && filter_tasks(task.second, params);
|
||||
});
|
||||
for (auto& [task_id, task] : filtered_tasks) {
|
||||
local_res.push_back(task_stats{task});
|
||||
}
|
||||
return local_res;
|
||||
});
|
||||
|
||||
std::function<future<>(output_stream<char>&&)> f = [r = std::move(res)] (output_stream<char>&& os) -> future<> {
|
||||
auto s = std::move(os);
|
||||
auto res = std::move(r);
|
||||
co_await s.write("[");
|
||||
std::string delim = "";
|
||||
for (auto& v: res) {
|
||||
for (auto& stats: v) {
|
||||
co_await s.write(std::exchange(delim, ", "));
|
||||
tm::task_stats ts;
|
||||
ts = stats;
|
||||
co_await formatter::write(s, ts);
|
||||
}
|
||||
}
|
||||
co_await s.write("]");
|
||||
co_await s.close();
|
||||
};
|
||||
co_return std::move(f);
|
||||
});
|
||||
|
||||
tm::get_task_status.set(r, [&ctx] (std::unique_ptr<request> req) -> future<json::json_return_type> {
|
||||
auto id = tasks::task_id{utils::UUID{req->param["task_id"]}};
|
||||
auto task = co_await tasks::task_manager::invoke_on_task(ctx.tm, id, std::function([] (tasks::task_manager::task_ptr task) -> future<tasks::task_manager::foreign_task_ptr> {
|
||||
auto state = task->get_status().state;
|
||||
if (state == tasks::task_manager::task_state::done || state == tasks::task_manager::task_state::failed) {
|
||||
task->unregister_task();
|
||||
}
|
||||
co_return std::move(task);
|
||||
}));
|
||||
co_return co_await retrieve_status(std::move(task));
|
||||
});
|
||||
|
||||
tm::abort_task.set(r, [&ctx] (std::unique_ptr<request> req) -> future<json::json_return_type> {
|
||||
auto id = tasks::task_id{utils::UUID{req->param["task_id"]}};
|
||||
co_await tasks::task_manager::invoke_on_task(ctx.tm, id, [] (tasks::task_manager::task_ptr task) -> future<> {
|
||||
if (!task->is_abortable()) {
|
||||
co_await coroutine::return_exception(std::runtime_error("Requested task cannot be aborted"));
|
||||
}
|
||||
co_await task->abort();
|
||||
});
|
||||
co_return json_void();
|
||||
});
|
||||
|
||||
tm::wait_task.set(r, [&ctx] (std::unique_ptr<request> req) -> future<json::json_return_type> {
|
||||
auto id = tasks::task_id{utils::UUID{req->param["task_id"]}};
|
||||
auto task = co_await tasks::task_manager::invoke_on_task(ctx.tm, id, std::function([] (tasks::task_manager::task_ptr task) {
|
||||
return task->done().then_wrapped([task] (auto f) {
|
||||
task->unregister_task();
|
||||
f.get();
|
||||
return make_foreign(task);
|
||||
});
|
||||
}));
|
||||
co_return co_await retrieve_status(std::move(task));
|
||||
});
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2022-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "api.hh"
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_task_manager(http_context& ctx, routes& r);
|
||||
|
||||
}
|
||||
@@ -1,109 +0,0 @@
/*
 * Copyright (C) 2022-present ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 */

#ifndef SCYLLA_BUILD_MODE_RELEASE

#include <seastar/core/coroutine.hh>

#include "task_manager_test.hh"
#include "api/api-doc/task_manager_test.json.hh"
#include "tasks/test_module.hh"

namespace api {

namespace tmt = httpd::task_manager_test_json;
using namespace json;

void set_task_manager_test(http_context& ctx, routes& r, db::config& cfg) {
    tmt::register_test_module.set(r, [&ctx] (std::unique_ptr<request> req) -> future<json::json_return_type> {
        co_await ctx.tm.invoke_on_all([] (tasks::task_manager& tm) {
            auto m = make_shared<tasks::test_module>(tm);
            tm.register_module("test", m);
        });
        co_return json_void();
    });

    tmt::unregister_test_module.set(r, [&ctx] (std::unique_ptr<request> req) -> future<json::json_return_type> {
        co_await ctx.tm.invoke_on_all([] (tasks::task_manager& tm) -> future<> {
            auto module_name = "test";
            auto module = tm.find_module(module_name);
            co_await module->stop();
        });
        co_return json_void();
    });

    tmt::register_test_task.set(r, [&ctx] (std::unique_ptr<request> req) -> future<json::json_return_type> {
        sharded<tasks::task_manager>& tms = ctx.tm;
        auto it = req->query_parameters.find("task_id");
        auto id = it != req->query_parameters.end() ? tasks::task_id{utils::UUID{it->second}} : tasks::task_id::create_null_id();
        it = req->query_parameters.find("shard");
        unsigned shard = it != req->query_parameters.end() ? boost::lexical_cast<unsigned>(it->second) : 0;
        it = req->query_parameters.find("keyspace");
        std::string keyspace = it != req->query_parameters.end() ? it->second : "";
        it = req->query_parameters.find("table");
        std::string table = it != req->query_parameters.end() ? it->second : "";
        it = req->query_parameters.find("type");
        std::string type = it != req->query_parameters.end() ? it->second : "";
        it = req->query_parameters.find("entity");
        std::string entity = it != req->query_parameters.end() ? it->second : "";
        it = req->query_parameters.find("parent_id");
        tasks::task_info data;
        if (it != req->query_parameters.end()) {
            data.id = tasks::task_id{utils::UUID{it->second}};
            auto parent_ptr = co_await tasks::task_manager::lookup_task_on_all_shards(ctx.tm, data.id);
            data.shard = parent_ptr->get_status().shard;
        }

        auto module = tms.local().find_module("test");
        id = co_await module->make_task<tasks::test_task_impl>(shard, id, keyspace, table, type, entity, data);
        co_await tms.invoke_on(shard, [id] (tasks::task_manager& tm) {
            auto it = tm.get_all_tasks().find(id);
            if (it != tm.get_all_tasks().end()) {
                it->second->start();
            }
        });
        co_return id.to_sstring();
    });

    tmt::unregister_test_task.set(r, [&ctx] (std::unique_ptr<request> req) -> future<json::json_return_type> {
        auto id = tasks::task_id{utils::UUID{req->query_parameters["task_id"]}};
        co_await tasks::task_manager::invoke_on_task(ctx.tm, id, [] (tasks::task_manager::task_ptr task) -> future<> {
            tasks::test_task test_task{task};
            co_await test_task.unregister_task();
        });
        co_return json_void();
    });

    tmt::finish_test_task.set(r, [&ctx] (std::unique_ptr<request> req) -> future<json::json_return_type> {
        auto id = tasks::task_id{utils::UUID{req->param["task_id"]}};
        auto it = req->query_parameters.find("error");
        bool fail = it != req->query_parameters.end();
        std::string error = fail ? it->second : "";

        co_await tasks::task_manager::invoke_on_task(ctx.tm, id, [fail, error = std::move(error)] (tasks::task_manager::task_ptr task) {
            tasks::test_task test_task{task};
            if (fail) {
                test_task.finish_failed(std::make_exception_ptr(std::runtime_error(error)));
            } else {
                test_task.finish();
            }
            return make_ready_future<>();
        });
        co_return json_void();
    });

    tmt::get_and_update_ttl.set(r, [&ctx, &cfg] (std::unique_ptr<request> req) -> future<json::json_return_type> {
        uint32_t ttl = cfg.task_ttl_seconds();
        cfg.task_ttl_seconds.set(boost::lexical_cast<uint32_t>(req->query_parameters["ttl"]));
        co_return json::json_return_type(ttl);
    });
}

}

#endif
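The `register_test_task` handler above repeats a lookup-with-default idiom for each optional query parameter (`task_id`, `shard`, `keyspace`, `table`, `type`, `entity`). The snippet below is a hypothetical, standalone helper in plain standard C++ showing the same idiom factored into one function; it is an illustrative sketch, not part of the ScyllaDB codebase.

```cpp
// Hypothetical illustration of the lookup-with-default idiom used in
// register_test_task above; plain standard C++, not ScyllaDB code.
#include <iostream>
#include <string>
#include <unordered_map>

using query_params = std::unordered_map<std::string, std::string>;

// Return the parameter value if present, otherwise the supplied default.
static std::string get_or(const query_params& qp, const std::string& key,
                          const std::string& def = "") {
    auto it = qp.find(key);
    return it != qp.end() ? it->second : def;
}

int main() {
    query_params qp{{"keyspace", "ks1"}, {"shard", "2"}};

    std::string keyspace = get_or(qp, "keyspace");                // "ks1"
    std::string table    = get_or(qp, "table");                   // "" (absent)
    unsigned shard       = std::stoul(get_or(qp, "shard", "0"));  // 2

    std::cout << keyspace << " " << table << " " << shard << "\n";
}
```

With such a helper, each optional parameter becomes a single call such as `get_or(qp, "keyspace")` instead of the repeated find/compare/default sequence.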
@@ -1,22 +0,0 @@
/*
 * Copyright (C) 2022-present ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 */

#ifndef SCYLLA_BUILD_MODE_RELEASE

#pragma once

#include "api.hh"
#include "db/config.hh"

namespace api {

void set_task_manager_test(http_context& ctx, routes& r, db::config& cfg);

}

#endif
@@ -3,7 +3,20 @@
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

#include "atomic_cell.hh"

@@ -3,7 +3,20 @@
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

#pragma once

@@ -3,7 +3,20 @@
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

#pragma once

@@ -3,7 +3,20 @@
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

#pragma once

@@ -3,7 +3,20 @@
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

#include "auth/allow_all_authenticator.hh"

@@ -3,7 +3,20 @@
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

#pragma once

@@ -3,7 +3,20 @@
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

#include "auth/allow_all_authorizer.hh"

@@ -3,7 +3,20 @@
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

#pragma once

@@ -1,3 +1,21 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright (C) 2016-present ScyllaDB
 *
@@ -5,7 +23,20 @@
 */

/*
 * SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0)
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

#include "auth/authenticated_user.hh"

@@ -1,3 +1,21 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright (C) 2016-present ScyllaDB
 *
@@ -5,7 +23,20 @@
 */

/*
 * SPDX-License-Identifier: (AGPL-3.0-or-later and Apache-2.0)
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

#pragma once

Some files were not shown because too many files have changed in this diff.