Compare commits
256 Commits
next-5.1
...
branch-5.0
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f6c2624c86 | ||
|
|
f7d9afd209 | ||
|
|
b011cc2e78 | ||
|
|
fb466dd7b7 | ||
|
|
697e090659 | ||
|
|
2c518f3131 | ||
|
|
e941a5ac34 | ||
|
|
3a7ce5e8aa | ||
|
|
efa4f312f5 | ||
|
|
fb4b71ea02 | ||
|
|
7387922a29 | ||
|
|
cb78c3bf2c | ||
|
|
aeac63a3ee | ||
|
|
e7b50fb8d3 | ||
|
|
6b21f2a351 | ||
|
|
0db8e627a5 | ||
|
|
f1121d2149 | ||
|
|
a0ca8abe42 | ||
|
|
8bceac1713 | ||
|
|
6bcc7c6ed5 | ||
|
|
67f85875cc | ||
|
|
8b874cd4e4 | ||
|
|
b08c582134 | ||
|
|
41556b5f63 | ||
|
|
23e7e594c0 | ||
|
|
e6ac13314d | ||
|
|
382d815459 | ||
|
|
a867b2c0e5 | ||
|
|
846edf78c6 | ||
|
|
0ccc07322b | ||
|
|
0b170192a1 | ||
|
|
fd4b2a3319 | ||
|
|
416929fb2a | ||
|
|
9d8d7048eb | ||
|
|
bae4155ab2 | ||
|
|
d6e2a326cf | ||
|
|
15645ff40b | ||
|
|
a808fc7172 | ||
|
|
dd260bfa82 | ||
|
|
c46935ed5c | ||
|
|
985d6bc4c2 | ||
|
|
7673ff4ae3 | ||
|
|
c441eebf46 | ||
|
|
bf4fa80dd7 | ||
|
|
2010231fe9 | ||
|
|
0a51eb55e3 | ||
|
|
d9c6c6283b | ||
|
|
90a5344261 | ||
|
|
68da667288 | ||
|
|
9adb1a8fdd | ||
|
|
7623fe01b7 | ||
|
|
3b0a0c4876 | ||
|
|
019d5cde1b | ||
|
|
a2e255833a | ||
|
|
f4aa5cacb1 | ||
|
|
8ea9a16f9e | ||
|
|
1aa5283a38 | ||
|
|
2e7b1858ad | ||
|
|
2542b57ddc | ||
|
|
01a9871fc3 | ||
|
|
6bb7fac8d8 | ||
|
|
5dff7489b1 | ||
|
|
2775b1d136 | ||
|
|
2ae5675c0f | ||
|
|
d507ad9424 | ||
|
|
413af945c0 | ||
|
|
9a71680dc7 | ||
|
|
94b8baa797 | ||
|
|
e372a5fe0a | ||
|
|
692e5ed175 | ||
|
|
5a299f65ff | ||
|
|
f4ae2fa5f9 | ||
|
|
07c20bdfea | ||
|
|
8a36c4be54 | ||
|
|
bf92c2b44c | ||
|
|
0e388d2140 | ||
|
|
288eb9d231 | ||
|
|
9219a59802 | ||
|
|
f9cea4dc51 | ||
|
|
081b2b76cc | ||
|
|
dfb229a18a | ||
|
|
60da855c2d | ||
|
|
1718861e94 | ||
|
|
e03e9b1abe | ||
|
|
26c51025c1 | ||
|
|
5c39a4524a | ||
|
|
9823e8d9c5 | ||
|
|
b48c9cae95 | ||
|
|
14077d2def | ||
|
|
25508705a8 | ||
|
|
347da028e9 | ||
|
|
874fa15202 | ||
|
|
99c03cb2af | ||
|
|
6c35d3c5cd | ||
|
|
707622ce15 | ||
|
|
bab36b604c | ||
|
|
8840711e79 | ||
|
|
af18bb3fe9 | ||
|
|
6003cba7a8 | ||
|
|
e9afd076eb | ||
|
|
c5f732d42a | ||
|
|
13a1408135 | ||
|
|
6685e00dd4 | ||
|
|
350bb57291 | ||
|
|
e186ad5b6c | ||
|
|
139e9afc89 | ||
|
|
a42c6f190c | ||
|
|
2b8f0cbd97 | ||
|
|
a2a762e18d | ||
|
|
aa973e2b9e | ||
|
|
e0777f1112 | ||
|
|
cc6311cbc7 | ||
|
|
0354e13718 | ||
|
|
2750d2e94b | ||
|
|
b4383a389b | ||
|
|
f667c5923a | ||
|
|
e4ba0c56df | ||
|
|
329d55cc4f | ||
|
|
b956293f47 | ||
|
|
6a8c2d3f56 | ||
|
|
27a35c7f98 | ||
|
|
d83134a245 | ||
|
|
b844d14829 | ||
|
|
184df0393e | ||
|
|
1b550dd301 | ||
|
|
01ce53d7fb | ||
|
|
e9c7f89b32 | ||
|
|
93f468c12c | ||
|
|
e54ae9efd9 | ||
|
|
ef40e59c0e | ||
|
|
8c56b0b268 | ||
|
|
fc78d88783 | ||
|
|
31a20c4c54 | ||
|
|
7e42bcfd61 | ||
|
|
2107ffe2d2 | ||
|
|
5a97a1060e | ||
|
|
2b0487c900 | ||
|
|
d3b3c53d9f | ||
|
|
50c2c1b1d4 | ||
|
|
aa647a637a | ||
|
|
2c0040fcb3 | ||
|
|
54564adb7c | ||
|
|
839876e8f2 | ||
|
|
36002e2b7c | ||
|
|
91a8f9e09b | ||
|
|
bc29f350dd | ||
|
|
4fe571f470 | ||
|
|
ebf38eaead | ||
|
|
1c82766f33 | ||
|
|
e1f78c33b4 | ||
|
|
0634b5f734 | ||
|
|
6f020b26e1 | ||
|
|
7f8dcc5657 | ||
|
|
20451760fe | ||
|
|
51b031d04e | ||
|
|
82d1446ca9 | ||
|
|
e0acb0766d | ||
|
|
4f26d489a0 | ||
|
|
43cbc5c836 | ||
|
|
f0c521efdf | ||
|
|
b9a61c8e9a | ||
|
|
32aa1e5287 | ||
|
|
da6a126d79 | ||
|
|
d07e902983 | ||
|
|
3c0fc42f84 | ||
|
|
964ccf9192 | ||
|
|
dfdc128faf | ||
|
|
299122e78d | ||
|
|
23a34d7e42 | ||
|
|
67a2f3aa67 | ||
|
|
66e8cf8cea | ||
|
|
35b66c844c | ||
|
|
9e7a1340b9 | ||
|
|
d5a0750ef3 | ||
|
|
618c483c73 | ||
|
|
f10fd1bc12 | ||
|
|
1891f10141 | ||
|
|
b177dacd36 | ||
|
|
283a722923 | ||
|
|
522d0a81e7 | ||
|
|
cd13911db4 | ||
|
|
32423ebc38 | ||
|
|
97054ee691 | ||
|
|
34085c364f | ||
|
|
323521f4c8 | ||
|
|
1ad59d6a7b | ||
|
|
d3045df9c9 | ||
|
|
be48b7aa8b | ||
|
|
3c4688bcfa | ||
|
|
cc22021876 | ||
|
|
c9e79cb4a3 | ||
|
|
f28542a71e | ||
|
|
527a75a4c0 | ||
|
|
df00f8fcfb | ||
|
|
41a00c744f | ||
|
|
2d7b6cd702 | ||
|
|
ff79228178 | ||
|
|
1803124cc6 | ||
|
|
6fcbf66bfb | ||
|
|
e9a3dee234 | ||
|
|
279cd44c7f | ||
|
|
c99f768381 | ||
|
|
89a540d54a | ||
|
|
338edcc02e | ||
|
|
a8eb5164b2 | ||
|
|
9accb44f9c | ||
|
|
8878007106 | ||
|
|
9da666e778 | ||
|
|
aca355dec1 | ||
|
|
efbb2efd3f | ||
|
|
44dc5c4a1d | ||
|
|
6b34ba3a4f | ||
|
|
f1e25cb4a6 | ||
|
|
c9798746ae | ||
|
|
7f70ffc5ce | ||
|
|
551636ec89 | ||
|
|
e1130a01e7 | ||
|
|
b0233cb7c5 | ||
|
|
e480c5bf4d | ||
|
|
7d90f7e93f | ||
|
|
3e6e8579c6 | ||
|
|
3e98e17d18 | ||
|
|
a214f8cf6e | ||
|
|
e8b92fe34d | ||
|
|
fa479c84ac | ||
|
|
40c26dd2c5 | ||
|
|
2c6f069fd1 | ||
|
|
e27dff0c50 | ||
|
|
3f03260ffb | ||
|
|
1315135fca | ||
|
|
f92622e0de | ||
|
|
3bca608db5 | ||
|
|
a93b72d5dd | ||
|
|
d58ca2edbd | ||
|
|
75740ace2a | ||
|
|
d7a1bf6331 | ||
|
|
bbd7d657cc | ||
|
|
f5bf4c81d1 | ||
|
|
02e8336659 | ||
|
|
601812e11b | ||
|
|
ea466320d2 | ||
|
|
25ea831a15 | ||
|
|
8648c79c9e | ||
|
|
7ae4d0e6f8 | ||
|
|
f3564db941 | ||
|
|
97caf12836 | ||
|
|
839d9ef41a | ||
|
|
782bd50f92 | ||
|
|
0a4d971b4a | ||
|
|
22562f767f | ||
|
|
eb80dd1db5 | ||
|
|
51d699ee21 | ||
|
|
83a33bff8c | ||
|
|
273563b9ad | ||
|
|
891990ec09 | ||
|
|
da0cd2b107 |
1
.gitattributes
vendored
1
.gitattributes
vendored
@@ -1,3 +1,2 @@
|
||||
*.cc diff=cpp
|
||||
*.hh diff=cpp
|
||||
*.svg binary
|
||||
|
||||
22
.github/CODEOWNERS
vendored
22
.github/CODEOWNERS
vendored
@@ -2,14 +2,14 @@
|
||||
auth/* @elcallio @vladzcloudius
|
||||
|
||||
# CACHE
|
||||
row_cache* @tgrabiec
|
||||
*mutation* @tgrabiec
|
||||
test/boost/mvcc* @tgrabiec
|
||||
row_cache* @tgrabiec @haaawk
|
||||
*mutation* @tgrabiec @haaawk
|
||||
test/boost/mvcc* @tgrabiec @haaawk
|
||||
|
||||
# CDC
|
||||
cdc/* @kbr- @elcallio @piodul @jul-stas
|
||||
test/cql/cdc_* @kbr- @elcallio @piodul @jul-stas
|
||||
test/boost/cdc_* @kbr- @elcallio @piodul @jul-stas
|
||||
cdc/* @haaawk @kbr- @elcallio @piodul @jul-stas
|
||||
test/cql/cdc_* @haaawk @kbr- @elcallio @piodul @jul-stas
|
||||
test/boost/cdc_* @haaawk @kbr- @elcallio @piodul @jul-stas
|
||||
|
||||
# COMMITLOG / BATCHLOG
|
||||
db/commitlog/* @elcallio
|
||||
@@ -28,12 +28,8 @@ transport/*
|
||||
cql3/* @tgrabiec @psarna @cvybhu
|
||||
|
||||
# COUNTERS
|
||||
counters* @jul-stas
|
||||
tests/counter_test* @jul-stas
|
||||
|
||||
# DOCS
|
||||
docs/* @annastuchlik @tzach
|
||||
docs/alternator @annastuchlik @tzach @nyh @psarna
|
||||
counters* @haaawk @jul-stas
|
||||
tests/counter_test* @haaawk @jul-stas
|
||||
|
||||
# GOSSIP
|
||||
gms/* @tgrabiec @asias
|
||||
@@ -78,7 +74,7 @@ alternator/* @nyh @psarna
|
||||
test/alternator/* @nyh @psarna
|
||||
|
||||
# HINTED HANDOFF
|
||||
db/hints/* @piodul @vladzcloudius
|
||||
db/hints/* @haaawk @piodul @vladzcloudius
|
||||
|
||||
# REDIS
|
||||
redis/* @nyh @syuu1228
|
||||
|
||||
35
.github/workflows/docs-pages.yaml
vendored
35
.github/workflows/docs-pages.yaml
vendored
@@ -1,35 +0,0 @@
|
||||
name: "Docs / Publish"
|
||||
# For more information,
|
||||
# see https://sphinx-theme.scylladb.com/stable/deployment/production.html#available-workflows
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
paths:
|
||||
- "docs/**"
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
release:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
persist-credentials: false
|
||||
fetch-depth: 0
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v3
|
||||
with:
|
||||
python-version: 3.7
|
||||
- name: Set up env
|
||||
run: make -C docs setupenv
|
||||
- name: Build docs
|
||||
run: make -C docs multiversion
|
||||
- name: Build redirects
|
||||
run: make -C docs redirects
|
||||
- name: Deploy docs to GitHub Pages
|
||||
run: ./docs/_utils/deploy.sh
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
29
.github/workflows/docs-pages@v2.yaml
vendored
Normal file
29
.github/workflows/docs-pages@v2.yaml
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
name: "Docs / Publish"
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
paths:
|
||||
- "docs/**"
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
release:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
persist-credentials: false
|
||||
fetch-depth: 0
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v1
|
||||
with:
|
||||
python-version: 3.7
|
||||
- name: Build docs
|
||||
run: make -C docs multiversion
|
||||
- name: Deploy
|
||||
run: ./docs/_utils/deploy.sh
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
28
.github/workflows/docs-pr.yaml
vendored
28
.github/workflows/docs-pr.yaml
vendored
@@ -1,28 +0,0 @@
|
||||
name: "Docs / Build PR"
|
||||
# For more information,
|
||||
# see https://sphinx-theme.scylladb.com/stable/deployment/production.html#available-workflows
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
paths:
|
||||
- "docs/**"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
persist-credentials: false
|
||||
fetch-depth: 0
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v3
|
||||
with:
|
||||
python-version: 3.7
|
||||
- name: Set up env
|
||||
run: make -C docs setupenv
|
||||
- name: Build docs
|
||||
run: make -C docs test
|
||||
25
.github/workflows/docs-pr@v1.yaml
vendored
Normal file
25
.github/workflows/docs-pr@v1.yaml
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
name: "Docs / Build PR"
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
paths:
|
||||
- "docs/**"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
persist-credentials: false
|
||||
fetch-depth: 0
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v1
|
||||
with:
|
||||
python-version: 3.7
|
||||
- name: Build docs
|
||||
run: make -C docs test
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -22,7 +22,6 @@ resources
|
||||
.pytest_cache
|
||||
/expressions.tokens
|
||||
tags
|
||||
!db/tags/
|
||||
testlog
|
||||
test/*/*.reject
|
||||
.vscode
|
||||
@@ -30,4 +29,3 @@ docs/_build
|
||||
docs/poetry.lock
|
||||
compile_commands.json
|
||||
.ccls-cache/
|
||||
.mypy_cache
|
||||
|
||||
3
.mailmap
3
.mailmap
@@ -1,3 +0,0 @@
|
||||
Avi Kivity <avi@scylladb.com> Avi Kivity' via ScyllaDB development <scylladb-dev@googlegroups.com>
|
||||
Raphael S. Carvalho <raphaelsc@scylladb.com> Raphael S. Carvalho' via ScyllaDB development <scylladb-dev@googlegroups.com>
|
||||
Pavel Emelyanov <xemul@scylladb.com> Pavel Emelyanov' via ScyllaDB development <scylladb-dev@googlegroups.com>
|
||||
@@ -337,6 +337,7 @@ set(scylla_sources
|
||||
compaction/size_tiered_compaction_strategy.cc
|
||||
compaction/time_window_compaction_strategy.cc
|
||||
compress.cc
|
||||
connection_notifier.cc
|
||||
converting_mutation_partition_applier.cc
|
||||
counters.cc
|
||||
cql3/abstract_marker.cc
|
||||
@@ -349,7 +350,6 @@ set(scylla_sources
|
||||
cql3/cql3_type.cc
|
||||
cql3/expr/expression.cc
|
||||
cql3/expr/prepare_expr.cc
|
||||
cql3/expr/restrictions.cc
|
||||
cql3/functions/aggregate_fcts.cc
|
||||
cql3/functions/castas_fcts.cc
|
||||
cql3/functions/error_injection_fcts.cc
|
||||
@@ -363,6 +363,7 @@ set(scylla_sources
|
||||
cql3/prepare_context.cc
|
||||
cql3/query_options.cc
|
||||
cql3/query_processor.cc
|
||||
cql3/relation.cc
|
||||
cql3/restrictions/statement_restrictions.cc
|
||||
cql3/result_set.cc
|
||||
cql3/role_name.cc
|
||||
@@ -373,6 +374,7 @@ set(scylla_sources
|
||||
cql3/selection/selector_factories.cc
|
||||
cql3/selection/simple_selector.cc
|
||||
cql3/sets.cc
|
||||
cql3/single_column_relation.cc
|
||||
cql3/statements/alter_keyspace_statement.cc
|
||||
cql3/statements/alter_service_level_statement.cc
|
||||
cql3/statements/alter_table_statement.cc
|
||||
@@ -425,6 +427,7 @@ set(scylla_sources
|
||||
cql3/statements/truncate_statement.cc
|
||||
cql3/statements/update_statement.cc
|
||||
cql3/statements/use_statement.cc
|
||||
cql3/token_relation.cc
|
||||
cql3/type_json.cc
|
||||
cql3/untyped_result_set.cc
|
||||
cql3/update_parameters.cc
|
||||
@@ -450,7 +453,6 @@ set(scylla_sources
|
||||
db/large_data_handler.cc
|
||||
db/legacy_schema_migrator.cc
|
||||
db/marshal/type_parser.cc
|
||||
db/rate_limiter.cc
|
||||
db/schema_tables.cc
|
||||
db/size_estimates_virtual_reader.cc
|
||||
db/snapshot-ctl.cc
|
||||
@@ -466,10 +468,10 @@ set(scylla_sources
|
||||
dht/murmur3_partitioner.cc
|
||||
dht/range_streamer.cc
|
||||
dht/token.cc
|
||||
replica/distributed_loader.cc
|
||||
distributed_loader.cc
|
||||
duration.cc
|
||||
exceptions/exceptions.cc
|
||||
readers/mutation_readers.cc
|
||||
flat_mutation_reader.cc
|
||||
frozen_mutation.cc
|
||||
frozen_schema.cc
|
||||
generic_server.cc
|
||||
@@ -489,7 +491,7 @@ set(scylla_sources
|
||||
index/secondary_index_manager.cc
|
||||
init.cc
|
||||
keys.cc
|
||||
utils/lister.cc
|
||||
lister.cc
|
||||
locator/abstract_replication_strategy.cc
|
||||
locator/azure_snitch.cc
|
||||
locator/ec2_multi_region_snitch.cc
|
||||
@@ -507,7 +509,7 @@ set(scylla_sources
|
||||
locator/token_metadata.cc
|
||||
lang/lua.cc
|
||||
main.cc
|
||||
replica/memtable.cc
|
||||
memtable.cc
|
||||
message/messaging_service.cc
|
||||
multishard_mutation_query.cc
|
||||
mutation.cc
|
||||
@@ -516,7 +518,7 @@ set(scylla_sources
|
||||
mutation_partition_serializer.cc
|
||||
mutation_partition_view.cc
|
||||
mutation_query.cc
|
||||
readers/mutation_reader.cc
|
||||
mutation_reader.cc
|
||||
mutation_writer/feed_writers.cc
|
||||
mutation_writer/multishard_writer.cc
|
||||
mutation_writer/partition_based_splitting_writer.cc
|
||||
@@ -526,7 +528,6 @@ set(scylla_sources
|
||||
partition_version.cc
|
||||
querier.cc
|
||||
query.cc
|
||||
query_ranges_to_vnodes.cc
|
||||
query-result-set.cc
|
||||
raft/fsm.cc
|
||||
raft/log.cc
|
||||
@@ -561,7 +562,6 @@ set(scylla_sources
|
||||
schema_registry.cc
|
||||
serializer.cc
|
||||
service/client_state.cc
|
||||
service/forward_service.cc
|
||||
service/migration_manager.cc
|
||||
service/misc_services.cc
|
||||
service/pager/paging_state.cc
|
||||
@@ -574,6 +574,7 @@ set(scylla_sources
|
||||
service/qos/qos_common.cc
|
||||
service/qos/service_level_controller.cc
|
||||
service/qos/standard_service_level_distributed_data_accessor.cc
|
||||
service/raft/raft_gossip_failure_detector.cc
|
||||
service/raft/raft_group_registry.cc
|
||||
service/raft/raft_rpc.cc
|
||||
service/raft/raft_sys_table_storage.cc
|
||||
|
||||
@@ -18,5 +18,3 @@ If you need help formatting or sending patches, [check out these instructions](h
|
||||
The Scylla C++ source code uses the [Seastar coding style](https://github.com/scylladb/seastar/blob/master/coding-style.md) so please adhere to that in your patches. Note that Scylla code is written with `using namespace seastar`, so should not explicitly add the `seastar::` prefix to Seastar symbols. You will usually not need to add `using namespace seastar` to new source files, because most Scylla header files have `#include "seastarx.hh"`, which does this.
|
||||
|
||||
Header files in Scylla must be self-contained, i.e., each can be included without having to include specific other headers first. To verify that your change did not break this property, run `ninja dev-headers`. If you added or removed header files, you must `touch configure.py` first - this will cause `configure.py` to be automatically re-run to generate a fresh list of header files.
|
||||
|
||||
For more criteria on what reviewers consider good code, see the [review checklist](https://github.com/scylladb/scylla/blob/master/docs/dev/review-checklist.md).
|
||||
|
||||
36
HACKING.md
36
HACKING.md
@@ -383,40 +383,6 @@ Open the link printed at the end. Be horrified. Go and write more tests.
|
||||
|
||||
For more details see `./scripts/coverage.py --help`.
|
||||
|
||||
### Resolving stack backtraces
|
||||
|
||||
Scylla may print stack backtraces to the log for several reasons.
|
||||
For example:
|
||||
- When aborting (e.g. due to assertion failure, internal error, or segfault)
|
||||
- When detecting seastar reactor stalls (where a seastar task runs for a long time without yielding the cpu to other tasks on that shard)
|
||||
|
||||
The backtraces contain code pointers so they are not very helpful without resolving into code locations.
|
||||
To resolve the backtraces, one needs the scylla relocatable package that contains the scylla binary (with debug information),
|
||||
as well as the dynamic libraries it is linked against.
|
||||
|
||||
Builds from our automated build system are uploaded to the cloud
|
||||
and can be searched on http://backtrace.scylladb.com/
|
||||
|
||||
Make sure you have the scylla server exact `build-id` to locate
|
||||
its respective relocatable package, required for decoding backtraces it prints.
|
||||
|
||||
The build-id is printed to the system log when scylla starts.
|
||||
It can also be found by executing `scylla --build-id`, or
|
||||
by using the `file` utility, for example:
|
||||
```
|
||||
$ scylla --build-id
|
||||
4cba12e6eb290a406bfa4930918db23941fd4be3
|
||||
|
||||
$ file scylla
|
||||
scylla: ELF 64-bit LSB executable, x86-64, version 1 (SYSV), dynamically linked, interpreter /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////lib64/ld-linux-x86-64.so.2, for GNU/Linux 3.2.0, BuildID[sha1]=4cba12e6eb290a406bfa4930918db23941fd4be3, with debug_info, not stripped, too many notes (256)
|
||||
```
|
||||
|
||||
To find the build-id of a coredump, use the `eu-unstrip` utility as follows:
|
||||
```
|
||||
$ eu-unstrip -n --core <coredump> | awk '/scylla$/ { s=$2; sub(/@.*$/, "", s); print s; exit(0); }'
|
||||
4cba12e6eb290a406bfa4930918db23941fd4be3
|
||||
```
|
||||
|
||||
### Core dump debugging
|
||||
|
||||
See [debugging.md](docs/dev/debugging.md).
|
||||
See [debugging.md](debugging.md).
|
||||
|
||||
@@ -42,7 +42,7 @@ For further information, please see:
|
||||
* [Docker image build documentation] for information on how to build Docker images.
|
||||
|
||||
[developer documentation]: HACKING.md
|
||||
[build documentation]: docs/dev/building.md
|
||||
[build documentation]: docs/guides/building.md
|
||||
[docker image build documentation]: dist/docker/debian/README.md
|
||||
|
||||
## Running Scylla
|
||||
@@ -65,7 +65,7 @@ $ ./tools/toolchain/dbuild ./build/release/scylla --help
|
||||
|
||||
## Testing
|
||||
|
||||
See [test.py manual](docs/dev/testing.md).
|
||||
See [test.py manual](docs/guides/testing.md).
|
||||
|
||||
## Scylla APIs and compatibility
|
||||
By default, Scylla is compatible with Apache Cassandra and its APIs - CQL and
|
||||
@@ -78,7 +78,7 @@ and the current compatibility of this feature as well as Scylla-specific extensi
|
||||
|
||||
## Documentation
|
||||
|
||||
Documentation can be found [here](docs/dev/README.md).
|
||||
Documentation can be found [here](https://scylla.docs.scylladb.com).
|
||||
Seastar documentation can be found [here](http://docs.seastar.io/master/index.html).
|
||||
User documentation can be found [here](https://docs.scylladb.com/).
|
||||
|
||||
|
||||
@@ -60,7 +60,7 @@ fi
|
||||
|
||||
# Default scylla product/version tags
|
||||
PRODUCT=scylla
|
||||
VERSION=5.1.19
|
||||
VERSION=5.0.13
|
||||
|
||||
if test -f version
|
||||
then
|
||||
@@ -68,7 +68,7 @@ then
|
||||
SCYLLA_RELEASE=$(cat version | awk -F'-' '{print $2}')
|
||||
else
|
||||
DATE=$(date --utc +%Y%m%d)
|
||||
GIT_COMMIT=$(git -C "$SCRIPT_DIR" log --pretty=format:'%h' -n 1 --abbrev=12)
|
||||
GIT_COMMIT=$(git -C "$SCRIPT_DIR" log --pretty=format:'%h' -n 1)
|
||||
SCYLLA_VERSION=$VERSION
|
||||
# For custom package builds, replace "0" with "counter.your_name",
|
||||
# where counter starts at 1 and increments for successive versions.
|
||||
|
||||
2
abseil
2
abseil
Submodule abseil updated: 9e408e050f...f70eadadd7
@@ -129,7 +129,7 @@ future<std::string> get_key_from_roles(service::storage_proxy& proxy, std::strin
|
||||
std::vector<query::clustering_range> bounds{query::clustering_range::make_open_ended_both_sides()};
|
||||
const column_definition* salted_hash_col = schema->get_column_definition(bytes("salted_hash"));
|
||||
if (!salted_hash_col) {
|
||||
co_await coroutine::return_exception(api_error::unrecognized_client(format("Credentials cannot be fetched for: {}", username)));
|
||||
co_return coroutine::make_exception(api_error::unrecognized_client(format("Credentials cannot be fetched for: {}", username)));
|
||||
}
|
||||
auto selection = cql3::selection::selection::for_columns(schema, {salted_hash_col});
|
||||
auto partition_slice = query::partition_slice(std::move(bounds), {}, query::column_id_vector{salted_hash_col->id}, selection->get_query_options());
|
||||
@@ -145,11 +145,11 @@ future<std::string> get_key_from_roles(service::storage_proxy& proxy, std::strin
|
||||
|
||||
auto result_set = builder.build();
|
||||
if (result_set->empty()) {
|
||||
co_await coroutine::return_exception(api_error::unrecognized_client(format("User not found: {}", username)));
|
||||
co_return coroutine::make_exception(api_error::unrecognized_client(format("User not found: {}", username)));
|
||||
}
|
||||
const bytes_opt& salted_hash = result_set->rows().front().front(); // We only asked for 1 row and 1 column
|
||||
if (!salted_hash) {
|
||||
co_await coroutine::return_exception(api_error::unrecognized_client(format("No password found for user: {}", username)));
|
||||
co_return coroutine::make_exception(api_error::unrecognized_client(format("No password found for user: {}", username)));
|
||||
}
|
||||
co_return value_cast<sstring>(utf8_type->deserialize(*salted_hash));
|
||||
}
|
||||
|
||||
@@ -73,9 +73,6 @@ public:
|
||||
static api_error serialization(std::string msg) {
|
||||
return api_error("SerializationException", std::move(msg));
|
||||
}
|
||||
static api_error table_not_found(std::string msg) {
|
||||
return api_error("TableNotFoundException", std::move(msg));
|
||||
}
|
||||
static api_error internal(std::string msg) {
|
||||
return api_error("InternalServerError", std::move(msg), reply::status_type::internal_server_error);
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -81,10 +81,10 @@ namespace parsed {
|
||||
class path;
|
||||
};
|
||||
|
||||
const std::map<sstring, sstring>& get_tags_of_table(schema_ptr schema);
|
||||
std::optional<std::string> find_tag(const schema& s, const sstring& tag);
|
||||
future<> update_tags(service::migration_manager& mm, schema_ptr schema, std::map<sstring, sstring>&& tags_map);
|
||||
schema_ptr get_table(service::storage_proxy& proxy, const rjson::value& request);
|
||||
bool is_alternator_keyspace(const sstring& ks_name);
|
||||
// Wraps the db::get_tags_of_table and throws if the table is missing the tags extension.
|
||||
const std::map<sstring, sstring>& get_tags_of_table_or_throw(schema_ptr schema);
|
||||
|
||||
// An attribute_path_map object is used to hold data for various attributes
|
||||
// paths (parsed::path) in a hierarchy of attribute paths. Each attribute path
|
||||
@@ -144,11 +144,6 @@ template<typename T>
|
||||
using attribute_path_map = std::unordered_map<std::string, attribute_path_map_node<T>>;
|
||||
|
||||
using attrs_to_get_node = attribute_path_map_node<std::monostate>;
|
||||
// attrs_to_get lists which top-level attribute are needed, and possibly also
|
||||
// which part of the top-level attribute is really needed (when nested
|
||||
// attribute paths appeared in the query).
|
||||
// Most code actually uses optional<attrs_to_get>. There, a disengaged
|
||||
// optional means we should get all attributes, not specific ones.
|
||||
using attrs_to_get = attribute_path_map<std::monostate>;
|
||||
|
||||
|
||||
@@ -196,7 +191,6 @@ public:
|
||||
future<request_return_type> describe_stream(client_state& client_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> get_shard_iterator(client_state& client_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> get_records(client_state& client_state, tracing::trace_state_ptr, service_permit permit, rjson::value request);
|
||||
future<request_return_type> describe_continuous_backups(client_state& client_state, service_permit permit, rjson::value request);
|
||||
|
||||
future<> start();
|
||||
future<> stop() { return make_ready_future<>(); }
|
||||
@@ -212,25 +206,21 @@ public:
|
||||
private:
|
||||
friend class rmw_operation;
|
||||
|
||||
static bool is_alternator_keyspace(const sstring& ks_name);
|
||||
static sstring make_keyspace_name(const sstring& table_name);
|
||||
static void describe_key_schema(rjson::value& parent, const schema&, std::unordered_map<std::string,std::string> * = nullptr);
|
||||
static void describe_key_schema(rjson::value& parent, const schema& schema, std::unordered_map<std::string,std::string>&);
|
||||
|
||||
public:
|
||||
public:
|
||||
static std::optional<rjson::value> describe_single_item(schema_ptr,
|
||||
const query::partition_slice&,
|
||||
const cql3::selection::selection&,
|
||||
const query::result&,
|
||||
const std::optional<attrs_to_get>&);
|
||||
|
||||
static future<std::vector<rjson::value>> describe_multi_item(schema_ptr schema,
|
||||
const query::partition_slice&& slice,
|
||||
shared_ptr<cql3::selection::selection> selection,
|
||||
foreign_ptr<lw_shared_ptr<query::result>> query_result,
|
||||
shared_ptr<const std::optional<attrs_to_get>> attrs_to_get);
|
||||
const attrs_to_get&);
|
||||
|
||||
static void describe_single_item(const cql3::selection::selection&,
|
||||
const std::vector<bytes_opt>&,
|
||||
const std::optional<attrs_to_get>&,
|
||||
const attrs_to_get&,
|
||||
rjson::value&,
|
||||
bool = false);
|
||||
|
||||
|
||||
@@ -29,7 +29,7 @@
|
||||
namespace alternator {
|
||||
|
||||
template <typename Func, typename Result = std::result_of_t<Func(expressionsParser&)>>
|
||||
Result do_with_parser(std::string_view input, Func&& f) {
|
||||
Result do_with_parser(std::string input, Func&& f) {
|
||||
expressionsLexer::InputStreamType input_stream{
|
||||
reinterpret_cast<const ANTLR_UINT8*>(input.data()),
|
||||
ANTLR_ENC_UTF8,
|
||||
@@ -44,7 +44,7 @@ Result do_with_parser(std::string_view input, Func&& f) {
|
||||
}
|
||||
|
||||
parsed::update_expression
|
||||
parse_update_expression(std::string_view query) {
|
||||
parse_update_expression(std::string query) {
|
||||
try {
|
||||
return do_with_parser(query, std::mem_fn(&expressionsParser::update_expression));
|
||||
} catch (...) {
|
||||
@@ -53,7 +53,7 @@ parse_update_expression(std::string_view query) {
|
||||
}
|
||||
|
||||
std::vector<parsed::path>
|
||||
parse_projection_expression(std::string_view query) {
|
||||
parse_projection_expression(std::string query) {
|
||||
try {
|
||||
return do_with_parser(query, std::mem_fn(&expressionsParser::projection_expression));
|
||||
} catch (...) {
|
||||
@@ -62,7 +62,7 @@ parse_projection_expression(std::string_view query) {
|
||||
}
|
||||
|
||||
parsed::condition_expression
|
||||
parse_condition_expression(std::string_view query) {
|
||||
parse_condition_expression(std::string query) {
|
||||
try {
|
||||
return do_with_parser(query, std::mem_fn(&expressionsParser::condition_expression));
|
||||
} catch (...) {
|
||||
|
||||
@@ -26,9 +26,9 @@ public:
|
||||
using runtime_error::runtime_error;
|
||||
};
|
||||
|
||||
parsed::update_expression parse_update_expression(std::string_view query);
|
||||
std::vector<parsed::path> parse_projection_expression(std::string_view query);
|
||||
parsed::condition_expression parse_condition_expression(std::string_view query);
|
||||
parsed::update_expression parse_update_expression(std::string query);
|
||||
std::vector<parsed::path> parse_projection_expression(std::string query);
|
||||
parsed::condition_expression parse_condition_expression(std::string query);
|
||||
|
||||
void resolve_update_expression(parsed::update_expression& ue,
|
||||
const rjson::value* expression_attribute_names,
|
||||
|
||||
@@ -14,14 +14,11 @@
|
||||
#include "rapidjson/writer.h"
|
||||
#include "concrete_types.hh"
|
||||
#include "cql3/type_json.hh"
|
||||
#include "position_in_partition.hh"
|
||||
|
||||
static logging::logger slogger("alternator-serialization");
|
||||
|
||||
namespace alternator {
|
||||
|
||||
bool is_alternator_keyspace(const sstring& ks_name);
|
||||
|
||||
type_info type_info_from_string(std::string_view type) {
|
||||
static thread_local const std::unordered_map<std::string_view, type_info> type_infos = {
|
||||
{"S", {alternator_type::S, utf8_type}},
|
||||
@@ -164,43 +161,32 @@ bytes get_key_column_value(const rjson::value& item, const column_definition& co
|
||||
return get_key_from_typed_value(*key_typed_value, column);
|
||||
}
|
||||
|
||||
// Parses the JSON encoding for a key value, which is a map with a single
|
||||
// entry whose key is the type and the value is the encoded value.
|
||||
// If this type does not match the desired "type_str", an api_error::validation
|
||||
// error is thrown (the "name" parameter is the name of the column which will
|
||||
// mentioned in the exception message).
|
||||
// If the type does match, a reference to the encoded value is returned.
|
||||
static const rjson::value& get_typed_value(const rjson::value& key_typed_value, std::string_view type_str, std::string_view name, std::string_view value_name) {
|
||||
if (!key_typed_value.IsObject() || key_typed_value.MemberCount() != 1 ||
|
||||
!key_typed_value.MemberBegin()->value.IsString()) {
|
||||
throw api_error::validation(
|
||||
format("Malformed value object for {} {}: {}",
|
||||
value_name, name, key_typed_value));
|
||||
}
|
||||
|
||||
auto it = key_typed_value.MemberBegin();
|
||||
if (rjson::to_string_view(it->name) != type_str) {
|
||||
throw api_error::validation(
|
||||
format("Type mismatch: expected type {} for {} {}, got type {}",
|
||||
type_str, value_name, name, it->name));
|
||||
}
|
||||
return it->value;
|
||||
}
|
||||
|
||||
// Parses the JSON encoding for a key value, which is a map with a single
|
||||
// entry, whose key is the type (expected to match the key column's type)
|
||||
// and the value is the encoded value.
|
||||
bytes get_key_from_typed_value(const rjson::value& key_typed_value, const column_definition& column) {
|
||||
auto& value = get_typed_value(key_typed_value, type_to_string(column.type), column.name_as_text(), "key column");
|
||||
std::string_view value_view = rjson::to_string_view(value);
|
||||
if (!key_typed_value.IsObject() || key_typed_value.MemberCount() != 1 ||
|
||||
!key_typed_value.MemberBegin()->value.IsString()) {
|
||||
throw api_error::validation(
|
||||
format("Malformed value object for key column {}: {}",
|
||||
column.name_as_text(), key_typed_value));
|
||||
}
|
||||
|
||||
auto it = key_typed_value.MemberBegin();
|
||||
if (it->name != type_to_string(column.type)) {
|
||||
throw api_error::validation(
|
||||
format("Type mismatch: expected type {} for key column {}, got type {}",
|
||||
type_to_string(column.type), column.name_as_text(), it->name));
|
||||
}
|
||||
std::string_view value_view = rjson::to_string_view(it->value);
|
||||
if (value_view.empty()) {
|
||||
throw api_error::validation(
|
||||
format("The AttributeValue for a key attribute cannot contain an empty string value. Key: {}", column.name_as_text()));
|
||||
}
|
||||
if (column.type == bytes_type) {
|
||||
return rjson::base64_decode(value);
|
||||
return rjson::base64_decode(it->value);
|
||||
} else {
|
||||
return column.type->from_string(value_view);
|
||||
return column.type->from_string(rjson::to_string_view(it->value));
|
||||
}
|
||||
|
||||
}
|
||||
@@ -251,39 +237,6 @@ clustering_key ck_from_json(const rjson::value& item, schema_ptr schema) {
|
||||
return clustering_key::from_exploded(raw_ck);
|
||||
}
|
||||
|
||||
position_in_partition pos_from_json(const rjson::value& item, schema_ptr schema) {
|
||||
auto ck = ck_from_json(item, schema);
|
||||
if (is_alternator_keyspace(schema->ks_name())) {
|
||||
return position_in_partition::for_key(std::move(ck));
|
||||
}
|
||||
const auto region_item = rjson::find(item, scylla_paging_region);
|
||||
const auto weight_item = rjson::find(item, scylla_paging_weight);
|
||||
if (bool(region_item) != bool(weight_item)) {
|
||||
throw api_error::validation("Malformed value object: region and weight has to be either both missing or both present");
|
||||
}
|
||||
partition_region region;
|
||||
bound_weight weight;
|
||||
if (region_item) {
|
||||
auto region_view = rjson::to_string_view(get_typed_value(*region_item, "S", scylla_paging_region, "key region"));
|
||||
auto weight_view = rjson::to_string_view(get_typed_value(*weight_item, "N", scylla_paging_weight, "key weight"));
|
||||
auto region = parse_partition_region(region_view);
|
||||
if (weight_view == "-1") {
|
||||
weight = bound_weight::before_all_prefixed;
|
||||
} else if (weight_view == "0") {
|
||||
weight = bound_weight::equal;
|
||||
} else if (weight_view == "1") {
|
||||
weight = bound_weight::after_all_prefixed;
|
||||
} else {
|
||||
throw std::runtime_error(fmt::format("Invalid value for weight: {}", weight_view));
|
||||
}
|
||||
return position_in_partition(region, weight, region == partition_region::clustered ? std::optional(std::move(ck)) : std::nullopt);
|
||||
}
|
||||
if (ck.is_empty()) {
|
||||
return position_in_partition(position_in_partition::partition_start_tag_t());
|
||||
}
|
||||
return position_in_partition::for_key(std::move(ck));
|
||||
}
|
||||
|
||||
big_decimal unwrap_number(const rjson::value& v, std::string_view diagnostic) {
|
||||
if (!v.IsObject() || v.MemberCount() != 1) {
|
||||
throw api_error::validation(format("{}: invalid number object", diagnostic));
|
||||
|
||||
@@ -17,8 +17,6 @@
|
||||
#include "utils/rjson.hh"
|
||||
#include "utils/big_decimal.hh"
|
||||
|
||||
class position_in_partition;
|
||||
|
||||
namespace alternator {
|
||||
|
||||
enum class alternator_type : int8_t {
|
||||
@@ -35,9 +33,6 @@ struct type_representation {
|
||||
data_type dtype;
|
||||
};
|
||||
|
||||
inline constexpr std::string_view scylla_paging_region(":scylla:paging:region");
|
||||
inline constexpr std::string_view scylla_paging_weight(":scylla:paging:weight");
|
||||
|
||||
type_info type_info_from_string(std::string_view type);
|
||||
type_representation represent_type(alternator_type atype);
|
||||
|
||||
@@ -52,7 +47,6 @@ rjson::value json_key_column_value(bytes_view cell, const column_definition& col
|
||||
|
||||
partition_key pk_from_json(const rjson::value& item, schema_ptr schema);
|
||||
clustering_key ck_from_json(const rjson::value& item, schema_ptr schema);
|
||||
position_in_partition pos_from_json(const rjson::value& item, schema_ptr schema);
|
||||
|
||||
// If v encodes a number (i.e., it is a {"N": [...]}, returns an object representing it. Otherwise,
|
||||
// raises ValidationException with diagnostic.
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
#include "auth.hh"
|
||||
#include <cctype>
|
||||
#include "service/storage_proxy.hh"
|
||||
#include "locator/snitch_base.hh"
|
||||
#include "gms/gossiper.hh"
|
||||
#include "utils/overloaded_functor.hh"
|
||||
#include "utils/fb_utilities.hh"
|
||||
@@ -151,10 +152,8 @@ public:
|
||||
|
||||
protected:
|
||||
void generate_error_reply(reply& rep, const api_error& err) {
|
||||
rjson::value results = rjson::empty_object();
|
||||
rjson::add(results, "__type", rjson::from_string("com.amazonaws.dynamodb.v20120810#" + err._type));
|
||||
rjson::add(results, "message", err._msg);
|
||||
rep._content = rjson::print(std::move(results));
|
||||
rep._content += "{\"__type\":\"com.amazonaws.dynamodb.v20120810#" + err._type + "\"," +
|
||||
"\"message\":\"" + err._msg + "\"}";
|
||||
rep._status = err._http_code;
|
||||
slogger.trace("api_handler error case: {}", rep._content);
|
||||
}
|
||||
@@ -200,9 +199,10 @@ protected:
|
||||
// It's very easy to get a list of all live nodes on the cluster,
|
||||
// using _gossiper().get_live_members(). But getting
|
||||
// just the list of live nodes in this DC needs more elaborate code:
|
||||
auto& topology = _proxy.get_token_metadata_ptr()->get_topology();
|
||||
sstring local_dc = topology.get_datacenter();
|
||||
std::unordered_set<gms::inet_address> local_dc_nodes = topology.get_datacenter_endpoints().at(local_dc);
|
||||
sstring local_dc = locator::i_endpoint_snitch::get_local_snitch_ptr()->get_datacenter(
|
||||
utils::fb_utilities::get_broadcast_address());
|
||||
std::unordered_set<gms::inet_address> local_dc_nodes =
|
||||
_proxy.get_token_metadata_ptr()->get_topology().get_datacenter_endpoints().at(local_dc);
|
||||
for (auto& ip : local_dc_nodes) {
|
||||
if (_gossiper.is_alive(ip)) {
|
||||
rjson::push_back(results, rjson::from_string(ip.to_sstring()));
|
||||
@@ -520,9 +520,6 @@ server::server(executor& exec, service::storage_proxy& proxy, gms::gossiper& gos
|
||||
{"GetRecords", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.get_records(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"DescribeContinuousBackups", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.describe_continuous_backups(client_state, std::move(permit), std::move(json_request));
|
||||
}},
|
||||
} {
|
||||
}
|
||||
|
||||
@@ -614,7 +611,7 @@ future<> server::json_parser::stop() {
|
||||
|
||||
const char* api_error::what() const noexcept {
|
||||
if (_what_string.empty()) {
|
||||
_what_string = format("{} {}: {}", static_cast<int>(_http_code), _type, _msg);
|
||||
_what_string = format("{} {}: {}", _http_code, _type, _msg);
|
||||
}
|
||||
return _what_string.c_str();
|
||||
}
|
||||
|
||||
@@ -33,6 +33,7 @@
|
||||
#include "gms/feature_service.hh"
|
||||
|
||||
#include "executor.hh"
|
||||
#include "tags_extension.hh"
|
||||
#include "rmw_operation.hh"
|
||||
|
||||
/**
|
||||
@@ -837,14 +838,14 @@ future<executor::request_return_type> executor::get_records(client_state& client
|
||||
static const bytes op_column_name = cdc::log_meta_column_name_bytes("operation");
|
||||
static const bytes eor_column_name = cdc::log_meta_column_name_bytes("end_of_batch");
|
||||
|
||||
std::optional<attrs_to_get> key_names = boost::copy_range<attrs_to_get>(
|
||||
auto key_names = boost::copy_range<attrs_to_get>(
|
||||
boost::range::join(std::move(base->partition_key_columns()), std::move(base->clustering_key_columns()))
|
||||
| boost::adaptors::transformed([&] (const column_definition& cdef) {
|
||||
return std::make_pair<std::string, attrs_to_get_node>(cdef.name_as_text(), {}); })
|
||||
);
|
||||
// Include all base table columns as values (in case pre or post is enabled).
|
||||
// This will include attributes not stored in the frozen map column
|
||||
std::optional<attrs_to_get> attr_names = boost::copy_range<attrs_to_get>(base->regular_columns()
|
||||
auto attr_names = boost::copy_range<attrs_to_get>(base->regular_columns()
|
||||
// this will include the :attrs column, which we will also force evaluating.
|
||||
// But not having this set empty forces out any cdc columns from actual result
|
||||
| boost::adaptors::transformed([] (const column_definition& cdef) {
|
||||
@@ -1049,10 +1050,10 @@ void executor::add_stream_options(const rjson::value& stream_specification, sche
|
||||
if (stream_enabled->GetBool()) {
|
||||
auto db = sp.data_dictionary();
|
||||
|
||||
if (!db.features().cdc) {
|
||||
if (!db.features().cluster_supports_cdc()) {
|
||||
throw api_error::validation("StreamSpecification: streams (CDC) feature not enabled in cluster.");
|
||||
}
|
||||
if (!db.features().alternator_streams) {
|
||||
if (!db.features().cluster_supports_alternator_streams()) {
|
||||
throw api_error::validation("StreamSpecification: alternator streams feature not enabled in cluster.");
|
||||
}
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
#include "schema.hh"
|
||||
#include "db/extensions.hh"
|
||||
|
||||
namespace db {
|
||||
namespace alternator {
|
||||
|
||||
class tags_extension : public schema_extension {
|
||||
public:
|
||||
@@ -37,9 +37,4 @@ private:
|
||||
std::map<sstring, sstring> _tags;
|
||||
};
|
||||
|
||||
// Information whether the view updates are synchronous is stored using the
|
||||
// SYNCHRONOUS_VIEW_UPDATES_TAG_KEY tag. Value of this tag is a stored as a
|
||||
// serialized boolean value ("true" or "false")
|
||||
static const sstring SYNCHRONOUS_VIEW_UPDATES_TAG_KEY("system:synchronous_view_updates");
|
||||
|
||||
}
|
||||
@@ -13,7 +13,6 @@
|
||||
#include <seastar/core/coroutine.hh>
|
||||
#include <seastar/core/sleep.hh>
|
||||
#include <seastar/core/future.hh>
|
||||
#include <seastar/core/lowres_clock.hh>
|
||||
#include <seastar/coroutine/maybe_yield.hh>
|
||||
#include <boost/multiprecision/cpp_int.hpp>
|
||||
|
||||
@@ -45,8 +44,6 @@
|
||||
#include "alternator/controller.hh"
|
||||
#include "alternator/serialization.hh"
|
||||
#include "dht/sharder.hh"
|
||||
#include "db/config.hh"
|
||||
#include "db/tags/utils.hh"
|
||||
|
||||
#include "ttl.hh"
|
||||
|
||||
@@ -65,7 +62,7 @@ static const sstring TTL_TAG_KEY("system:ttl_attribute");
|
||||
|
||||
future<executor::request_return_type> executor::update_time_to_live(client_state& client_state, service_permit permit, rjson::value request) {
|
||||
_stats.api_operations.update_time_to_live++;
|
||||
if (!_proxy.data_dictionary().features().alternator_ttl) {
|
||||
if (!_proxy.data_dictionary().features().cluster_supports_alternator_ttl()) {
|
||||
co_return api_error::unknown_operation("UpdateTimeToLive not yet supported. Experimental support is available if the 'alternator-ttl' experimental feature is enabled on all nodes.");
|
||||
}
|
||||
|
||||
@@ -92,7 +89,7 @@ future<executor::request_return_type> executor::update_time_to_live(client_state
|
||||
}
|
||||
sstring attribute_name(v->GetString(), v->GetStringLength());
|
||||
|
||||
std::map<sstring, sstring> tags_map = get_tags_of_table_or_throw(schema);
|
||||
std::map<sstring, sstring> tags_map = get_tags_of_table(schema);
|
||||
if (enabled) {
|
||||
if (tags_map.contains(TTL_TAG_KEY)) {
|
||||
co_return api_error::validation("TTL is already enabled");
|
||||
@@ -109,7 +106,7 @@ future<executor::request_return_type> executor::update_time_to_live(client_state
|
||||
}
|
||||
tags_map.erase(TTL_TAG_KEY);
|
||||
}
|
||||
co_await db::update_tags(_mm, schema, std::move(tags_map));
|
||||
co_await update_tags(_mm, schema, std::move(tags_map));
|
||||
// Prepare the response, which contains a TimeToLiveSpecification
|
||||
// basically identical to the request's
|
||||
rjson::value response = rjson::empty_object();
|
||||
@@ -120,7 +117,7 @@ future<executor::request_return_type> executor::update_time_to_live(client_state
|
||||
future<executor::request_return_type> executor::describe_time_to_live(client_state& client_state, service_permit permit, rjson::value request) {
|
||||
_stats.api_operations.describe_time_to_live++;
|
||||
schema_ptr schema = get_table(_proxy, request);
|
||||
std::map<sstring, sstring> tags_map = get_tags_of_table_or_throw(schema);
|
||||
std::map<sstring, sstring> tags_map = get_tags_of_table(schema);
|
||||
rjson::value desc = rjson::empty_object();
|
||||
auto i = tags_map.find(TTL_TAG_KEY);
|
||||
if (i == tags_map.end()) {
|
||||
@@ -170,6 +167,8 @@ expiration_service::expiration_service(data_dictionary::database db, service::st
|
||||
: _db(db)
|
||||
, _proxy(proxy)
|
||||
{
|
||||
//FIXME: add metrics for the service
|
||||
//setup_metrics();
|
||||
}
|
||||
|
||||
// Convert the big_decimal used to represent expiration time to an integer.
|
||||
@@ -285,8 +284,7 @@ static future<> expire_item(service::storage_proxy& proxy,
|
||||
return proxy.mutate(std::vector<mutation>{std::move(m)},
|
||||
db::consistency_level::LOCAL_QUORUM,
|
||||
executor::default_timeout(), // FIXME - which timeout?
|
||||
qs.get_trace_state(), qs.get_permit(),
|
||||
db::allow_per_partition_rate_limit::no);
|
||||
qs.get_trace_state(), qs.get_permit());
|
||||
}
|
||||
|
||||
static size_t random_offset(size_t min, size_t max) {
|
||||
@@ -378,11 +376,12 @@ static std::vector<std::pair<dht::token_range, gms::inet_address>> get_secondary
|
||||
enum primary_or_secondary_t {primary, secondary};
|
||||
template<primary_or_secondary_t primary_or_secondary>
|
||||
class token_ranges_owned_by_this_shard {
|
||||
// ranges_holder_primary holds just the primary ranges themselves
|
||||
class ranges_holder_primary {
|
||||
template<primary_or_secondary_t> class ranges_holder;
|
||||
// ranges_holder<primary> holds just the primary ranges themselves
|
||||
template<> class ranges_holder<primary> {
|
||||
const dht::token_range_vector _token_ranges;
|
||||
public:
|
||||
ranges_holder_primary(const locator::effective_replication_map_ptr& erm, gms::gossiper& g, gms::inet_address ep)
|
||||
ranges_holder(const locator::effective_replication_map_ptr& erm, gms::inet_address ep)
|
||||
: _token_ranges(erm->get_primary_ranges(ep)) {}
|
||||
std::size_t size() const { return _token_ranges.size(); }
|
||||
const dht::token_range& operator[](std::size_t i) const {
|
||||
@@ -394,13 +393,13 @@ class token_ranges_owned_by_this_shard {
|
||||
};
|
||||
// ranges_holder<secondary> holds the secondary token ranges plus each
|
||||
// range's primary owner, needed to implement should_skip().
|
||||
class ranges_holder_secondary {
|
||||
template<> class ranges_holder<secondary> {
|
||||
std::vector<std::pair<dht::token_range, gms::inet_address>> _token_ranges;
|
||||
gms::gossiper& _gossiper;
|
||||
public:
|
||||
ranges_holder_secondary(const locator::effective_replication_map_ptr& erm, gms::gossiper& g, gms::inet_address ep)
|
||||
ranges_holder(const locator::effective_replication_map_ptr& erm, gms::inet_address ep)
|
||||
: _token_ranges(get_secondary_ranges(erm, ep))
|
||||
, _gossiper(g) {}
|
||||
, _gossiper(gms::get_local_gossiper()) {}
|
||||
std::size_t size() const { return _token_ranges.size(); }
|
||||
const dht::token_range& operator[](std::size_t i) const {
|
||||
return _token_ranges[i].first;
|
||||
@@ -415,21 +414,17 @@ class token_ranges_owned_by_this_shard {
|
||||
// _token_ranges will contain a list of token ranges owned by this node.
|
||||
// We'll further need to split each such range to the pieces owned by
|
||||
// the current shard, using _intersecter.
|
||||
using ranges_holder = std::conditional_t<
|
||||
primary_or_secondary == primary_or_secondary_t::primary,
|
||||
ranges_holder_primary,
|
||||
ranges_holder_secondary>;
|
||||
const ranges_holder _token_ranges;
|
||||
const ranges_holder<primary_or_secondary> _token_ranges;
|
||||
// NOTICE: _range_idx is used modulo _token_ranges size when accessing
|
||||
// the data to ensure that it doesn't go out of bounds
|
||||
size_t _range_idx;
|
||||
size_t _end_idx;
|
||||
std::optional<dht::selective_token_range_sharder> _intersecter;
|
||||
public:
|
||||
token_ranges_owned_by_this_shard(replica::database& db, gms::gossiper& g, schema_ptr s)
|
||||
token_ranges_owned_by_this_shard(replica::database& db, schema_ptr s)
|
||||
: _s(s)
|
||||
, _token_ranges(db.find_keyspace(s->ks_name()).get_effective_replication_map(),
|
||||
g, utils::fb_utilities::get_broadcast_address())
|
||||
utils::fb_utilities::get_broadcast_address())
|
||||
, _range_idx(random_offset(0, _token_ranges.size() - 1))
|
||||
, _end_idx(_range_idx + _token_ranges.size())
|
||||
{
|
||||
@@ -507,8 +502,6 @@ struct scan_ranges_context {
|
||||
selection = cql3::selection::selection::wildcard(s);
|
||||
query::partition_slice::option_set opts = selection->get_query_options();
|
||||
opts.set<query::partition_slice::option::allow_short_read>();
|
||||
// It is important that the scan bypass cache to avoid polluting it:
|
||||
opts.set<query::partition_slice::option::bypass_cache>();
|
||||
std::vector<query::clustering_range> ck_bounds{query::clustering_range::make_open_ended_both_sides()};
|
||||
auto partition_slice = query::partition_slice(std::move(ck_bounds), {}, std::move(regular_columns), opts);
|
||||
command = ::make_lw_shared<query::read_command>(s->id(), s->version(), partition_slice, proxy.get_max_result_size(partition_slice));
|
||||
@@ -528,14 +521,13 @@ struct scan_ranges_context {
|
||||
// Scan data in a list of token ranges in one table, looking for expired
|
||||
// items and deleting them.
|
||||
// Because of issue #9167, partition_ranges must have a single partition
|
||||
// range for this code to work correctly.
|
||||
// for this code to work correctly.
|
||||
static future<> scan_table_ranges(
|
||||
service::storage_proxy& proxy,
|
||||
const scan_ranges_context& scan_ctx,
|
||||
dht::partition_range_vector&& partition_ranges,
|
||||
abort_source& abort_source,
|
||||
named_semaphore& page_sem,
|
||||
expiration_service::stats& expiration_stats)
|
||||
named_semaphore& page_sem)
|
||||
{
|
||||
const schema_ptr& s = scan_ctx.s;
|
||||
assert (partition_ranges.size() == 1); // otherwise issue #9167 will cause incorrect results.
|
||||
@@ -603,7 +595,6 @@ static future<> scan_table_ranges(
|
||||
expired = is_expired(n, now);
|
||||
}
|
||||
if (expired) {
|
||||
expiration_stats.items_deleted++;
|
||||
// FIXME: maybe don't recalculate new_timestamp() all the time
|
||||
// FIXME: if expire_item() throws on timeout, we need to retry it.
|
||||
auto ts = api::new_timestamp();
|
||||
@@ -615,7 +606,7 @@ static future<> scan_table_ranges(
|
||||
}
|
||||
}
|
||||
|
||||
// scan_table() scans, in one table, data "owned" by this shard, looking for
|
||||
// scan_table() scans data in one table "owned" by this shard, looking for
|
||||
// expired items and deleting them.
|
||||
// We consider each node to "own" its primary token ranges, i.e., the tokens
|
||||
// that this node is their first replica in the ring. Inside the node, each
|
||||
@@ -639,13 +630,11 @@ static future<bool> scan_table(
|
||||
data_dictionary::database db,
|
||||
schema_ptr s,
|
||||
abort_source& abort_source,
|
||||
named_semaphore& page_sem,
|
||||
expiration_service::stats& expiration_stats)
|
||||
named_semaphore& page_sem)
|
||||
{
|
||||
// Check if an expiration-time attribute is enabled for this table.
|
||||
// If not, just return false immediately.
|
||||
// FIXME: the setting of the TTL may change in the middle of a long scan!
|
||||
std::optional<std::string> attribute_name = db::find_tag(*s, TTL_TAG_KEY);
|
||||
std::optional<std::string> attribute_name = find_tag(*s, TTL_TAG_KEY);
|
||||
if (!attribute_name) {
|
||||
co_return false;
|
||||
}
|
||||
@@ -686,10 +675,11 @@ static future<bool> scan_table(
|
||||
tlogger.info("table {} TTL column has unsupported type, not scanning", s->cf_name());
|
||||
co_return false;
|
||||
}
|
||||
expiration_stats.scan_table++;
|
||||
// FIXME: need to pace the scan, not do it all at once.
|
||||
// FIXME: consider if we should ask the scan without caching?
|
||||
// can we use cache but not fill it?
|
||||
scan_ranges_context scan_ctx{s, proxy, std::move(column_name), std::move(member)};
|
||||
token_ranges_owned_by_this_shard<primary> my_ranges(db.real_database(), proxy.gossiper(), s);
|
||||
token_ranges_owned_by_this_shard<primary> my_ranges(db.real_database(), s);
|
||||
while (std::optional<dht::partition_range> range = my_ranges.next_partition_range()) {
|
||||
// Note that because of issue #9167 we need to run a separate
|
||||
// query on each partition range, and can't pass several of
|
||||
@@ -700,7 +690,7 @@ static future<bool> scan_table(
|
||||
// we fail the entire scan (and rescan from the beginning). Need to
|
||||
// reconsider this. Saving the scan position might be a good enough
|
||||
// solution for this problem.
|
||||
co_await scan_table_ranges(proxy, scan_ctx, std::move(partition_ranges), abort_source, page_sem, expiration_stats);
|
||||
co_await scan_table_ranges(proxy, scan_ctx, std::move(partition_ranges), abort_source, page_sem);
|
||||
}
|
||||
// If each node only scans its own primary ranges, then when any node is
|
||||
// down part of the token range will not get scanned. This can be viewed
|
||||
@@ -709,12 +699,11 @@ static future<bool> scan_table(
|
||||
// by tasking another node to take over scanning of the dead node's primary
|
||||
// ranges. What we do here is that this node will also check expiration
|
||||
// on its *secondary* ranges - but only those whose primary owner is down.
|
||||
token_ranges_owned_by_this_shard<secondary> my_secondary_ranges(db.real_database(), proxy.gossiper(), s);
token_ranges_owned_by_this_shard<secondary> my_secondary_ranges(db.real_database(), s);
while (std::optional<dht::partition_range> range = my_secondary_ranges.next_partition_range()) {
expiration_stats.secondary_ranges_scanned++;
dht::partition_range_vector partition_ranges;
partition_ranges.push_back(std::move(*range));
co_await scan_table_ranges(proxy, scan_ctx, std::move(partition_ranges), abort_source, page_sem, expiration_stats);
co_await scan_table_ranges(proxy, scan_ctx, std::move(partition_ranges), abort_source, page_sem);
}
co_return true;
}
@@ -727,7 +716,6 @@ future<> expiration_service::run() {
// also need to notice when a new table is added, a table is
// deleted or when ttl is enabled or disabled for a table!
for (;;) {
auto start = lowres_clock::now();
// _db.tables() may change under our feet during a
// long-living loop, so we must keep our own copy of the list of
// schemas.
@@ -741,7 +729,7 @@ future<> expiration_service::run() {
co_return;
}
try {
co_await scan_table(_proxy, _db, s, _abort_source, _page_sem, _expiration_stats);
co_await scan_table(_proxy, _db, s, _abort_source, _page_sem);
} catch (...) {
// The scan of a table may fail in the middle for many
// reasons, including network failure and even the table
@@ -760,28 +748,17 @@ future<> expiration_service::run() {
}
}
}
_expiration_stats.scan_passes++;
// The TTL scanner runs above once over all tables, at full steam.
// After completing such a scan, we sleep until it's time start
// another scan. TODO: If the scan went too fast, we can slow it down
// in the next iteration by reducing the scanner's scheduling-group
// share (if using a separate scheduling group), or introduce
// finer-grain sleeps into the scanning code.
std::chrono::seconds scan_duration(std::chrono::duration_cast<std::chrono::seconds>(lowres_clock::now() - start));
std::chrono::seconds period(_db.get_config().alternator_ttl_period_in_seconds());
if (scan_duration < period) {
try {
tlogger.info("sleeping {} seconds until next period", (period - scan_duration).count());
co_await seastar::sleep_abortable(period - scan_duration, _abort_source);
} catch(seastar::sleep_aborted&) {}
}
// FIXME: replace this silly 1-second sleep by something smarter.
try {
co_await seastar::sleep_abortable(std::chrono::seconds(1), _abort_source);
} catch(seastar::sleep_aborted&) {}
}
}

future<> expiration_service::start() {
// Called by main() on each shard to start the expiration-service
// thread. Just runs run() in the background and allows stop().
if (_db.features().alternator_ttl) {
if (_db.features().cluster_supports_alternator_ttl()) {
if (!shutting_down()) {
_end = run().handle_exception([] (std::exception_ptr ep) {
tlogger.error("expiration_service failed: {}", ep);
@@ -803,18 +780,4 @@ future<> expiration_service::stop() {
return std::move(*_end);
}

expiration_service::stats::stats() {
_metrics.add_group("expiration", {
seastar::metrics::make_total_operations("scan_passes", scan_passes,
seastar::metrics::description("number of passes over the database")),
seastar::metrics::make_total_operations("scan_table", scan_table,
seastar::metrics::description("number of table scans (counting each scan of each table that enabled expiration)")),
seastar::metrics::make_total_operations("items_deleted", items_deleted,
seastar::metrics::description("number of items deleted after expiration")),
seastar::metrics::make_total_operations("secondary_ranges_scanned", secondary_ranges_scanned,
seastar::metrics::description("number of token ranges scanned by this node while their primary owner was down")),
});
}

} // namespace alternator
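Note: the stats() constructor above follows seastar's usual metrics-registration pattern: a per-shard counter is exposed by handing a reference to seastar::metrics::metric_groups, and the metric stays published for as long as that object lives. As a rough standalone sketch of the same pattern (hypothetical class, group, and metric names; not part of this diff):

#include <cstdint>
#include <seastar/core/metrics.hh>
#include <seastar/core/metrics_registration.hh>

class scan_stats {
public:
    uint64_t passes = 0;

    scan_stats() {
        namespace sm = seastar::metrics;
        // The counter is read by reference at scrape time and stays registered
        // for as long as _metrics (and therefore this object) is alive.
        _metrics.add_group("scanner", {
            sm::make_total_operations("passes", passes,
                sm::description("number of completed scan passes")),
        });
    }

private:
    seastar::metrics::metric_groups _metrics;
};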
@@ -28,23 +28,6 @@ namespace alternator {
// items in all tables with per-item expiration enabled. Currently, this means
// Alternator tables with TTL configured via a UpdateTimeToLeave request.
class expiration_service final : public seastar::peering_sharded_service<expiration_service> {
public:
// Object holding per-shard statistics related to the expiration service.
// While this object is alive, these metrics are also registered to be
// visible by the metrics REST API, with the "expiration_" prefix.
class stats {
public:
stats();
uint64_t scan_passes = 0;
uint64_t scan_table = 0;
uint64_t items_deleted = 0;
uint64_t secondary_ranges_scanned = 0;
private:
// The metric_groups object holds this stat object's metrics registered
// as long as the stats object is alive.
seastar::metrics::metric_groups _metrics;
};
private:
data_dictionary::database _db;
service::storage_proxy& _proxy;
// _end is set by start(), and resolves when the the background service
@@ -55,7 +38,6 @@ private:
// Ensures that at most 1 page of scan results at a time is processed by the TTL service
named_semaphore _page_sem{1, named_semaphore_exception_factory{"alternator_ttl"}};
bool shutting_down() { return _abort_source.abort_requested(); }
stats _expiration_stats;
public:
// sharded_service<expiration_service>::start() creates this object on
// all shards, so calls this constructor on each shard. Later, the
@@ -1,29 +0,0 @@
{
"apiVersion":"0.0.1",
"swaggerVersion":"1.2",
"basePath":"{{Protocol}}://{{Host}}",
"resourcePath":"/authorization_cache",
"produces":[
"application/json"
],
"apis":[
{
"path":"/authorization_cache/reset",
"operations":[
{
"method":"POST",
"summary":"Reset cache",
"type":"void",
"nickname":"authorization_cache_reset",
"produces":[
"application/json"
],
"parameters":[
]
}
]
}
],
"models":{
}
}
@@ -134,7 +134,7 @@
|
||||
},
|
||||
{
|
||||
"name":"tables",
|
||||
"description":"Comma-separated tables to stop compaction in",
|
||||
"description":"Comma-seperated tables to stop compaction in",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
|
||||
@@ -667,7 +667,7 @@
|
||||
},
|
||||
{
|
||||
"name":"kn",
|
||||
"description":"Comma-separated keyspaces name that their snapshot will be deleted",
|
||||
"description":"Comma seperated keyspaces name that their snapshot will be deleted",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -723,7 +723,7 @@
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated column family names",
|
||||
"description":"Comma seperated column family names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -755,7 +755,7 @@
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated column family names",
|
||||
"description":"Comma seperated column family names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -787,7 +787,7 @@
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated table names",
|
||||
"description":"Comma-seperated table names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -862,7 +862,7 @@
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated column family names",
|
||||
"description":"Comma seperated column family names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -902,7 +902,7 @@
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated column family names",
|
||||
"description":"Comma seperated column family names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -934,7 +934,7 @@
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated column family names",
|
||||
"description":"Comma seperated column family names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -1946,7 +1946,7 @@
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Forces this node to recalculate versions of schema objects.",
|
||||
"summary":"Reset local schema",
|
||||
"type":"void",
|
||||
"nickname":"reset_local_schema",
|
||||
"produces":[
|
||||
@@ -2073,7 +2073,7 @@
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated column family names",
|
||||
"description":"Comma seperated column family names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -2100,7 +2100,7 @@
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated column family names",
|
||||
"description":"Comma seperated column family names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -2641,7 +2641,7 @@
|
||||
"version":{
|
||||
"type":"string",
|
||||
"enum":[
|
||||
"ka", "la", "mc", "md", "me"
|
||||
"ka", "la", "mc", "md"
|
||||
],
|
||||
"description":"SSTable version"
|
||||
},
|
||||
|
||||
45 api/api.cc
@@ -24,7 +24,6 @@
|
||||
#include "compaction_manager.hh"
|
||||
#include "hinted_handoff.hh"
|
||||
#include "error_injection.hh"
|
||||
#include "authorization_cache.hh"
|
||||
#include <seastar/http/exception.hh>
|
||||
#include "stream_manager.hh"
|
||||
#include "system.hh"
|
||||
@@ -97,9 +96,9 @@ future<> unset_rpc_controller(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_rpc_controller(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_storage_service(http_context& ctx, sharded<service::storage_service>& ss, sharded<gms::gossiper>& g, sharded<cdc::generation_service>& cdc_gs, sharded<db::system_keyspace>& sys_ks) {
|
||||
return register_api(ctx, "storage_service", "The storage service API", [&ss, &g, &cdc_gs, &sys_ks] (http_context& ctx, routes& r) {
|
||||
set_storage_service(ctx, r, ss, g.local(), cdc_gs, sys_ks);
|
||||
future<> set_server_storage_service(http_context& ctx, sharded<service::storage_service>& ss, sharded<gms::gossiper>& g, sharded<cdc::generation_service>& cdc_gs) {
|
||||
return register_api(ctx, "storage_service", "The storage service API", [&ss, &g, &cdc_gs] (http_context& ctx, routes& r) {
|
||||
set_storage_service(ctx, r, ss, g.local(), cdc_gs);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -127,17 +126,6 @@ future<> unset_server_repair(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_repair(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_authorization_cache(http_context &ctx, sharded<auth::service> &auth_service) {
|
||||
return register_api(ctx, "authorization_cache",
|
||||
"The authorization cache API", [&auth_service] (http_context &ctx, routes &r) {
|
||||
set_authorization_cache(ctx, r, auth_service);
|
||||
});
|
||||
}
|
||||
|
||||
future<> unset_server_authorization_cache(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_authorization_cache(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_snapshot(http_context& ctx, sharded<db::snapshot_ctl>& snap_ctl) {
|
||||
return ctx.http_server.set_routes([&ctx, &snap_ctl] (routes& r) { set_snapshot(ctx, r, snap_ctl); });
|
||||
}
|
||||
@@ -245,32 +233,5 @@ future<> set_server_done(http_context& ctx) {
|
||||
});
|
||||
}
|
||||
|
||||
void req_params::process(const request& req) {
|
||||
// Process mandatory parameters
|
||||
for (auto& [name, ent] : params) {
|
||||
if (!ent.is_mandatory) {
|
||||
continue;
|
||||
}
|
||||
try {
|
||||
ent.value = req.param[name];
|
||||
} catch (std::out_of_range&) {
|
||||
throw httpd::bad_param_exception(fmt::format("Mandatory parameter '{}' was not provided", name));
|
||||
}
|
||||
}
|
||||
|
||||
// Process optional parameters
|
||||
for (auto& [name, value] : req.query_parameters) {
|
||||
try {
|
||||
auto& ent = params.at(name);
|
||||
if (ent.is_mandatory) {
|
||||
throw httpd::bad_param_exception(fmt::format("Parameter '{}' is expected to be provided as part of the request url", name));
|
||||
}
|
||||
ent.value = value;
|
||||
} catch (std::out_of_range&) {
|
||||
throw httpd::bad_param_exception(fmt::format("Unsupported optional parameter '{}'", name));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
61 api/api.hh
@@ -237,67 +237,6 @@ public:
|
||||
operator T() const { return value; }
|
||||
};
|
||||
|
||||
using mandatory = bool_class<struct mandatory_tag>;
|
||||
|
||||
class req_params {
|
||||
public:
|
||||
struct def {
|
||||
std::optional<sstring> value;
|
||||
mandatory is_mandatory = mandatory::no;
|
||||
|
||||
def(std::optional<sstring> value_ = std::nullopt, mandatory is_mandatory_ = mandatory::no)
|
||||
: value(std::move(value_))
|
||||
, is_mandatory(is_mandatory_)
|
||||
{ }
|
||||
|
||||
def(mandatory is_mandatory_)
|
||||
: is_mandatory(is_mandatory_)
|
||||
{ }
|
||||
};
|
||||
|
||||
private:
|
||||
std::unordered_map<sstring, def> params;
|
||||
|
||||
public:
|
||||
req_params(std::initializer_list<std::pair<sstring, def>> l) {
|
||||
for (const auto& [name, ent] : l) {
|
||||
add(std::move(name), std::move(ent));
|
||||
}
|
||||
}
|
||||
|
||||
void add(sstring name, def ent) {
|
||||
params.emplace(std::move(name), std::move(ent));
|
||||
}
|
||||
|
||||
void process(const request& req);
|
||||
|
||||
const std::optional<sstring>& get(const char* name) const {
|
||||
return params.at(name).value;
|
||||
}
|
||||
|
||||
template <typename T = sstring>
|
||||
const std::optional<T> get_as(const char* name) const {
|
||||
return get(name);
|
||||
}
|
||||
|
||||
template <typename T = sstring>
|
||||
requires std::same_as<T, bool>
|
||||
const std::optional<bool> get_as(const char* name) const {
|
||||
auto value = get(name);
|
||||
if (!value) {
|
||||
return std::nullopt;
|
||||
}
|
||||
std::transform(value->begin(), value->end(), value->begin(), ::tolower);
|
||||
if (value == "true" || value == "yes" || value == "1") {
|
||||
return true;
|
||||
}
|
||||
if (value == "false" || value == "no" || value == "0") {
|
||||
return false;
|
||||
}
|
||||
throw boost::bad_lexical_cast{};
|
||||
}
|
||||
};
|
||||
|
||||
utils_json::estimated_histogram time_to_json_histogram(const utils::time_estimated_histogram& val);
|
||||
|
||||
}
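For reference, the req_params helper shown above (present only on the next-5.1 side of this compare) is used by handlers roughly as follows; this is condensed from the scrub handler that appears later in this diff and assumes it runs inside a handler with the usual std::unique_ptr<request> req in scope:

// Declare the parameters the handler accepts; mandatory ones must be present.
auto rp = req_params({
    {"keyspace", {mandatory::yes}},   // missing => bad_param_exception
    {"cf", {""}},                     // optional, defaults to an empty string
    {"skip_corrupted", {}},           // optional, no default value
});
rp.process(*req);                                  // validates and fills in values
auto keyspace = *rp.get("keyspace");               // engaged after process() for mandatory params
auto skip = rp.get_as<bool>("skip_corrupted").value_or(false);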
@@ -42,7 +42,6 @@ class config;
|
||||
namespace view {
|
||||
class view_builder;
|
||||
}
|
||||
class system_keyspace;
|
||||
}
|
||||
namespace netw { class messaging_service; }
|
||||
class repair_service;
|
||||
@@ -54,8 +53,6 @@ class gossiper;
|
||||
|
||||
}
|
||||
|
||||
namespace auth { class service; }
|
||||
|
||||
namespace api {
|
||||
|
||||
struct http_context {
|
||||
@@ -79,7 +76,7 @@ struct http_context {
|
||||
future<> set_server_init(http_context& ctx);
|
||||
future<> set_server_config(http_context& ctx, const db::config& cfg);
|
||||
future<> set_server_snitch(http_context& ctx);
|
||||
future<> set_server_storage_service(http_context& ctx, sharded<service::storage_service>& ss, sharded<gms::gossiper>& g, sharded<cdc::generation_service>& cdc_gs, sharded<db::system_keyspace>& sys_ks);
|
||||
future<> set_server_storage_service(http_context& ctx, sharded<service::storage_service>& ss, sharded<gms::gossiper>& g, sharded<cdc::generation_service>& cdc_gs);
|
||||
future<> set_server_sstables_loader(http_context& ctx, sharded<sstables_loader>& sst_loader);
|
||||
future<> unset_server_sstables_loader(http_context& ctx);
|
||||
future<> set_server_view_builder(http_context& ctx, sharded<db::view::view_builder>& vb);
|
||||
@@ -90,8 +87,6 @@ future<> set_transport_controller(http_context& ctx, cql_transport::controller&
|
||||
future<> unset_transport_controller(http_context& ctx);
|
||||
future<> set_rpc_controller(http_context& ctx, thrift_controller& ctl);
|
||||
future<> unset_rpc_controller(http_context& ctx);
|
||||
future<> set_server_authorization_cache(http_context& ctx, sharded<auth::service> &auth_service);
|
||||
future<> unset_server_authorization_cache(http_context& ctx);
|
||||
future<> set_server_snapshot(http_context& ctx, sharded<db::snapshot_ctl>& snap_ctl);
|
||||
future<> unset_server_snapshot(http_context& ctx);
|
||||
future<> set_server_gossip(http_context& ctx, sharded<gms::gossiper>& g);
|
||||
|
||||
@@ -1,33 +0,0 @@
/*
* Copyright (C) 2022-present ScyllaDB
*/

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/

#include "api/api-doc/authorization_cache.json.hh"

#include "api/authorization_cache.hh"
#include "api/api.hh"
#include "auth/common.hh"

namespace api {
using namespace json;

void set_authorization_cache(http_context& ctx, routes& r, sharded<auth::service> &auth_service) {
httpd::authorization_cache_json::authorization_cache_reset.set(r, [&auth_service] (std::unique_ptr<request> req) -> future<json::json_return_type> {
co_await auth_service.invoke_on_all([] (auth::service& auth) -> future<> {
auth.reset_authorization_cache();
return make_ready_future<>();
});

co_return json_void();
});
}

void unset_authorization_cache(http_context& ctx, routes& r) {
httpd::authorization_cache_json::authorization_cache_reset.unset(r);
}

}
@@ -1,18 +0,0 @@
/*
* Copyright (C) 2022-present ScyllaDB
*/

/*
* SPDX-License-Identifier: AGPL-3.0-or-later
*/

#pragma once

#include "api.hh"

namespace api {

void set_authorization_cache(http_context& ctx, routes& r, sharded<auth::service> &auth_service);
void unset_authorization_cache(http_context& ctx, routes& r);

}
@@ -29,11 +29,8 @@ static auto transformer(const std::vector<collectd_value>& values) {
|
||||
case scollectd::data_type::GAUGE:
|
||||
collected_value.values.push(v.d());
|
||||
break;
|
||||
case scollectd::data_type::COUNTER:
|
||||
collected_value.values.push(v.ui());
|
||||
break;
|
||||
case scollectd::data_type::REAL_COUNTER:
|
||||
collected_value.values.push(v.d());
|
||||
case scollectd::data_type::DERIVE:
|
||||
collected_value.values.push(v.i());
|
||||
break;
|
||||
default:
|
||||
collected_value.values.push(v.ui());
|
||||
|
||||
@@ -79,14 +79,14 @@ future<json::json_return_type> get_cf_stats(http_context& ctx,
|
||||
}
|
||||
|
||||
static future<json::json_return_type> get_cf_stats_count(http_context& ctx, const sstring& name,
|
||||
utils::timed_rate_moving_average_summary_and_histogram replica::column_family_stats::*f) {
|
||||
utils::timed_rate_moving_average_and_histogram replica::column_family_stats::*f) {
|
||||
return map_reduce_cf(ctx, name, int64_t(0), [f](const replica::column_family& cf) {
|
||||
return (cf.get_stats().*f).hist.count;
|
||||
}, std::plus<int64_t>());
|
||||
}
|
||||
|
||||
static future<json::json_return_type> get_cf_stats_sum(http_context& ctx, const sstring& name,
|
||||
utils::timed_rate_moving_average_summary_and_histogram replica::column_family_stats::*f) {
|
||||
utils::timed_rate_moving_average_and_histogram replica::column_family_stats::*f) {
|
||||
auto uuid = get_uuid(name, ctx.db.local());
|
||||
return ctx.db.map_reduce0([uuid, f](replica::database& db) {
|
||||
// Histograms information is sample of the actual load
|
||||
@@ -102,7 +102,7 @@ static future<json::json_return_type> get_cf_stats_sum(http_context& ctx, const
|
||||
|
||||
|
||||
static future<json::json_return_type> get_cf_stats_count(http_context& ctx,
|
||||
utils::timed_rate_moving_average_summary_and_histogram replica::column_family_stats::*f) {
|
||||
utils::timed_rate_moving_average_and_histogram replica::column_family_stats::*f) {
|
||||
return map_reduce_cf(ctx, int64_t(0), [f](const replica::column_family& cf) {
|
||||
return (cf.get_stats().*f).hist.count;
|
||||
}, std::plus<int64_t>());
|
||||
@@ -120,19 +120,7 @@ static future<json::json_return_type> get_cf_histogram(http_context& ctx, const
|
||||
});
|
||||
}
|
||||
|
||||
static future<json::json_return_type> get_cf_histogram(http_context& ctx, const sstring& name,
|
||||
utils::timed_rate_moving_average_summary_and_histogram replica::column_family_stats::*f) {
|
||||
utils::UUID uuid = get_uuid(name, ctx.db.local());
|
||||
return ctx.db.map_reduce0([f, uuid](const replica::database& p) {
|
||||
return (p.find_column_family(uuid).get_stats().*f).hist;},
|
||||
utils::ihistogram(),
|
||||
std::plus<utils::ihistogram>())
|
||||
.then([](const utils::ihistogram& val) {
|
||||
return make_ready_future<json::json_return_type>(to_json(val));
|
||||
});
|
||||
}
|
||||
|
||||
static future<json::json_return_type> get_cf_histogram(http_context& ctx, utils::timed_rate_moving_average_summary_and_histogram replica::column_family_stats::*f) {
|
||||
static future<json::json_return_type> get_cf_histogram(http_context& ctx, utils::timed_rate_moving_average_and_histogram replica::column_family_stats::*f) {
|
||||
std::function<utils::ihistogram(const replica::database&)> fun = [f] (const replica::database& db) {
|
||||
utils::ihistogram res;
|
||||
for (auto i : db.get_column_families()) {
|
||||
@@ -148,7 +136,7 @@ static future<json::json_return_type> get_cf_histogram(http_context& ctx, utils:
|
||||
}
|
||||
|
||||
static future<json::json_return_type> get_cf_rate_and_histogram(http_context& ctx, const sstring& name,
|
||||
utils::timed_rate_moving_average_summary_and_histogram replica::column_family_stats::*f) {
|
||||
utils::timed_rate_moving_average_and_histogram replica::column_family_stats::*f) {
|
||||
utils::UUID uuid = get_uuid(name, ctx.db.local());
|
||||
return ctx.db.map_reduce0([f, uuid](const replica::database& p) {
|
||||
return (p.find_column_family(uuid).get_stats().*f).rate();},
|
||||
@@ -159,7 +147,7 @@ static future<json::json_return_type> get_cf_rate_and_histogram(http_context& c
|
||||
});
|
||||
}
|
||||
|
||||
static future<json::json_return_type> get_cf_rate_and_histogram(http_context& ctx, utils::timed_rate_moving_average_summary_and_histogram replica::column_family_stats::*f) {
|
||||
static future<json::json_return_type> get_cf_rate_and_histogram(http_context& ctx, utils::timed_rate_moving_average_and_histogram replica::column_family_stats::*f) {
|
||||
std::function<utils::rate_moving_average_and_histogram(const replica::database&)> fun = [f] (const replica::database& db) {
|
||||
utils::rate_moving_average_and_histogram res;
|
||||
for (auto i : db.get_column_families()) {
|
||||
@@ -815,19 +803,19 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
|
||||
cf::get_cas_prepare.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf_time_histogram(ctx, req->param["name"], [](const replica::column_family& cf) {
|
||||
return cf.get_stats().cas_prepare.histogram();
|
||||
return cf.get_stats().estimated_cas_prepare;
|
||||
});
|
||||
});
|
||||
|
||||
cf::get_cas_propose.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf_time_histogram(ctx, req->param["name"], [](const replica::column_family& cf) {
|
||||
return cf.get_stats().cas_accept.histogram();
|
||||
return cf.get_stats().estimated_cas_accept;
|
||||
});
|
||||
});
|
||||
|
||||
cf::get_cas_commit.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf_time_histogram(ctx, req->param["name"], [](const replica::column_family& cf) {
|
||||
return cf.get_stats().cas_learn.histogram();
|
||||
return cf.get_stats().estimated_cas_learn;
|
||||
});
|
||||
});
|
||||
|
||||
@@ -933,13 +921,13 @@ void set_column_family(http_context& ctx, routes& r) {
|
||||
|
||||
cf::get_read_latency_estimated_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return map_reduce_cf_time_histogram(ctx, req->param["name"], [](const replica::column_family& cf) {
|
||||
return cf.get_stats().reads.histogram();
|
||||
return cf.get_stats().estimated_read;
|
||||
});
|
||||
});
|
||||
|
||||
cf::get_write_latency_estimated_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return map_reduce_cf_time_histogram(ctx, req->param["name"], [](const replica::column_family& cf) {
|
||||
return cf.get_stats().writes.histogram();
|
||||
return cf.get_stats().estimated_write;
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
@@ -119,7 +119,7 @@ void set_compaction_manager(http_context& ctx, routes& r) {
|
||||
auto& cm = db.get_compaction_manager();
|
||||
return parallel_for_each(table_names, [&db, &cm, &ks_name, type] (sstring& table_name) {
|
||||
auto& t = db.find_column_family(ks_name, table_name);
|
||||
return cm.stop_compaction(type, &t.as_table_state());
|
||||
return cm.stop_compaction(type, &t);
|
||||
});
|
||||
});
|
||||
co_return json_void();
|
||||
|
||||
@@ -6,7 +6,6 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include "locator/token_metadata.hh"
|
||||
#include "locator/snitch_base.hh"
|
||||
#include "endpoint_snitch.hh"
|
||||
#include "api/api-doc/endpoint_snitch_info.json.hh"
|
||||
@@ -20,14 +19,12 @@ void set_endpoint_snitch(http_context& ctx, routes& r) {
|
||||
return host.empty() ? gms::inet_address(utils::fb_utilities::get_broadcast_address()) : gms::inet_address(host);
|
||||
};
|
||||
|
||||
httpd::endpoint_snitch_info_json::get_datacenter.set(r, [&ctx](const_req req) {
|
||||
auto& topology = ctx.shared_token_metadata.local().get()->get_topology();
|
||||
return topology.get_datacenter(host_or_broadcast(req));
|
||||
httpd::endpoint_snitch_info_json::get_datacenter.set(r, [](const_req req) {
|
||||
return locator::i_endpoint_snitch::get_local_snitch_ptr()->get_datacenter(host_or_broadcast(req));
|
||||
});
|
||||
|
||||
httpd::endpoint_snitch_info_json::get_rack.set(r, [&ctx](const_req req) {
|
||||
auto& topology = ctx.shared_token_metadata.local().get()->get_topology();
|
||||
return topology.get_rack(host_or_broadcast(req));
|
||||
httpd::endpoint_snitch_info_json::get_rack.set(r, [](const_req req) {
|
||||
return locator::i_endpoint_snitch::get_local_snitch_ptr()->get_rack(host_or_broadcast(req));
|
||||
});
|
||||
|
||||
httpd::endpoint_snitch_info_json::get_snitch_name.set(r, [] (const_req req) {
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
#include <seastar/http/exception.hh>
|
||||
#include "log.hh"
|
||||
#include "utils/error_injection.hh"
|
||||
#include <seastar/core/future-util.hh>
|
||||
#include "seastar/core/future-util.hh"
|
||||
|
||||
namespace api {
|
||||
|
||||
|
||||
@@ -17,86 +17,77 @@ namespace fd = httpd::failure_detector_json;
|
||||
|
||||
void set_failure_detector(http_context& ctx, routes& r, gms::gossiper& g) {
|
||||
fd::get_all_endpoint_states.set(r, [&g](std::unique_ptr<request> req) {
|
||||
return g.container().invoke_on(0, [] (gms::gossiper& g) {
|
||||
std::vector<fd::endpoint_state> res;
|
||||
for (auto i : g.get_endpoint_states()) {
|
||||
fd::endpoint_state val;
|
||||
val.addrs = boost::lexical_cast<std::string>(i.first);
|
||||
val.is_alive = i.second.is_alive();
|
||||
val.generation = i.second.get_heart_beat_state().get_generation();
|
||||
val.version = i.second.get_heart_beat_state().get_heart_beat_version();
|
||||
val.update_time = i.second.get_update_timestamp().time_since_epoch().count();
|
||||
for (auto a : i.second.get_application_state_map()) {
|
||||
fd::version_value version_val;
|
||||
// We return the enum index and not it's name to stay compatible to origin
|
||||
// method that the state index are static but the name can be changed.
|
||||
version_val.application_state = static_cast<std::underlying_type<gms::application_state>::type>(a.first);
|
||||
version_val.value = a.second.value;
|
||||
version_val.version = a.second.version;
|
||||
val.application_state.push(version_val);
|
||||
}
|
||||
res.push_back(val);
|
||||
std::vector<fd::endpoint_state> res;
|
||||
for (auto i : g.endpoint_state_map) {
|
||||
fd::endpoint_state val;
|
||||
val.addrs = boost::lexical_cast<std::string>(i.first);
|
||||
val.is_alive = i.second.is_alive();
|
||||
val.generation = i.second.get_heart_beat_state().get_generation();
|
||||
val.version = i.second.get_heart_beat_state().get_heart_beat_version();
|
||||
val.update_time = i.second.get_update_timestamp().time_since_epoch().count();
|
||||
for (auto a : i.second.get_application_state_map()) {
|
||||
fd::version_value version_val;
|
||||
// We return the enum index and not it's name to stay compatible to origin
|
||||
// method that the state index are static but the name can be changed.
|
||||
version_val.application_state = static_cast<std::underlying_type<gms::application_state>::type>(a.first);
|
||||
version_val.value = a.second.value;
|
||||
version_val.version = a.second.version;
|
||||
val.application_state.push(version_val);
|
||||
}
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
res.push_back(val);
|
||||
}
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
|
||||
fd::get_up_endpoint_count.set(r, [&g](std::unique_ptr<request> req) {
|
||||
return g.container().invoke_on(0, [] (gms::gossiper& g) {
|
||||
int res = g.get_up_endpoint_count();
|
||||
return gms::get_up_endpoint_count(g).then([](int res) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
});
|
||||
|
||||
fd::get_down_endpoint_count.set(r, [&g](std::unique_ptr<request> req) {
|
||||
return g.container().invoke_on(0, [] (gms::gossiper& g) {
|
||||
int res = g.get_down_endpoint_count();
|
||||
return gms::get_down_endpoint_count(g).then([](int res) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
});
|
||||
|
||||
fd::get_phi_convict_threshold.set(r, [] (std::unique_ptr<request> req) {
|
||||
return make_ready_future<json::json_return_type>(8);
|
||||
return gms::get_phi_convict_threshold().then([](double res) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
});
|
||||
|
||||
fd::get_simple_states.set(r, [&g] (std::unique_ptr<request> req) {
|
||||
return g.container().invoke_on(0, [] (gms::gossiper& g) {
|
||||
std::map<sstring, sstring> nodes_status;
|
||||
for (auto& entry : g.get_endpoint_states()) {
|
||||
nodes_status.emplace(entry.first.to_sstring(), entry.second.is_alive() ? "UP" : "DOWN");
|
||||
}
|
||||
return make_ready_future<json::json_return_type>(map_to_key_value<fd::mapper>(nodes_status));
|
||||
return gms::get_simple_states(g).then([](const std::map<sstring, sstring>& map) {
|
||||
return make_ready_future<json::json_return_type>(map_to_key_value<fd::mapper>(map));
|
||||
});
|
||||
});
|
||||
|
||||
fd::set_phi_convict_threshold.set(r, [](std::unique_ptr<request> req) {
|
||||
double phi = atof(req->get_query_param("phi").c_str());
|
||||
return make_ready_future<json::json_return_type>("");
|
||||
return gms::set_phi_convict_threshold(phi).then([]() {
|
||||
return make_ready_future<json::json_return_type>("");
|
||||
});
|
||||
});
|
||||
|
||||
fd::get_endpoint_state.set(r, [&g] (std::unique_ptr<request> req) {
|
||||
return g.container().invoke_on(0, [req = std::move(req)] (gms::gossiper& g) {
|
||||
auto* state = g.get_endpoint_state_for_endpoint_ptr(gms::inet_address(req->param["addr"]));
|
||||
if (!state) {
|
||||
return make_ready_future<json::json_return_type>(format("unknown endpoint {}", req->param["addr"]));
|
||||
}
|
||||
std::stringstream ss;
|
||||
g.append_endpoint_state(ss, *state);
|
||||
return make_ready_future<json::json_return_type>(sstring(ss.str()));
|
||||
return get_endpoint_state(g, req->param["addr"]).then([](const sstring& state) {
|
||||
return make_ready_future<json::json_return_type>(state);
|
||||
});
|
||||
});
|
||||
|
||||
fd::get_endpoint_phi_values.set(r, [](std::unique_ptr<request> req) {
|
||||
std::map<gms::inet_address, gms::arrival_window> map;
|
||||
std::vector<fd::endpoint_phi_value> res;
|
||||
auto now = gms::arrival_window::clk::now();
|
||||
for (auto& p : map) {
|
||||
fd::endpoint_phi_value val;
|
||||
val.endpoint = p.first.to_sstring();
|
||||
val.phi = p.second.phi(now);
|
||||
res.emplace_back(std::move(val));
|
||||
}
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
return gms::get_arrival_samples().then([](std::map<gms::inet_address, gms::arrival_window> map) {
|
||||
std::vector<fd::endpoint_phi_value> res;
|
||||
auto now = gms::arrival_window::clk::now();
|
||||
for (auto& p : map) {
|
||||
fd::endpoint_phi_value val;
|
||||
val.endpoint = p.first.to_sstring();
|
||||
val.phi = p.second.phi(now);
|
||||
res.emplace_back(std::move(val));
|
||||
}
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -19,11 +19,9 @@ void set_gossiper(http_context& ctx, routes& r, gms::gossiper& g) {
|
||||
return container_to_vec(res);
|
||||
});
|
||||
|
||||
|
||||
httpd::gossiper_json::get_live_endpoint.set(r, [&g] (std::unique_ptr<request> req) {
|
||||
return g.get_live_members_synchronized().then([] (auto res) {
|
||||
return make_ready_future<json::json_return_type>(container_to_vec(res));
|
||||
});
|
||||
httpd::gossiper_json::get_live_endpoint.set(r, [&g] (const_req req) {
|
||||
auto res = g.get_live_members();
|
||||
return container_to_vec(res);
|
||||
});
|
||||
|
||||
httpd::gossiper_json::get_endpoint_downtime.set(r, [&g] (const_req req) {
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
#include "db/config.hh"
|
||||
#include "utils/histogram.hh"
|
||||
#include "replica/database.hh"
|
||||
#include <seastar/core/scheduling_specific.hh>
|
||||
#include "seastar/core/scheduling_specific.hh"
|
||||
|
||||
namespace api {
|
||||
|
||||
|
||||
@@ -11,7 +11,6 @@
|
||||
#include "db/config.hh"
|
||||
#include "db/schema_tables.hh"
|
||||
#include "utils/hash.hh"
|
||||
#include <optional>
|
||||
#include <sstream>
|
||||
#include <time.h>
|
||||
#include <algorithm>
|
||||
@@ -25,9 +24,8 @@
|
||||
#include "db/commitlog/commitlog.hh"
|
||||
#include "gms/gossiper.hh"
|
||||
#include "db/system_keyspace.hh"
|
||||
#include <seastar/http/exception.hh>
|
||||
#include "seastar/http/exception.hh"
|
||||
#include <seastar/core/coroutine.hh>
|
||||
#include <seastar/coroutine/parallel_for_each.hh>
|
||||
#include "repair/row_level.hh"
|
||||
#include "locator/snitch_base.hh"
|
||||
#include "column_family.hh"
|
||||
@@ -58,25 +56,23 @@ const locator::token_metadata& http_context::get_token_metadata() {
|
||||
namespace ss = httpd::storage_service_json;
|
||||
using namespace json;
|
||||
|
||||
sstring validate_keyspace(http_context& ctx, sstring ks_name) {
|
||||
sstring validate_keyspace(http_context& ctx, const parameters& param) {
|
||||
const auto& ks_name = param["keyspace"];
|
||||
if (ctx.db.local().has_keyspace(ks_name)) {
|
||||
return ks_name;
|
||||
}
|
||||
throw bad_param_exception(replica::no_such_keyspace(ks_name).what());
|
||||
}
|
||||
|
||||
sstring validate_keyspace(http_context& ctx, const parameters& param) {
|
||||
return validate_keyspace(ctx, param["keyspace"]);
|
||||
}
|
||||
|
||||
// splits a request parameter assumed to hold a comma-separated list of table names
|
||||
// verify that the tables are found, otherwise a bad_param_exception exception is thrown
|
||||
// containing the description of the respective no_such_column_family error.
|
||||
std::vector<sstring> parse_tables(const sstring& ks_name, http_context& ctx, sstring value) {
|
||||
if (value.empty()) {
|
||||
return map_keys(ctx.db.local().find_keyspace(ks_name).metadata().get()->cf_meta_data());
|
||||
std::vector<sstring> parse_tables(const sstring& ks_name, http_context& ctx, const std::unordered_map<sstring, sstring>& query_params, sstring param_name) {
|
||||
auto it = query_params.find(param_name);
|
||||
if (it == query_params.end()) {
|
||||
return {};
|
||||
}
|
||||
std::vector<sstring> names = split(value, ",");
|
||||
std::vector<sstring> names = split(it->second, ",");
|
||||
try {
|
||||
for (const auto& table_name : names) {
|
||||
ctx.db.local().find_column_family(ks_name, table_name);
|
||||
@@ -87,14 +83,6 @@ std::vector<sstring> parse_tables(const sstring& ks_name, http_context& ctx, sst
|
||||
return names;
|
||||
}
|
||||
|
||||
std::vector<sstring> parse_tables(const sstring& ks_name, http_context& ctx, const std::unordered_map<sstring, sstring>& query_params, sstring param_name) {
|
||||
auto it = query_params.find(param_name);
|
||||
if (it == query_params.end()) {
|
||||
return {};
|
||||
}
|
||||
return parse_tables(ks_name, ctx, it->second);
|
||||
}
|
||||
|
||||
static ss::token_range token_range_endpoints_to_json(const dht::token_range_endpoints& d) {
|
||||
ss::token_range r;
|
||||
r.start_token = d._start_token;
|
||||
@@ -157,7 +145,7 @@ seastar::future<json::json_return_type> run_toppartitions_query(db::toppartition
|
||||
});
|
||||
}
|
||||
|
||||
future<json::json_return_type> set_tables_autocompaction(http_context& ctx, const sstring &keyspace, std::vector<sstring> tables, bool enabled) {
|
||||
future<json::json_return_type> set_tables_autocompaction(http_context& ctx, service::storage_service& ss, const sstring &keyspace, std::vector<sstring> tables, bool enabled) {
|
||||
if (tables.empty()) {
|
||||
tables = map_keys(ctx.db.local().find_keyspace(keyspace).metadata().get()->cf_meta_data());
|
||||
}
|
||||
@@ -184,21 +172,17 @@ future<json::json_return_type> set_tables_autocompaction(http_context& ctx, cons
|
||||
}
|
||||
|
||||
void set_transport_controller(http_context& ctx, routes& r, cql_transport::controller& ctl) {
|
||||
ss::start_native_transport.set(r, [&ctx, &ctl](std::unique_ptr<request> req) {
|
||||
ss::start_native_transport.set(r, [&ctl](std::unique_ptr<request> req) {
|
||||
return smp::submit_to(0, [&] {
|
||||
return with_scheduling_group(ctx.db.local().get_statement_scheduling_group(), [&ctl] {
|
||||
return ctl.start_server();
|
||||
});
|
||||
return ctl.start_server();
|
||||
}).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
ss::stop_native_transport.set(r, [&ctx, &ctl](std::unique_ptr<request> req) {
|
||||
ss::stop_native_transport.set(r, [&ctl](std::unique_ptr<request> req) {
|
||||
return smp::submit_to(0, [&] {
|
||||
return with_scheduling_group(ctx.db.local().get_statement_scheduling_group(), [&ctl] {
|
||||
return ctl.request_stop_server();
|
||||
});
|
||||
return ctl.request_stop_server();
|
||||
}).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
@@ -220,21 +204,17 @@ void unset_transport_controller(http_context& ctx, routes& r) {
|
||||
}
|
||||
|
||||
void set_rpc_controller(http_context& ctx, routes& r, thrift_controller& ctl) {
|
||||
ss::stop_rpc_server.set(r, [&ctx, &ctl] (std::unique_ptr<request> req) {
|
||||
return smp::submit_to(0, [&ctx, &ctl] {
|
||||
return with_scheduling_group(ctx.db.local().get_statement_scheduling_group(), [&ctl] () mutable {
|
||||
return ctl.request_stop_server();
|
||||
});
|
||||
ss::stop_rpc_server.set(r, [&ctl](std::unique_ptr<request> req) {
|
||||
return smp::submit_to(0, [&] {
|
||||
return ctl.request_stop_server();
|
||||
}).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
ss::start_rpc_server.set(r, [&ctx, &ctl](std::unique_ptr<request> req) {
|
||||
return smp::submit_to(0, [&ctx, &ctl] {
|
||||
return with_scheduling_group(ctx.db.local().get_statement_scheduling_group(), [&ctl] () mutable {
|
||||
return ctl.start_server();
|
||||
});
|
||||
ss::start_rpc_server.set(r, [&ctl](std::unique_ptr<request> req) {
|
||||
return smp::submit_to(0, [&] {
|
||||
return ctl.start_server();
|
||||
}).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
@@ -404,10 +384,11 @@ static future<json::json_return_type> describe_ring_as_json(sharded<service::sto
|
||||
co_return json::json_return_type(stream_range_as_array(co_await ss.local().describe_ring(keyspace), token_range_endpoints_to_json));
|
||||
}
|
||||
|
||||
void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_service>& ss, gms::gossiper& g, sharded<cdc::generation_service>& cdc_gs, sharded<db::system_keyspace>& sys_ks) {
|
||||
ss::local_hostid.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
auto id = ctx.db.local().get_config().host_id;
|
||||
return make_ready_future<json::json_return_type>(id.to_sstring());
|
||||
void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_service>& ss, gms::gossiper& g, sharded<cdc::generation_service>& cdc_gs) {
|
||||
ss::local_hostid.set(r, [](std::unique_ptr<request> req) {
|
||||
return db::system_keyspace::load_local_host_id().then([](const utils::UUID& id) {
|
||||
return make_ready_future<json::json_return_type>(id.to_sstring());
|
||||
});
|
||||
});
|
||||
|
||||
ss::get_tokens.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
@@ -523,10 +504,10 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
return ctx.db.local().get_config().saved_caches_directory();
|
||||
});
|
||||
|
||||
ss::get_range_to_endpoint_map.set(r, [&ctx, &ss](std::unique_ptr<request> req) -> future<json::json_return_type> {
|
||||
ss::get_range_to_endpoint_map.set(r, [&ctx, &ss](std::unique_ptr<request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
std::vector<ss::maplist_mapper> res;
|
||||
co_return stream_range_as_array(co_await ss.local().get_range_to_address_map(keyspace),
|
||||
return make_ready_future<json::json_return_type>(stream_range_as_array(ss.local().get_range_to_address_map(keyspace),
|
||||
[](const std::pair<dht::token_range, inet_address_vector_replica_set>& entry){
|
||||
ss::maplist_mapper m;
|
||||
if (entry.first.start()) {
|
||||
@@ -543,7 +524,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
m.value.push(address.to_sstring());
|
||||
}
|
||||
return m;
|
||||
});
|
||||
}));
|
||||
});
|
||||
|
||||
ss::get_pending_range_to_endpoint_map.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
@@ -555,13 +536,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
});
|
||||
|
||||
ss::describe_any_ring.set(r, [&ctx, &ss](std::unique_ptr<request> req) {
|
||||
// Find an arbitrary non-system keyspace.
|
||||
auto keyspaces = ctx.db.local().get_non_system_keyspaces();
|
||||
if (keyspaces.empty()) {
|
||||
throw std::runtime_error("No keyspace provided and no non system kespace exist");
|
||||
}
|
||||
auto ks = keyspaces[0];
|
||||
return describe_ring_as_json(ss, ks);
|
||||
return describe_ring_as_json(ss, "");
|
||||
});
|
||||
|
||||
ss::describe_ring.set(r, [&ctx, &ss](std::unique_ptr<request> req) {
|
||||
@@ -659,11 +634,10 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
return db.find_column_family(id).get_stats().live_disk_space_used;
|
||||
});
|
||||
auto& cm = db.get_compaction_manager();
|
||||
auto owned_ranges_ptr = compaction::make_owned_ranges_ptr(db.get_keyspace_local_ranges(keyspace));
|
||||
// as a table can be dropped during loop below, let's find it before issuing the cleanup request.
|
||||
for (auto& id : table_ids) {
|
||||
replica::table& t = db.find_column_family(id);
|
||||
co_await t.perform_cleanup_compaction(owned_ranges_ptr);
|
||||
co_await t.perform_cleanup_compaction(db);
|
||||
}
|
||||
co_return;
|
||||
}).then([]{
|
||||
@@ -689,11 +663,10 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
|
||||
apilog.info("upgrade_sstables: keyspace={} tables={} exclude_current_version={}", keyspace, column_families, exclude_current_version);
|
||||
return ctx.db.invoke_on_all([=] (replica::database& db) {
|
||||
auto owned_ranges_ptr = compaction::make_owned_ranges_ptr(db.get_keyspace_local_ranges(keyspace));
|
||||
return do_for_each(column_families, [=, &db](sstring cfname) {
|
||||
auto& cm = db.get_compaction_manager();
|
||||
auto& cf = db.find_column_family(keyspace, cfname);
|
||||
return cm.perform_sstable_upgrade(owned_ranges_ptr, cf.as_table_state(), exclude_current_version);
|
||||
return cm.perform_sstable_upgrade(db, &cf, exclude_current_version);
|
||||
});
|
||||
}).then([]{
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
@@ -784,13 +757,13 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
|
||||
ss::get_operation_mode.set(r, [&ss](std::unique_ptr<request> req) {
|
||||
return ss.local().get_operation_mode().then([] (auto mode) {
|
||||
return make_ready_future<json::json_return_type>(format("{}", mode));
|
||||
return make_ready_future<json::json_return_type>(mode);
|
||||
});
|
||||
});
|
||||
|
||||
ss::is_starting.set(r, [&ss](std::unique_ptr<request> req) {
|
||||
return ss.local().get_operation_mode().then([] (auto mode) {
|
||||
return make_ready_future<json::json_return_type>(mode <= service::storage_service::mode::STARTING);
|
||||
return ss.local().is_starting().then([] (auto starting) {
|
||||
return make_ready_future<json::json_return_type>(starting);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -820,7 +793,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
ss::get_keyspaces.set(r, [&ctx](const_req req) {
|
||||
auto type = req.get_query_param("type");
|
||||
if (type == "user") {
|
||||
return ctx.db.local().get_user_keyspaces();
|
||||
return ctx.db.local().get_non_system_keyspaces();
|
||||
} else if (type == "non_local_strategy") {
|
||||
return map_keys(ctx.db.local().get_keyspaces() | boost::adaptors::filtered([](const auto& p) {
|
||||
return p.second.get_replication_strategy().get_type() != locator::replication_strategy_type::local;
|
||||
@@ -830,9 +803,8 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
});
|
||||
|
||||
ss::update_snitch.set(r, [](std::unique_ptr<request> req) {
|
||||
locator::snitch_config cfg;
|
||||
cfg.name = req->get_query_param("ep_snitch_class_name");
|
||||
return locator::i_endpoint_snitch::reset_snitch(cfg).then([] {
|
||||
auto ep_snitch_class_name = req->get_query_param("ep_snitch_class_name");
|
||||
return locator::i_endpoint_snitch::reset_snitch(ep_snitch_class_name).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
@@ -864,13 +836,9 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
ss::is_initialized.set(r, [&ss, &g](std::unique_ptr<request> req) {
|
||||
return ss.local().get_operation_mode().then([&g] (auto mode) {
|
||||
bool is_initialized = mode >= service::storage_service::mode::STARTING;
|
||||
if (mode == service::storage_service::mode::NORMAL) {
|
||||
is_initialized = g.is_enabled();
|
||||
}
|
||||
return make_ready_future<json::json_return_type>(is_initialized);
|
||||
ss::is_initialized.set(r, [&ss](std::unique_ptr<request> req) {
|
||||
return ss.local().is_initialized().then([] (bool initialized) {
|
||||
return make_ready_future<json::json_return_type>(initialized);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -879,9 +847,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
});
|
||||
|
||||
ss::is_joined.set(r, [&ss] (std::unique_ptr<request> req) {
|
||||
return ss.local().get_operation_mode().then([] (auto mode) {
|
||||
return make_ready_future<json::json_return_type>(mode >= service::storage_service::mode::JOINING);
|
||||
});
|
||||
return make_ready_future<json::json_return_type>(ss.local().is_joined());
|
||||
});
|
||||
|
||||
ss::set_stream_throughput_mb_per_sec.set(r, [](std::unique_ptr<request> req) {
|
||||
@@ -981,11 +947,14 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
|
||||
ss::reset_local_schema.set(r, [&ss](std::unique_ptr<request> req) -> future<json::json_return_type> {
|
||||
ss::reset_local_schema.set(r, [](std::unique_ptr<request> req) {
|
||||
// FIXME: We should truncate schema tables if more than one node in the cluster.
|
||||
auto& sp = service::get_storage_proxy();
|
||||
auto& fs = sp.local().features();
|
||||
apilog.info("reset_local_schema");
|
||||
co_await ss.local().reload_schema();
|
||||
co_return json_void();
|
||||
return db::schema_tables::recalculate_schema_version(sp, fs).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
ss::set_trace_probability.set(r, [](std::unique_ptr<request> req) {
|
||||
@@ -1051,20 +1020,20 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
}
|
||||
});
|
||||
|
||||
ss::enable_auto_compaction.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
ss::enable_auto_compaction.set(r, [&ctx, &ss](std::unique_ptr<request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto tables = parse_tables(keyspace, ctx, req->query_parameters, "cf");
|
||||
|
||||
apilog.info("enable_auto_compaction: keyspace={} tables={}", keyspace, tables);
|
||||
return set_tables_autocompaction(ctx, keyspace, tables, true);
|
||||
return set_tables_autocompaction(ctx, ss.local(), keyspace, tables, true);
|
||||
});
|
||||
|
||||
ss::disable_auto_compaction.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
ss::disable_auto_compaction.set(r, [&ctx, &ss](std::unique_ptr<request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto tables = parse_tables(keyspace, ctx, req->query_parameters, "cf");
|
||||
|
||||
apilog.info("disable_auto_compaction: keyspace={} tables={}", keyspace, tables);
|
||||
return set_tables_autocompaction(ctx, keyspace, tables, false);
|
||||
return set_tables_autocompaction(ctx, ss.local(), keyspace, tables, false);
|
||||
});
|
||||
|
||||
ss::deliver_hints.set(r, [](std::unique_ptr<request> req) {
|
||||
@@ -1213,7 +1182,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
ss::sstable info;
|
||||
|
||||
info.timestamp = t;
|
||||
info.generation = sstables::generation_value(sstable->generation());
|
||||
info.generation = sstable->generation();
|
||||
info.level = sstable->get_sstable_level();
|
||||
info.size = sstable->bytes_on_disk();
|
||||
info.data_size = sstable->ondisk_data_size();
|
||||
@@ -1290,13 +1259,6 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
});
|
||||
}
|
||||
|
||||
enum class scrub_status {
|
||||
successful = 0,
|
||||
aborted,
|
||||
unable_to_cancel, // Not used in Scylla, included to ensure compability with nodetool api.
|
||||
validation_errors,
|
||||
};
|
||||
|
||||
void set_snapshot(http_context& ctx, routes& r, sharded<db::snapshot_ctl>& snap_ctl) {
|
||||
ss::get_snapshot_details.set(r, [&snap_ctl](std::unique_ptr<request> req) {
|
||||
return snap_ctl.local().get_snapshot_details().then([] (std::unordered_map<sstring, std::vector<db::snapshot_ctl::snapshot_details>>&& result) {
|
||||
@@ -1383,29 +1345,17 @@ void set_snapshot(http_context& ctx, routes& r, sharded<db::snapshot_ctl>& snap_
|
||||
});
|
||||
});
|
||||
|
||||
ss::scrub.set(r, [&ctx, &snap_ctl] (std::unique_ptr<request> req) {
|
||||
auto rp = req_params({
|
||||
{"keyspace", {mandatory::yes}},
|
||||
{"cf", {""}},
|
||||
{"scrub_mode", {}},
|
||||
{"skip_corrupted", {}},
|
||||
{"disable_snapshot", {}},
|
||||
{"quarantine_mode", {}},
|
||||
});
|
||||
rp.process(*req);
|
||||
auto keyspace = validate_keyspace(ctx, *rp.get("keyspace"));
|
||||
auto column_families = parse_tables(keyspace, ctx, *rp.get("cf"));
|
||||
auto scrub_mode_opt = rp.get("scrub_mode");
|
||||
ss::scrub.set(r, wrap_ks_cf(ctx, [&snap_ctl] (http_context& ctx, std::unique_ptr<request> req, sstring keyspace, std::vector<sstring> column_families) {
|
||||
auto scrub_mode = sstables::compaction_type_options::scrub::mode::abort;
|
||||
|
||||
if (!scrub_mode_opt) {
|
||||
const auto skip_corrupted = rp.get_as<bool>("skip_corrupted").value_or(false);
|
||||
const sstring scrub_mode_str = req_param<sstring>(*req, "scrub_mode", "");
|
||||
if (scrub_mode_str == "") {
|
||||
const auto skip_corrupted = req_param<bool>(*req, "skip_corrupted", false);
|
||||
|
||||
if (skip_corrupted) {
|
||||
scrub_mode = sstables::compaction_type_options::scrub::mode::skip;
|
||||
}
|
||||
} else {
|
||||
auto scrub_mode_str = *scrub_mode_opt;
|
||||
if (scrub_mode_str == "ABORT") {
|
||||
scrub_mode = sstables::compaction_type_options::scrub::mode::abort;
|
||||
} else if (scrub_mode_str == "SKIP") {
|
||||
@@ -1415,7 +1365,7 @@ void set_snapshot(http_context& ctx, routes& r, sharded<db::snapshot_ctl>& snap_
|
||||
} else if (scrub_mode_str == "VALIDATE") {
|
||||
scrub_mode = sstables::compaction_type_options::scrub::mode::validate;
|
||||
} else {
|
||||
throw httpd::bad_param_exception(fmt::format("Unknown argument for 'scrub_mode' parameter: {}", scrub_mode_str));
|
||||
throw std::invalid_argument(fmt::format("Unknown argument for 'scrub_mode' parameter: {}", scrub_mode_str));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1438,39 +1388,20 @@ void set_snapshot(http_context& ctx, routes& r, sharded<db::snapshot_ctl>& snap_
|
||||
} else if (quarantine_mode_str == "ONLY") {
|
||||
opts.quarantine_operation_mode = sstables::compaction_type_options::scrub::quarantine_mode::only;
|
||||
} else {
|
||||
throw httpd::bad_param_exception(fmt::format("Unknown argument for 'quarantine_mode' parameter: {}", quarantine_mode_str));
|
||||
throw std::invalid_argument(fmt::format("Unknown argument for 'quarantine_mode' parameter: {}", quarantine_mode_str));
|
||||
}
|
||||
|
||||
const auto& reduce_compaction_stats = [] (const compaction_manager::compaction_stats_opt& lhs, const compaction_manager::compaction_stats_opt& rhs) {
|
||||
sstables::compaction_stats stats{};
|
||||
stats += lhs.value();
|
||||
stats += rhs.value();
|
||||
return stats;
|
||||
};
|
||||
|
||||
return f.then([&ctx, keyspace, column_families, opts, &reduce_compaction_stats] {
|
||||
return ctx.db.map_reduce0([=] (replica::database& db) {
|
||||
return map_reduce(column_families, [=, &db] (sstring cfname) {
|
||||
return f.then([&ctx, keyspace, column_families, opts] {
|
||||
return ctx.db.invoke_on_all([=] (replica::database& db) {
|
||||
return do_for_each(column_families, [=, &db](sstring cfname) {
|
||||
auto& cm = db.get_compaction_manager();
|
||||
auto& cf = db.find_column_family(keyspace, cfname);
|
||||
return cm.perform_sstable_scrub(cf.as_table_state(), opts);
|
||||
}, std::make_optional(sstables::compaction_stats{}), reduce_compaction_stats);
|
||||
}, std::make_optional(sstables::compaction_stats{}), reduce_compaction_stats);
|
||||
}).then_wrapped([] (auto f) {
|
||||
if (f.failed()) {
|
||||
auto ex = f.get_exception();
|
||||
if (try_catch<sstables::compaction_aborted_exception>(ex)) {
|
||||
return make_ready_future<json::json_return_type>(static_cast<int>(scrub_status::aborted));
|
||||
} else {
|
||||
return make_exception_future<json::json_return_type>(std::move(ex));
|
||||
}
|
||||
} else if (f.get()->validation_errors) {
|
||||
return make_ready_future<json::json_return_type>(static_cast<int>(scrub_status::validation_errors));
|
||||
} else {
|
||||
return make_ready_future<json::json_return_type>(static_cast<int>(scrub_status::successful));
|
||||
}
|
||||
return cm.perform_sstable_scrub(&cf, opts);
|
||||
});
|
||||
});
|
||||
}).then([]{
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
});
|
||||
}));
|
||||
}
|
||||
|
||||
void unset_snapshot(http_context& ctx, routes& r) {
|
||||
|
||||
@@ -19,7 +19,6 @@ class snapshot_ctl;
|
||||
namespace view {
|
||||
class view_builder;
|
||||
}
|
||||
class system_keyspace;
|
||||
}
|
||||
namespace netw { class messaging_service; }
|
||||
class repair_service;
|
||||
@@ -43,7 +42,7 @@ sstring validate_keyspace(http_context& ctx, const parameters& param);
|
||||
// containing the description of the respective no_such_column_family error.
|
||||
std::vector<sstring> parse_tables(const sstring& ks_name, http_context& ctx, const std::unordered_map<sstring, sstring>& query_params, sstring param_name);
|
||||
|
||||
void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_service>& ss, gms::gossiper& g, sharded<cdc::generation_service>& cdc_gs, sharded<db::system_keyspace>& sys_ls);
|
||||
void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_service>& ss, gms::gossiper& g, sharded<cdc::generation_service>& cdc_gs);
|
||||
void set_sstables_loader(http_context& ctx, routes& r, sharded<sstables_loader>& sst_loader);
|
||||
void unset_sstables_loader(http_context& ctx, routes& r);
|
||||
void set_view_builder(http_context& ctx, routes& r, sharded<db::view::view_builder>& vb);
|
||||
|
||||
@@ -66,48 +66,36 @@ atomic_cell::atomic_cell(const abstract_type& type, atomic_cell_view other)
|
||||
set_view(_data);
|
||||
}
|
||||
|
||||
// Based on Cassandra's resolveRegular function:
|
||||
// - https://github.com/apache/cassandra/blob/e4f31b73c21b04966269c5ac2d3bd2562e5f6c63/src/java/org/apache/cassandra/db/rows/Cells.java#L79-L119
|
||||
//
|
||||
// Note: the ordering algorithm for cell is the same as for rows,
|
||||
// except that the cell value is used to break a tie in case all other attributes are equal.
|
||||
// See compare_row_marker_for_merge.
|
||||
// Based on:
|
||||
// - org.apache.cassandra.db.AbstractCell#reconcile()
|
||||
// - org.apache.cassandra.db.BufferExpiringCell#reconcile()
|
||||
// - org.apache.cassandra.db.BufferDeletedCell#reconcile()
|
||||
std::strong_ordering
|
||||
compare_atomic_cell_for_merge(atomic_cell_view left, atomic_cell_view right) {
|
||||
// Largest write timestamp wins.
|
||||
if (left.timestamp() != right.timestamp()) {
|
||||
return left.timestamp() <=> right.timestamp();
|
||||
}
|
||||
// Tombstones always win reconciliation with live cells of the same timestamp
|
||||
if (left.is_live() != right.is_live()) {
|
||||
return left.is_live() ? std::strong_ordering::less : std::strong_ordering::greater;
|
||||
}
|
||||
if (left.is_live()) {
|
||||
// Prefer expiring cells (which will become tombstones at some future date) over live cells.
|
||||
// See https://issues.apache.org/jira/browse/CASSANDRA-14592
|
||||
auto c = compare_unsigned(left.value(), right.value()) <=> 0;
|
||||
if (c != 0) {
|
||||
return c;
|
||||
}
|
||||
if (left.is_live_and_has_ttl() != right.is_live_and_has_ttl()) {
|
||||
// prefer expiring cells.
|
||||
return left.is_live_and_has_ttl() ? std::strong_ordering::greater : std::strong_ordering::less;
|
||||
}
|
||||
// If both are expiring, choose the cell with the latest expiry or derived write time.
|
||||
if (left.is_live_and_has_ttl()) {
|
||||
// Prefer cell with latest expiry
|
||||
if (left.expiry() != right.expiry()) {
|
||||
return left.expiry() <=> right.expiry();
|
||||
} else if (right.ttl() != left.ttl()) {
|
||||
// The cell write time is derived by (expiry - ttl).
|
||||
// Prefer the cell that was written later,
|
||||
// so it survives longer after it expires, until purged,
|
||||
// as it become purgeable gc_grace_seconds after it was written.
|
||||
//
|
||||
// Note that this is an extension to Cassandra's algorithm
|
||||
// which stops at the expiration time, and if equal,
|
||||
// move forward to compare the cell values.
|
||||
} else {
|
||||
// prefer the cell that was written later,
|
||||
// so it survives longer after it expires, until purged.
|
||||
return right.ttl() <=> left.ttl();
|
||||
}
|
||||
}
|
||||
// The cell with the largest value wins, if all other attributes of the cells are identical.
|
||||
// This is quite arbitrary, but still required to break the tie in a deterministic way.
|
||||
return compare_unsigned(left.value(), right.value());
|
||||
} else {
|
||||
// Both are deleted
|
||||
|
||||
|
||||
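The precedence encoded above (largest timestamp, then tombstone over live, then expiring over non-expiring, then latest expiry, then derived write time, then value) can be easier to follow in a small self-contained model. The sketch below is illustrative only: `cell` is a hypothetical stand-in for `atomic_cell_view`, and the final "both deleted" tie-break is deliberately omitted because that branch is truncated in the hunk above.

```cpp
// Illustrative model of the reconciliation order described above. Not Scylla code.
#include <compare>
#include <cstdint>
#include <iostream>
#include <string>

struct cell {
    int64_t timestamp;
    bool live;
    bool has_ttl;      // only meaningful when live
    int32_t expiry;    // seconds since epoch, only meaningful when has_ttl
    int32_t ttl;       // only meaningful when has_ttl
    std::string value;
};

std::strong_ordering compare_for_merge(const cell& l, const cell& r) {
    if (l.timestamp != r.timestamp) {
        return l.timestamp <=> r.timestamp;              // largest write timestamp wins
    }
    if (l.live != r.live) {
        return l.live ? std::strong_ordering::less       // tombstone beats live cell
                      : std::strong_ordering::greater;
    }
    if (l.live) {
        if (l.has_ttl != r.has_ttl) {
            return l.has_ttl ? std::strong_ordering::greater   // prefer the expiring cell
                             : std::strong_ordering::less;
        }
        if (l.has_ttl) {
            if (l.expiry != r.expiry) {
                return l.expiry <=> r.expiry;            // prefer the latest expiry
            }
            if (l.ttl != r.ttl) {
                return r.ttl <=> l.ttl;                  // same expiry: prefer the later write time (expiry - ttl)
            }
        }
        return l.value.compare(r.value) <=> 0;           // deterministic tie-break on the value
    }
    return std::strong_ordering::equal;                  // both deleted: remaining tie-breaking omitted in this sketch
}

int main() {
    cell live_cell{.timestamp = 10, .live = true,  .has_ttl = false, .expiry = 0, .ttl = 0, .value = "a"};
    cell tombstone{.timestamp = 10, .live = false, .has_ttl = false, .expiry = 0, .ttl = 0, .value = ""};
    std::cout << (compare_for_merge(live_cell, tombstone) < 0 ? "tombstone wins\n" : "live cell wins\n");
}
```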
@@ -1,3 +1,6 @@
|
||||
/*
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2016-present ScyllaDB
|
||||
*
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
/*
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2016-present ScyllaDB
|
||||
*
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
/*
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2016-present ScyllaDB
|
||||
*
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
/*
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2016-present ScyllaDB
|
||||
*
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
/*
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2016-present ScyllaDB
|
||||
*
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
/*
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2016-present ScyllaDB
|
||||
*
|
||||
@@ -74,7 +77,7 @@ future<bool> default_authorizer::any_granted() const {
|
||||
query,
|
||||
db::consistency_level::LOCAL_ONE,
|
||||
{},
|
||||
cql3::query_processor::cache_internal::yes).then([this](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
true).then([this](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
return !results->empty();
|
||||
});
|
||||
}
|
||||
@@ -85,8 +88,7 @@ future<> default_authorizer::migrate_legacy_metadata() const {
|
||||
|
||||
return _qp.execute_internal(
|
||||
query,
|
||||
db::consistency_level::LOCAL_ONE,
|
||||
cql3::query_processor::cache_internal::no).then([this](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
db::consistency_level::LOCAL_ONE).then([this](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
return do_for_each(*results, [this](const cql3::untyped_result_set_row& row) {
|
||||
return do_with(
|
||||
row.get_as<sstring>("username"),
|
||||
@@ -166,8 +168,7 @@ default_authorizer::authorize(const role_or_anonymous& maybe_role, const resourc
|
||||
return _qp.execute_internal(
|
||||
query,
|
||||
db::consistency_level::LOCAL_ONE,
|
||||
{*maybe_role.name, r.name()},
|
||||
cql3::query_processor::cache_internal::yes).then([](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
{*maybe_role.name, r.name()}).then([](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
if (results->empty()) {
|
||||
return permissions::NONE;
|
||||
}
|
||||
@@ -196,8 +197,7 @@ default_authorizer::modify(
|
||||
query,
|
||||
db::consistency_level::ONE,
|
||||
internal_distributed_query_state(),
|
||||
{permissions::to_strings(set), sstring(role_name), resource.name()},
|
||||
cql3::query_processor::cache_internal::no).discard_result();
|
||||
{permissions::to_strings(set), sstring(role_name), resource.name()}).discard_result();
|
||||
});
|
||||
}
|
||||
|
||||
@@ -223,7 +223,7 @@ future<std::vector<permission_details>> default_authorizer::list_all() const {
|
||||
db::consistency_level::ONE,
|
||||
internal_distributed_query_state(),
|
||||
{},
|
||||
cql3::query_processor::cache_internal::yes).then([](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
true).then([](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
std::vector<permission_details> all_details;
|
||||
|
||||
for (const auto& row : *results) {
|
||||
@@ -249,8 +249,7 @@ future<> default_authorizer::revoke_all(std::string_view role_name) const {
|
||||
query,
|
||||
db::consistency_level::ONE,
|
||||
internal_distributed_query_state(),
|
||||
{sstring(role_name)},
|
||||
cql3::query_processor::cache_internal::no).discard_result().handle_exception([role_name](auto ep) {
|
||||
{sstring(role_name)}).discard_result().handle_exception([role_name](auto ep) {
|
||||
try {
|
||||
std::rethrow_exception(ep);
|
||||
} catch (exceptions::request_execution_exception& e) {
|
||||
@@ -269,8 +268,7 @@ future<> default_authorizer::revoke_all(const resource& resource) const {
|
||||
return _qp.execute_internal(
|
||||
query,
|
||||
db::consistency_level::LOCAL_ONE,
|
||||
{resource.name()},
|
||||
cql3::query_processor::cache_internal::no).then_wrapped([this, resource](future<::shared_ptr<cql3::untyped_result_set>> f) {
|
||||
{resource.name()}).then_wrapped([this, resource](future<::shared_ptr<cql3::untyped_result_set>> f) {
|
||||
try {
|
||||
auto res = f.get0();
|
||||
return parallel_for_each(
|
||||
@@ -286,8 +284,7 @@ future<> default_authorizer::revoke_all(const resource& resource) const {
|
||||
return _qp.execute_internal(
|
||||
query,
|
||||
db::consistency_level::LOCAL_ONE,
|
||||
{r.get_as<sstring>(ROLE_NAME), resource.name()},
|
||||
cql3::query_processor::cache_internal::no).discard_result().handle_exception(
|
||||
{r.get_as<sstring>(ROLE_NAME), resource.name()}).discard_result().handle_exception(
|
||||
[resource](auto ep) {
|
||||
try {
|
||||
std::rethrow_exception(ep);
|
||||
|
||||
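The repetitive hunks above all revert the same API detail: on one branch `query_processor::execute_internal()` takes an explicit `cache_internal` flag as its trailing argument, on the other it takes a plain `true`/`false` or omits the argument entirely. The sketch below is not Scylla code; it only illustrates the named-flag idiom the newer signature relies on, using a hypothetical `cache_internal` tag and `execute()` function.

```cpp
// Minimal illustration of a self-documenting boolean flag, similar in spirit to the
// cache_internal argument appearing in the hunks above. Hypothetical code.
#include <iostream>

// A tiny "strong bool": a distinct type per flag, so call sites read
// execute(query, cache_internal::yes) rather than execute(query, true).
enum class cache_internal : bool { no = false, yes = true };

void execute(const char* query, cache_internal cache) {
    std::cout << query << " (cache prepared statement: "
              << (cache == cache_internal::yes ? "yes" : "no") << ")\n";
}

int main() {
    execute("SELECT * FROM system.local", cache_internal::yes);
    execute("TRUNCATE test.t", cache_internal::no);
}
```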
@@ -1,3 +1,6 @@
|
||||
/*
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2016-present ScyllaDB
|
||||
*
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
/*
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2016-present ScyllaDB
|
||||
*
|
||||
@@ -84,8 +87,7 @@ future<> password_authenticator::migrate_legacy_metadata() const {
|
||||
return _qp.execute_internal(
|
||||
query,
|
||||
db::consistency_level::QUORUM,
|
||||
internal_distributed_query_state(),
|
||||
cql3::query_processor::cache_internal::no).then([this](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
internal_distributed_query_state()).then([this](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
return do_for_each(*results, [this](const cql3::untyped_result_set_row& row) {
|
||||
auto username = row.get_as<sstring>("username");
|
||||
auto salted_hash = row.get_as<sstring>(SALTED_HASH);
|
||||
@@ -94,8 +96,7 @@ future<> password_authenticator::migrate_legacy_metadata() const {
|
||||
update_row_query(),
|
||||
consistency_for_user(username),
|
||||
internal_distributed_query_state(),
|
||||
{std::move(salted_hash), username},
|
||||
cql3::query_processor::cache_internal::no).discard_result();
|
||||
{std::move(salted_hash), username}).discard_result();
|
||||
}).finally([results] {});
|
||||
}).then([] {
|
||||
plogger.info("Finished migrating legacy authentication metadata.");
|
||||
@@ -112,8 +113,7 @@ future<> password_authenticator::create_default_if_missing() const {
|
||||
update_row_query(),
|
||||
db::consistency_level::QUORUM,
|
||||
internal_distributed_query_state(),
|
||||
{passwords::hash(DEFAULT_USER_PASSWORD, rng_for_salt), DEFAULT_USER_NAME},
|
||||
cql3::query_processor::cache_internal::no).then([](auto&&) {
|
||||
{passwords::hash(DEFAULT_USER_PASSWORD, rng_for_salt), DEFAULT_USER_NAME}).then([](auto&&) {
|
||||
plogger.info("Created default superuser authentication record.");
|
||||
});
|
||||
}
|
||||
@@ -211,7 +211,7 @@ future<authenticated_user> password_authenticator::authenticate(
|
||||
consistency_for_user(username),
|
||||
internal_distributed_query_state(),
|
||||
{username},
|
||||
cql3::query_processor::cache_internal::yes);
|
||||
true);
|
||||
}).then_wrapped([=](future<::shared_ptr<cql3::untyped_result_set>> f) {
|
||||
try {
|
||||
auto res = f.get0();
|
||||
@@ -244,8 +244,7 @@ future<> password_authenticator::create(std::string_view role_name, const authen
|
||||
update_row_query(),
|
||||
consistency_for_user(role_name),
|
||||
internal_distributed_query_state(),
|
||||
{passwords::hash(*options.password, rng_for_salt), sstring(role_name)},
|
||||
cql3::query_processor::cache_internal::no).discard_result();
|
||||
{passwords::hash(*options.password, rng_for_salt), sstring(role_name)}).discard_result();
|
||||
}
|
||||
|
||||
future<> password_authenticator::alter(std::string_view role_name, const authentication_options& options) const {
|
||||
@@ -262,8 +261,7 @@ future<> password_authenticator::alter(std::string_view role_name, const authent
|
||||
query,
|
||||
consistency_for_user(role_name),
|
||||
internal_distributed_query_state(),
|
||||
{passwords::hash(*options.password, rng_for_salt), sstring(role_name)},
|
||||
cql3::query_processor::cache_internal::no).discard_result();
|
||||
{passwords::hash(*options.password, rng_for_salt), sstring(role_name)}).discard_result();
|
||||
}
|
||||
|
||||
future<> password_authenticator::drop(std::string_view name) const {
|
||||
@@ -275,8 +273,7 @@ future<> password_authenticator::drop(std::string_view name) const {
|
||||
return _qp.execute_internal(
|
||||
query, consistency_for_user(name),
|
||||
internal_distributed_query_state(),
|
||||
{sstring(name)},
|
||||
cql3::query_processor::cache_internal::no).discard_result();
|
||||
{sstring(name)}).discard_result();
|
||||
}
|
||||
|
||||
future<custom_options> password_authenticator::query_custom_options(std::string_view role_name) const {
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
/*
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2016-present ScyllaDB
|
||||
*
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
/*
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2016-present ScyllaDB
|
||||
*
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
/*
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2016-present ScyllaDB
|
||||
*
|
||||
|
||||
@@ -14,21 +14,13 @@
|
||||
|
||||
namespace auth {
|
||||
|
||||
permissions_cache::permissions_cache(const utils::loading_cache_config& c, service& ser, logging::logger& log)
|
||||
: _cache(c, log, [&ser, &log](const key_type& k) {
|
||||
permissions_cache::permissions_cache(const permissions_cache_config& c, service& ser, logging::logger& log)
|
||||
: _cache(c.max_entries, c.validity_period, c.update_period, log, [&ser, &log](const key_type& k) {
|
||||
log.debug("Refreshing permissions for {}", k.first);
|
||||
return ser.get_uncached_permissions(k.first, k.second);
|
||||
}) {
|
||||
}
|
||||
|
||||
bool permissions_cache::update_config(utils::loading_cache_config c) {
|
||||
return _cache.update_config(std::move(c));
|
||||
}
|
||||
|
||||
void permissions_cache::reset() {
|
||||
_cache.reset();
|
||||
}
|
||||
|
||||
future<permission_set> permissions_cache::get(const role_or_anonymous& maybe_role, const resource& r) {
|
||||
return do_with(key_type(maybe_role, r), [this](const auto& k) {
|
||||
return _cache.get(k);
|
||||
|
||||
@@ -44,6 +44,12 @@ namespace auth {
|
||||
|
||||
class service;
|
||||
|
||||
struct permissions_cache_config final {
|
||||
std::size_t max_entries;
|
||||
std::chrono::milliseconds validity_period;
|
||||
std::chrono::milliseconds update_period;
|
||||
};
|
||||
|
||||
class permissions_cache final {
|
||||
using cache_type = utils::loading_cache<
|
||||
std::pair<role_or_anonymous, resource>,
|
||||
@@ -58,14 +64,12 @@ class permissions_cache final {
|
||||
cache_type _cache;
|
||||
|
||||
public:
|
||||
explicit permissions_cache(const utils::loading_cache_config&, service&, logging::logger&);
|
||||
explicit permissions_cache(const permissions_cache_config&, service&, logging::logger&);
|
||||
|
||||
future <> stop() {
|
||||
return _cache.stop();
|
||||
}
|
||||
|
||||
bool update_config(utils::loading_cache_config);
|
||||
void reset();
|
||||
future<permission_set> get(const role_or_anonymous&, const resource&);
|
||||
};
|
||||
|
||||
|
||||
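The two constructor signatures above differ only in how the cache is configured: the older shape passes `max_entries`, `validity_period` and `update_period` explicitly, while the newer one wraps the same three knobs in `utils::loading_cache_config` as `max_size`, `expiry` and `refresh` (the mapping is visible in `service::update_cache_config()` further down). A minimal stand-alone model of that mapping follows; the struct definitions here are simplified stand-ins, not the real headers.

```cpp
// Hypothetical stand-ins for the two config shapes seen in this diff.
#include <chrono>
#include <cstddef>
#include <iostream>

struct permissions_cache_config {          // older shape
    std::size_t max_entries;
    std::chrono::milliseconds validity_period;
    std::chrono::milliseconds update_period;
};

struct loading_cache_config {              // newer shape
    std::size_t max_size;
    std::chrono::milliseconds expiry;
    std::chrono::milliseconds refresh;
};

// The mapping applied by service::update_cache_config() in the newer branch.
loading_cache_config to_loading_cache_config(const permissions_cache_config& c) {
    return {
        .max_size = c.max_entries,
        .expiry   = c.validity_period,   // how long a cached permission set stays valid
        .refresh  = c.update_period,     // how often it is refreshed in the background
    };
}

int main() {
    permissions_cache_config old_cfg{1000, std::chrono::milliseconds(2000), std::chrono::milliseconds(2000)};
    auto new_cfg = to_loading_cache_config(old_cfg);
    std::cout << new_cfg.max_size << " entries, expiry " << new_cfg.expiry.count() << " ms\n";
}
```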
@@ -1,3 +1,6 @@
|
||||
/*
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2016-present ScyllaDB
|
||||
*
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
/*
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2016-present ScyllaDB
|
||||
*
|
||||
|
||||
@@ -55,16 +55,15 @@ future<bool> default_role_row_satisfies(
|
||||
return qp.execute_internal(
|
||||
query,
|
||||
db::consistency_level::ONE,
|
||||
internal_distributed_query_state(),
|
||||
{meta::DEFAULT_SUPERUSER_NAME},
|
||||
cql3::query_processor::cache_internal::yes).then([&qp, &p](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
true).then([&qp, &p](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
if (results->empty()) {
|
||||
return qp.execute_internal(
|
||||
query,
|
||||
db::consistency_level::QUORUM,
|
||||
internal_distributed_query_state(),
|
||||
{meta::DEFAULT_SUPERUSER_NAME},
|
||||
cql3::query_processor::cache_internal::yes).then([&p](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
true).then([&p](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
if (results->empty()) {
|
||||
return make_ready_future<bool>(false);
|
||||
}
|
||||
@@ -87,8 +86,7 @@ future<bool> any_nondefault_role_row_satisfies(
|
||||
return qp.execute_internal(
|
||||
query,
|
||||
db::consistency_level::QUORUM,
|
||||
internal_distributed_query_state(),
|
||||
cql3::query_processor::cache_internal::no).then([&p](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
internal_distributed_query_state()).then([&p](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
if (results->empty()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
/*
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2019-present ScyllaDB
|
||||
*
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
/*
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2019-present ScyllaDB
|
||||
*
|
||||
|
||||
@@ -22,7 +22,6 @@
|
||||
#include "auth/role_or_anonymous.hh"
|
||||
#include "cql3/query_processor.hh"
|
||||
#include "cql3/untyped_result_set.hh"
|
||||
#include "db/config.hh"
|
||||
#include "db/consistency_level_type.hh"
|
||||
#include "exceptions/exceptions.hh"
|
||||
#include "log.hh"
|
||||
@@ -101,28 +100,23 @@ static future<> validate_role_exists(const service& ser, std::string_view role_n
|
||||
}
|
||||
|
||||
service::service(
|
||||
utils::loading_cache_config c,
|
||||
permissions_cache_config c,
|
||||
cql3::query_processor& qp,
|
||||
::service::migration_notifier& mn,
|
||||
std::unique_ptr<authorizer> z,
|
||||
std::unique_ptr<authenticator> a,
|
||||
std::unique_ptr<role_manager> r)
|
||||
: _loading_cache_config(std::move(c))
|
||||
: _permissions_cache_config(std::move(c))
|
||||
, _permissions_cache(nullptr)
|
||||
, _qp(qp)
|
||||
, _mnotifier(mn)
|
||||
, _authorizer(std::move(z))
|
||||
, _authenticator(std::move(a))
|
||||
, _role_manager(std::move(r))
|
||||
, _migration_listener(std::make_unique<auth_migration_listener>(*_authorizer))
|
||||
, _permissions_cache_cfg_cb([this] (uint32_t) { (void) _permissions_cache_config_action.trigger_later(); })
|
||||
, _permissions_cache_config_action([this] { update_cache_config(); return make_ready_future<>(); })
|
||||
, _permissions_cache_max_entries_observer(_qp.db().get_config().permissions_cache_max_entries.observe(_permissions_cache_cfg_cb))
|
||||
, _permissions_cache_update_interval_in_ms_observer(_qp.db().get_config().permissions_update_interval_in_ms.observe(_permissions_cache_cfg_cb))
|
||||
, _permissions_cache_validity_in_ms_observer(_qp.db().get_config().permissions_validity_in_ms.observe(_permissions_cache_cfg_cb)) {}
|
||||
, _migration_listener(std::make_unique<auth_migration_listener>(*_authorizer)) {}
|
||||
|
||||
service::service(
|
||||
utils::loading_cache_config c,
|
||||
permissions_cache_config c,
|
||||
cql3::query_processor& qp,
|
||||
::service::migration_notifier& mn,
|
||||
::service::migration_manager& mm,
|
||||
@@ -166,7 +160,7 @@ future<> service::start(::service::migration_manager& mm) {
|
||||
return when_all_succeed(_authorizer->start(), _authenticator->start()).discard_result();
|
||||
});
|
||||
}).then([this] {
|
||||
_permissions_cache = std::make_unique<permissions_cache>(_loading_cache_config, *this, log);
|
||||
_permissions_cache = std::make_unique<permissions_cache>(_permissions_cache_config, *this, log);
|
||||
}).then([this] {
|
||||
return once_among_shards([this] {
|
||||
_mnotifier.register_listener(_migration_listener.get());
|
||||
@@ -188,24 +182,6 @@ future<> service::stop() {
|
||||
});
|
||||
}
|
||||
|
||||
void service::update_cache_config() {
|
||||
auto db = _qp.db();
|
||||
|
||||
utils::loading_cache_config perm_cache_config;
|
||||
perm_cache_config.max_size = db.get_config().permissions_cache_max_entries();
|
||||
perm_cache_config.expiry = std::chrono::milliseconds(db.get_config().permissions_validity_in_ms());
|
||||
perm_cache_config.refresh = std::chrono::milliseconds(db.get_config().permissions_update_interval_in_ms());
|
||||
|
||||
if (!_permissions_cache->update_config(std::move(perm_cache_config))) {
|
||||
log.error("Failed to apply permissions cache changes. Please read the documentation of these parameters");
|
||||
}
|
||||
}
|
||||
|
||||
void service::reset_authorization_cache() {
|
||||
_permissions_cache->reset();
|
||||
_qp.reset_cache();
|
||||
}
|
||||
|
||||
future<bool> service::has_existing_legacy_users() const {
|
||||
if (!_qp.db().has_schema(meta::AUTH_KS, meta::USERS_CF)) {
|
||||
return make_ready_future<bool>(false);
|
||||
@@ -227,7 +203,7 @@ future<bool> service::has_existing_legacy_users() const {
|
||||
default_user_query,
|
||||
db::consistency_level::ONE,
|
||||
{meta::DEFAULT_SUPERUSER_NAME},
|
||||
cql3::query_processor::cache_internal::yes).then([this](auto results) {
|
||||
true).then([this](auto results) {
|
||||
if (!results->empty()) {
|
||||
return make_ready_future<bool>(true);
|
||||
}
|
||||
@@ -236,15 +212,14 @@ future<bool> service::has_existing_legacy_users() const {
|
||||
default_user_query,
|
||||
db::consistency_level::QUORUM,
|
||||
{meta::DEFAULT_SUPERUSER_NAME},
|
||||
cql3::query_processor::cache_internal::yes).then([this](auto results) {
|
||||
true).then([this](auto results) {
|
||||
if (!results->empty()) {
|
||||
return make_ready_future<bool>(true);
|
||||
}
|
||||
|
||||
return _qp.execute_internal(
|
||||
all_users_query,
|
||||
db::consistency_level::QUORUM,
|
||||
cql3::query_processor::cache_internal::no).then([](auto results) {
|
||||
db::consistency_level::QUORUM).then([](auto results) {
|
||||
return make_ready_future<bool>(!results->empty());
|
||||
});
|
||||
});
|
||||
|
||||
@@ -23,8 +23,6 @@
|
||||
#include "auth/permissions_cache.hh"
|
||||
#include "auth/role_manager.hh"
|
||||
#include "seastarx.hh"
|
||||
#include "utils/observable.hh"
|
||||
#include "utils/serialized_action.hh"
|
||||
|
||||
namespace cql3 {
|
||||
class query_processor;
|
||||
@@ -70,7 +68,7 @@ public:
|
||||
/// peering_sharded_service inheritance is needed to be able to access the shard-local authentication service
|
||||
/// given an object from another shard. Used for bouncing LWT requests to the correct shard.
|
||||
class service final : public seastar::peering_sharded_service<service> {
|
||||
utils::loading_cache_config _loading_cache_config;
|
||||
permissions_cache_config _permissions_cache_config;
|
||||
std::unique_ptr<permissions_cache> _permissions_cache;
|
||||
|
||||
cql3::query_processor& _qp;
|
||||
@@ -86,16 +84,9 @@ class service final : public seastar::peering_sharded_service<service> {
|
||||
// Only one of these should be registered, so we end up with some unused instances. Not the end of the world.
|
||||
std::unique_ptr<::service::migration_listener> _migration_listener;
|
||||
|
||||
std::function<void(uint32_t)> _permissions_cache_cfg_cb;
|
||||
serialized_action _permissions_cache_config_action;
|
||||
|
||||
utils::observer<uint32_t> _permissions_cache_max_entries_observer;
|
||||
utils::observer<uint32_t> _permissions_cache_update_interval_in_ms_observer;
|
||||
utils::observer<uint32_t> _permissions_cache_validity_in_ms_observer;
|
||||
|
||||
public:
|
||||
service(
|
||||
utils::loading_cache_config,
|
||||
permissions_cache_config,
|
||||
cql3::query_processor&,
|
||||
::service::migration_notifier&,
|
||||
std::unique_ptr<authorizer>,
|
||||
@@ -108,7 +99,7 @@ public:
|
||||
/// of the instances themselves.
|
||||
///
|
||||
service(
|
||||
utils::loading_cache_config,
|
||||
permissions_cache_config,
|
||||
cql3::query_processor&,
|
||||
::service::migration_notifier&,
|
||||
::service::migration_manager&,
|
||||
@@ -118,10 +109,6 @@ public:
|
||||
|
||||
future<> stop();
|
||||
|
||||
void update_cache_config();
|
||||
|
||||
void reset_authorization_cache();
|
||||
|
||||
///
|
||||
/// \returns an exceptional future with \ref nonexistant_role if the named role does not exist.
|
||||
///
|
||||
|
||||
@@ -95,7 +95,7 @@ static future<std::optional<record>> find_record(cql3::query_processor& qp, std:
|
||||
consistency_for_role(role_name),
|
||||
internal_distributed_query_state(),
|
||||
{sstring(role_name)},
|
||||
cql3::query_processor::cache_internal::yes).then([](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
true).then([](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
if (results->empty()) {
|
||||
return std::optional<record>();
|
||||
}
|
||||
@@ -178,8 +178,7 @@ future<> standard_role_manager::create_default_role_if_missing() const {
|
||||
query,
|
||||
db::consistency_level::QUORUM,
|
||||
internal_distributed_query_state(),
|
||||
{meta::DEFAULT_SUPERUSER_NAME},
|
||||
cql3::query_processor::cache_internal::no).then([](auto&&) {
|
||||
{meta::DEFAULT_SUPERUSER_NAME}).then([](auto&&) {
|
||||
log.info("Created default superuser role '{}'.", meta::DEFAULT_SUPERUSER_NAME);
|
||||
return make_ready_future<>();
|
||||
});
|
||||
@@ -205,8 +204,7 @@ future<> standard_role_manager::migrate_legacy_metadata() const {
|
||||
return _qp.execute_internal(
|
||||
query,
|
||||
db::consistency_level::QUORUM,
|
||||
internal_distributed_query_state(),
|
||||
cql3::query_processor::cache_internal::no).then([this](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
internal_distributed_query_state()).then([this](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
return do_for_each(*results, [this](const cql3::untyped_result_set_row& row) {
|
||||
role_config config;
|
||||
config.is_superuser = row.get_or<bool>("super", false);
|
||||
@@ -269,7 +267,7 @@ future<> standard_role_manager::create_or_replace(std::string_view role_name, co
|
||||
consistency_for_role(role_name),
|
||||
internal_distributed_query_state(),
|
||||
{sstring(role_name), c.is_superuser, c.can_login},
|
||||
cql3::query_processor::cache_internal::yes).discard_result();
|
||||
true).discard_result();
|
||||
}
|
||||
|
||||
future<>
|
||||
@@ -311,8 +309,7 @@ standard_role_manager::alter(std::string_view role_name, const role_config_updat
|
||||
meta::roles_table::role_col_name),
|
||||
consistency_for_role(role_name),
|
||||
internal_distributed_query_state(),
|
||||
{sstring(role_name)},
|
||||
cql3::query_processor::cache_internal::no).discard_result();
|
||||
{sstring(role_name)}).discard_result();
|
||||
});
|
||||
}
|
||||
|
||||
@@ -331,8 +328,7 @@ future<> standard_role_manager::drop(std::string_view role_name) {
|
||||
query,
|
||||
consistency_for_role(role_name),
|
||||
internal_distributed_query_state(),
|
||||
{sstring(role_name)},
|
||||
cql3::query_processor::cache_internal::no).then([this, role_name](::shared_ptr<cql3::untyped_result_set> members) {
|
||||
{sstring(role_name)}).then([this, role_name](::shared_ptr<cql3::untyped_result_set> members) {
|
||||
return parallel_for_each(
|
||||
members->begin(),
|
||||
members->end(),
|
||||
@@ -364,7 +360,7 @@ future<> standard_role_manager::drop(std::string_view role_name) {
|
||||
// Delete all attributes for that role
|
||||
const auto remove_attributes_of = [this, role_name] {
|
||||
static const sstring query = format("DELETE FROM {} WHERE role = ?", meta::role_attributes_table::qualified_name());
|
||||
return _qp.execute_internal(query, {sstring(role_name)}, cql3::query_processor::cache_internal::yes).discard_result();
|
||||
return _qp.execute_internal(query, {sstring(role_name)}).discard_result();
|
||||
};
|
||||
|
||||
// Finally, delete the role itself.
|
||||
@@ -377,8 +373,7 @@ future<> standard_role_manager::drop(std::string_view role_name) {
|
||||
query,
|
||||
consistency_for_role(role_name),
|
||||
internal_distributed_query_state(),
|
||||
{sstring(role_name)},
|
||||
cql3::query_processor::cache_internal::no).discard_result();
|
||||
{sstring(role_name)}).discard_result();
|
||||
};
|
||||
|
||||
return when_all_succeed(revoke_from_members(), revoke_members_of(),
|
||||
@@ -406,8 +401,7 @@ standard_role_manager::modify_membership(
|
||||
query,
|
||||
consistency_for_role(grantee_name),
|
||||
internal_distributed_query_state(),
|
||||
{role_set{sstring(role_name)}, sstring(grantee_name)},
|
||||
cql3::query_processor::cache_internal::no).discard_result();
|
||||
{role_set{sstring(role_name)}, sstring(grantee_name)}).discard_result();
|
||||
};
|
||||
|
||||
const auto modify_role_members = [this, role_name, grantee_name, ch] {
|
||||
@@ -418,8 +412,7 @@ standard_role_manager::modify_membership(
|
||||
meta::role_members_table::qualified_name),
|
||||
consistency_for_role(role_name),
|
||||
internal_distributed_query_state(),
|
||||
{sstring(role_name), sstring(grantee_name)},
|
||||
cql3::query_processor::cache_internal::no).discard_result();
|
||||
{sstring(role_name), sstring(grantee_name)}).discard_result();
|
||||
|
||||
case membership_change::remove:
|
||||
return _qp.execute_internal(
|
||||
@@ -427,8 +420,7 @@ standard_role_manager::modify_membership(
|
||||
meta::role_members_table::qualified_name),
|
||||
consistency_for_role(role_name),
|
||||
internal_distributed_query_state(),
|
||||
{sstring(role_name), sstring(grantee_name)},
|
||||
cql3::query_processor::cache_internal::no).discard_result();
|
||||
{sstring(role_name), sstring(grantee_name)}).discard_result();
|
||||
}
|
||||
|
||||
return make_ready_future<>();
|
||||
@@ -530,8 +522,7 @@ future<role_set> standard_role_manager::query_all() {
|
||||
return _qp.execute_internal(
|
||||
query,
|
||||
db::consistency_level::QUORUM,
|
||||
internal_distributed_query_state(),
|
||||
cql3::query_processor::cache_internal::yes).then([](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
internal_distributed_query_state()).then([](::shared_ptr<cql3::untyped_result_set> results) {
|
||||
role_set roles;
|
||||
|
||||
std::transform(
|
||||
@@ -566,7 +557,7 @@ future<bool> standard_role_manager::can_login(std::string_view role_name) {
|
||||
|
||||
future<std::optional<sstring>> standard_role_manager::get_attribute(std::string_view role_name, std::string_view attribute_name) {
|
||||
static const sstring query = format("SELECT name, value FROM {} WHERE role = ? AND name = ?", meta::role_attributes_table::qualified_name());
|
||||
return _qp.execute_internal(query, {sstring(role_name), sstring(attribute_name)}, cql3::query_processor::cache_internal::yes).then([] (shared_ptr<cql3::untyped_result_set> result_set) {
|
||||
return _qp.execute_internal(query, {sstring(role_name), sstring(attribute_name)}).then([] (shared_ptr<cql3::untyped_result_set> result_set) {
|
||||
if (!result_set->empty()) {
|
||||
const cql3::untyped_result_set_row &row = result_set->one();
|
||||
return std::optional<sstring>(row.get_as<sstring>("value"));
|
||||
@@ -599,7 +590,7 @@ future<> standard_role_manager::set_attribute(std::string_view role_name, std::s
|
||||
if (!role_exists) {
|
||||
throw auth::nonexistant_role(role_name);
|
||||
}
|
||||
return _qp.execute_internal(query, {sstring(role_name), sstring(attribute_name), sstring(attribute_value)}, cql3::query_processor::cache_internal::yes).discard_result();
|
||||
return _qp.execute_internal(query, {sstring(role_name), sstring(attribute_name), sstring(attribute_value)}).discard_result();
|
||||
});
|
||||
});
|
||||
|
||||
@@ -612,7 +603,7 @@ future<> standard_role_manager::remove_attribute(std::string_view role_name, std
|
||||
if (!role_exists) {
|
||||
throw auth::nonexistant_role(role_name);
|
||||
}
|
||||
return _qp.execute_internal(query, {sstring(role_name), sstring(attribute_name)}, cql3::query_processor::cache_internal::yes).discard_result();
|
||||
return _qp.execute_internal(query, {sstring(role_name), sstring(attribute_name)}).discard_result();
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
/*
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright (C) 2017-present ScyllaDB
|
||||
*
|
||||
|
||||
@@ -37,27 +37,19 @@
|
||||
// The constants q1 and q2 are used to determine the proportional factor at each stage.
|
||||
class backlog_controller {
|
||||
public:
|
||||
struct scheduling_group {
|
||||
seastar::scheduling_group cpu = default_scheduling_group();
|
||||
seastar::io_priority_class io = default_priority_class();
|
||||
};
|
||||
future<> shutdown() {
|
||||
_update_timer.cancel();
|
||||
return std::move(_inflight_update);
|
||||
}
|
||||
|
||||
future<> update_static_shares(float static_shares) {
|
||||
_static_shares = static_shares;
|
||||
return make_ready_future<>();
|
||||
}
|
||||
|
||||
protected:
|
||||
struct control_point {
|
||||
float input;
|
||||
float output;
|
||||
};
|
||||
|
||||
scheduling_group _scheduling_group;
|
||||
seastar::scheduling_group _scheduling_group;
|
||||
const ::io_priority_class& _io_priority;
|
||||
std::chrono::milliseconds _interval;
|
||||
timer<> _update_timer;
|
||||
|
||||
std::vector<control_point> _control_points;
|
||||
@@ -66,36 +58,41 @@ protected:
|
||||
// updating shares for an I/O class may contact another shard and returns a future.
|
||||
future<> _inflight_update;
|
||||
|
||||
// Used when the controllers are disabled and a static share is used
|
||||
// When that option is deprecated we should remove this.
|
||||
float _static_shares;
|
||||
|
||||
virtual void update_controller(float quota);
|
||||
|
||||
bool controller_disabled() const noexcept {
|
||||
return _static_shares > 0;
|
||||
}
|
||||
|
||||
void adjust();
|
||||
|
||||
backlog_controller(scheduling_group sg, std::chrono::milliseconds interval,
|
||||
std::vector<control_point> control_points, std::function<float()> backlog,
|
||||
float static_shares = 0)
|
||||
: _scheduling_group(std::move(sg))
|
||||
backlog_controller(seastar::scheduling_group sg, const ::io_priority_class& iop, std::chrono::milliseconds interval,
|
||||
std::vector<control_point> control_points, std::function<float()> backlog)
|
||||
: _scheduling_group(sg)
|
||||
, _io_priority(iop)
|
||||
, _interval(interval)
|
||||
, _update_timer([this] { adjust(); })
|
||||
, _control_points()
|
||||
, _current_backlog(std::move(backlog))
|
||||
, _inflight_update(make_ready_future<>())
|
||||
, _static_shares(static_shares)
|
||||
{
|
||||
_control_points.insert(_control_points.end(), control_points.begin(), control_points.end());
|
||||
_update_timer.arm_periodic(interval);
|
||||
_update_timer.arm_periodic(_interval);
|
||||
}
|
||||
|
||||
// Used when the controllers are disabled and a static share is used
|
||||
// When that option is deprecated we should remove this.
|
||||
backlog_controller(seastar::scheduling_group sg, const ::io_priority_class& iop, float static_shares)
|
||||
: _scheduling_group(sg)
|
||||
, _io_priority(iop)
|
||||
, _inflight_update(make_ready_future<>())
|
||||
{
|
||||
update_controller(static_shares);
|
||||
}
|
||||
|
||||
virtual ~backlog_controller() {}
|
||||
public:
|
||||
backlog_controller(backlog_controller&&) = default;
|
||||
float backlog_of_shares(float shares) const;
|
||||
seastar::scheduling_group sg() {
|
||||
return _scheduling_group;
|
||||
}
|
||||
};
|
||||
|
||||
// memtable flush CPU controller.
|
||||
@@ -116,11 +113,11 @@ public:
|
||||
class flush_controller : public backlog_controller {
|
||||
static constexpr float hard_dirty_limit = 1.0f;
|
||||
public:
|
||||
flush_controller(backlog_controller::scheduling_group sg, float static_shares, std::chrono::milliseconds interval, float soft_limit, std::function<float()> current_dirty)
|
||||
: backlog_controller(std::move(sg), std::move(interval),
|
||||
flush_controller(seastar::scheduling_group sg, const ::io_priority_class& iop, float static_shares) : backlog_controller(sg, iop, static_shares) {}
|
||||
flush_controller(seastar::scheduling_group sg, const ::io_priority_class& iop, std::chrono::milliseconds interval, float soft_limit, std::function<float()> current_dirty)
|
||||
: backlog_controller(sg, iop, std::move(interval),
|
||||
std::vector<backlog_controller::control_point>({{0.0, 0.0}, {soft_limit, 10}, {soft_limit + (hard_dirty_limit - soft_limit) / 2, 200} , {hard_dirty_limit, 1000}}),
|
||||
std::move(current_dirty),
|
||||
static_shares
|
||||
std::move(current_dirty)
|
||||
)
|
||||
{}
|
||||
};
|
||||
@@ -130,11 +127,11 @@ public:
|
||||
static constexpr unsigned normalization_factor = 30;
|
||||
static constexpr float disable_backlog = std::numeric_limits<double>::infinity();
|
||||
static constexpr float backlog_disabled(float backlog) { return std::isinf(backlog); }
|
||||
compaction_controller(backlog_controller::scheduling_group sg, float static_shares, std::chrono::milliseconds interval, std::function<float()> current_backlog)
|
||||
: backlog_controller(std::move(sg), std::move(interval),
|
||||
compaction_controller(seastar::scheduling_group sg, const ::io_priority_class& iop, float static_shares) : backlog_controller(sg, iop, static_shares) {}
|
||||
compaction_controller(seastar::scheduling_group sg, const ::io_priority_class& iop, std::chrono::milliseconds interval, std::function<float()> current_backlog)
|
||||
: backlog_controller(sg, iop, std::move(interval),
|
||||
std::vector<backlog_controller::control_point>({{0.0, 50}, {1.5, 100} , {normalization_factor, 1000}}),
|
||||
std::move(current_backlog),
|
||||
static_shares
|
||||
std::move(current_backlog)
|
||||
)
|
||||
{}
|
||||
};
|
||||
|
||||
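The flush and compaction controllers above are configured purely through their control-point tables, which map a measured backlog (dirty-memory ratio or compaction backlog) to a number of scheduler shares. A plausible minimal interpolation over such a table is sketched below; this is a simplified stand-in under the assumption of piecewise-linear behaviour, not the actual `backlog_controller::adjust()` implementation.

```cpp
// Sketch of mapping a measured backlog onto scheduler shares through a list of
// control points, in the spirit of flush_controller / compaction_controller above.
#include <cstddef>
#include <iostream>
#include <vector>

struct control_point {
    float input;    // measured backlog (e.g. dirty-memory ratio)
    float output;   // CPU shares to request at that backlog
};

// Piecewise-linear interpolation between consecutive control points,
// clamped to the first/last point outside the configured range.
float shares_for_backlog(const std::vector<control_point>& cps, float backlog) {
    if (backlog <= cps.front().input) return cps.front().output;
    if (backlog >= cps.back().input)  return cps.back().output;
    for (std::size_t i = 1; i < cps.size(); ++i) {
        if (backlog <= cps[i].input) {
            const auto& lo = cps[i - 1];
            const auto& hi = cps[i];
            float t = (backlog - lo.input) / (hi.input - lo.input);
            return lo.output + t * (hi.output - lo.output);
        }
    }
    return cps.back().output;  // unreachable given the clamps above
}

int main() {
    // The flush controller's points from the hunk above, assuming soft_limit = 0.5.
    std::vector<control_point> flush_points{{0.0f, 0.0f}, {0.5f, 10.0f}, {0.75f, 200.0f}, {1.0f, 1000.0f}};
    std::cout << "dirty 0.6 -> " << shares_for_backlog(flush_points, 0.6f) << " shares\n";
}
```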
bytes_ostream.hh
@@ -11,11 +11,9 @@
|
||||
#include <boost/range/iterator_range.hpp>
|
||||
|
||||
#include "bytes.hh"
|
||||
#include "utils/managed_bytes.hh"
|
||||
#include "hashing.hh"
|
||||
#include <seastar/core/simple-stream.hh>
|
||||
#include <seastar/core/loop.hh>
|
||||
#include <bit>
|
||||
#include <concepts>
|
||||
|
||||
/**
|
||||
@@ -33,15 +31,26 @@ public:
|
||||
static constexpr size_type max_chunk_size() { return max_alloc_size() - sizeof(chunk); }
|
||||
private:
|
||||
static_assert(sizeof(value_type) == 1, "value_type is assumed to be one byte long");
|
||||
// Note: while appending data, chunk::size refers to the allocated space in the chunk,
|
||||
// and chunk::frag_size refers to the currently occupied space in the chunk.
|
||||
// After building, the first chunk::size is the whole object size, and chunk::frag_size
|
||||
// doesn't change. This fits with the managed_bytes interpretation.
|
||||
using chunk = blob_storage;
|
||||
struct chunk {
|
||||
// FIXME: group fragment pointers to reduce pointer chasing when packetizing
|
||||
std::unique_ptr<chunk> next;
|
||||
~chunk() {
|
||||
auto p = std::move(next);
|
||||
while (p) {
|
||||
// Avoid recursion when freeing chunks
|
||||
auto p_next = std::move(p->next);
|
||||
p = std::move(p_next);
|
||||
}
|
||||
}
|
||||
size_type offset; // Also means "size" after chunk is closed
|
||||
size_type size;
|
||||
value_type data[0];
|
||||
void operator delete(void* ptr) { free(ptr); }
|
||||
};
|
||||
static constexpr size_type default_chunk_size{512};
|
||||
static constexpr size_type max_alloc_size() { return 128 * 1024; }
|
||||
private:
|
||||
blob_storage::ref_type _begin;
|
||||
std::unique_ptr<chunk> _begin;
|
||||
chunk* _current;
|
||||
size_type _size;
|
||||
size_type _initial_chunk_size = default_chunk_size;
|
||||
@@ -61,13 +70,13 @@ public:
|
||||
fragment_iterator(const fragment_iterator&) = default;
|
||||
fragment_iterator& operator=(const fragment_iterator&) = default;
|
||||
bytes_view operator*() const {
|
||||
return { _current->data, _current->frag_size };
|
||||
return { _current->data, _current->offset };
|
||||
}
|
||||
bytes_view operator->() const {
|
||||
return *(*this);
|
||||
}
|
||||
fragment_iterator& operator++() {
|
||||
_current = _current->next;
|
||||
_current = _current->next.get();
|
||||
return *this;
|
||||
}
|
||||
fragment_iterator operator++(int) {
|
||||
@@ -110,21 +119,19 @@ private:
|
||||
if (!_current) {
|
||||
return 0;
|
||||
}
|
||||
return _current->size - _current->frag_size;
|
||||
return _current->size - _current->offset;
|
||||
}
|
||||
// Figure out next chunk size.
|
||||
// - must be enough for data_size + sizeof(chunk)
|
||||
// - must be at least _initial_chunk_size
|
||||
// - try to double each time to prevent too many allocations
|
||||
// - should not exceed max_alloc_size, unless data_size requires so
|
||||
// - will be power-of-two so the allocated memory can be fully utilized.
|
||||
size_type next_alloc_size(size_t data_size) const {
|
||||
auto next_size = _current
|
||||
? _current->size * 2
|
||||
: _initial_chunk_size;
|
||||
next_size = std::min(next_size, max_alloc_size());
|
||||
auto r = std::max<size_type>(next_size, data_size + sizeof(chunk));
|
||||
return std::bit_ceil(r);
|
||||
return std::max<size_type>(next_size, data_size + sizeof(chunk));
|
||||
}
|
||||
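The sizing rules spelled out in the comment above can be checked in isolation: double the previous chunk, cap at the allocation limit unless the single write itself needs more, and (on the side of the diff that keeps `std::bit_ceil`) round up to a power of two. The stand-alone model below is illustrative only; `chunk_header_size` is a placeholder, not the real `sizeof(chunk)`.

```cpp
// Stand-alone model of the chunk growth policy described above. Illustrative only.
#include <algorithm>
#include <bit>
#include <cstddef>
#include <iostream>

constexpr std::size_t initial_chunk_size = 512;
constexpr std::size_t max_alloc_size     = 128 * 1024;
constexpr std::size_t chunk_header_size  = 32;   // placeholder for sizeof(chunk)

std::size_t next_alloc_size(std::size_t current_chunk_size, std::size_t data_size) {
    // Double the previous chunk (or start from the initial size)...
    std::size_t next = current_chunk_size ? current_chunk_size * 2 : initial_chunk_size;
    // ...but never exceed the allocation cap on its own...
    next = std::min(next, max_alloc_size);
    // ...unless the single write itself needs more room than that.
    std::size_t r = std::max(next, data_size + chunk_header_size);
    // One side of the diff additionally rounds up to a power of two.
    return std::bit_ceil(r);
}

int main() {
    std::cout << next_alloc_size(0, 100) << "\n";       // first chunk: 512
    std::cout << next_alloc_size(512, 100) << "\n";     // doubled: 1024
    std::cout << next_alloc_size(131072, 100) << "\n";  // capped at 128 KiB
}
```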
// Makes room for a contiguous region of given size.
|
||||
// The region is accounted for as already written.
|
||||
@@ -132,8 +139,8 @@ private:
|
||||
[[gnu::always_inline]]
|
||||
value_type* alloc(size_type size) {
|
||||
if (__builtin_expect(size <= current_space_left(), true)) {
|
||||
auto ret = _current->data + _current->frag_size;
|
||||
_current->frag_size += size;
|
||||
auto ret = _current->data + _current->offset;
|
||||
_current->offset += size;
|
||||
_size += size;
|
||||
return ret;
|
||||
} else {
|
||||
@@ -147,21 +154,19 @@ private:
|
||||
if (!space) {
|
||||
throw std::bad_alloc();
|
||||
}
|
||||
auto backref = _current ? &_current->next : &_begin;
|
||||
auto new_chunk = new (space) chunk(backref, alloc_size - sizeof(chunk), size);
|
||||
_current = new_chunk;
|
||||
auto new_chunk = std::unique_ptr<chunk>(new (space) chunk());
|
||||
new_chunk->offset = size;
|
||||
new_chunk->size = alloc_size - sizeof(chunk);
|
||||
if (_current) {
|
||||
_current->next = std::move(new_chunk);
|
||||
_current = _current->next.get();
|
||||
} else {
|
||||
_begin = std::move(new_chunk);
|
||||
_current = _begin.get();
|
||||
}
|
||||
_size += size;
|
||||
return _current->data;
|
||||
}
|
||||
[[gnu::noinline]]
|
||||
void free_chain(chunk* c) noexcept {
|
||||
while (c) {
|
||||
auto n = c->next;
|
||||
c->~chunk();
|
||||
::free(c);
|
||||
c = n;
|
||||
}
|
||||
}
|
||||
public:
|
||||
explicit bytes_ostream(size_t initial_chunk_size) noexcept
|
||||
: _begin()
|
||||
@@ -173,7 +178,7 @@ public:
|
||||
bytes_ostream() noexcept : bytes_ostream(default_chunk_size) {}
|
||||
|
||||
bytes_ostream(bytes_ostream&& o) noexcept
|
||||
: _begin(std::exchange(o._begin, {}))
|
||||
: _begin(std::move(o._begin))
|
||||
, _current(o._current)
|
||||
, _size(o._size)
|
||||
, _initial_chunk_size(o._initial_chunk_size)
|
||||
@@ -191,10 +196,6 @@ public:
|
||||
append(o);
|
||||
}
|
||||
|
||||
~bytes_ostream() {
|
||||
free_chain(_begin.ptr);
|
||||
}
|
||||
|
||||
bytes_ostream& operator=(const bytes_ostream& o) {
|
||||
if (this != &o) {
|
||||
auto x = bytes_ostream(o);
|
||||
@@ -242,8 +243,8 @@ public:
|
||||
|
||||
auto this_size = std::min(v.size(), size_t(current_space_left()));
|
||||
if (__builtin_expect(this_size, true)) {
|
||||
memcpy(_current->data + _current->frag_size, v.begin(), this_size);
|
||||
_current->frag_size += this_size;
|
||||
memcpy(_current->data + _current->offset, v.begin(), this_size);
|
||||
_current->offset += this_size;
|
||||
_size += this_size;
|
||||
v.remove_prefix(this_size);
|
||||
}
|
||||
@@ -286,20 +287,19 @@ public:
|
||||
throw std::bad_alloc();
|
||||
}
|
||||
|
||||
auto old_begin = _begin;
|
||||
auto new_chunk = new (space) chunk(&_begin, _size, _size);
|
||||
auto new_chunk = std::unique_ptr<chunk>(new (space) chunk());
|
||||
new_chunk->offset = _size;
|
||||
new_chunk->size = _size;
|
||||
|
||||
auto dst = new_chunk->data;
|
||||
auto r = old_begin.ptr;
|
||||
auto r = _begin.get();
|
||||
while (r) {
|
||||
auto next = r->next;
|
||||
dst = std::copy_n(r->data, r->frag_size, dst);
|
||||
r->~chunk();
|
||||
::free(r);
|
||||
auto next = r->next.get();
|
||||
dst = std::copy_n(r->data, r->offset, dst);
|
||||
r = next;
|
||||
}
|
||||
|
||||
_current = new_chunk;
|
||||
_current = new_chunk.get();
|
||||
_begin = std::move(new_chunk);
|
||||
return bytes_view(_current->data, _size);
|
||||
}
|
||||
@@ -333,23 +333,22 @@ public:
|
||||
void remove_suffix(size_t n) {
|
||||
_size -= n;
|
||||
auto left = _size;
|
||||
auto current = _begin.ptr;
|
||||
auto current = _begin.get();
|
||||
while (current) {
|
||||
if (current->frag_size >= left) {
|
||||
current->frag_size = left;
|
||||
if (current->offset >= left) {
|
||||
current->offset = left;
|
||||
_current = current;
|
||||
free_chain(current->next);
|
||||
current->next = nullptr;
|
||||
current->next.reset();
|
||||
return;
|
||||
}
|
||||
left -= current->frag_size;
|
||||
current = current->next;
|
||||
left -= current->offset;
|
||||
current = current->next.get();
|
||||
}
|
||||
}
|
||||
|
||||
// begin() and end() form an input range to bytes_view representing fragments.
|
||||
// Any modification of this instance invalidates iterators.
|
||||
fragment_iterator begin() const { return { _begin.ptr }; }
|
||||
fragment_iterator begin() const { return { _begin.get() }; }
|
||||
fragment_iterator end() const { return { nullptr }; }
|
||||
|
||||
output_iterator write_begin() { return output_iterator(*this); }
|
||||
@@ -364,7 +363,7 @@ public:
|
||||
};
|
||||
|
||||
position pos() const {
|
||||
return { _current, _current ? _current->frag_size : 0 };
|
||||
return { _current, _current ? _current->offset : 0 };
|
||||
}
|
||||
|
||||
// Returns the amount of bytes written since given position.
|
||||
@@ -374,11 +373,11 @@ public:
|
||||
if (!c) {
|
||||
return _size;
|
||||
}
|
||||
size_type total = c->frag_size - pos._offset;
|
||||
c = c->next;
|
||||
size_type total = c->offset - pos._offset;
|
||||
c = c->next.get();
|
||||
while (c) {
|
||||
total += c->frag_size;
|
||||
c = c->next;
|
||||
total += c->offset;
|
||||
c = c->next.get();
|
||||
}
|
||||
return total;
|
||||
}
|
||||
@@ -392,9 +391,8 @@ public:
|
||||
}
|
||||
_size -= written_since(pos);
|
||||
_current = pos._chunk;
|
||||
free_chain(_current->next);
|
||||
_current->next = nullptr;
|
||||
_current->frag_size = pos._offset;
|
||||
_current->offset = pos._offset;
|
||||
}
|
||||
|
||||
void reduce_chunk_count() {
|
||||
@@ -443,23 +441,11 @@ public:
|
||||
// the clear() calls then writes will not involve any memory allocations,
|
||||
// except for the first write made on this instance.
|
||||
void clear() {
|
||||
if (_begin.ptr) {
|
||||
_begin.ptr->frag_size = 0;
|
||||
if (_begin) {
|
||||
_begin->offset = 0;
|
||||
_size = 0;
|
||||
free_chain(_begin.ptr->next);
|
||||
_begin.ptr->next = nullptr;
|
||||
_current = _begin.ptr;
|
||||
}
|
||||
}
|
||||
|
||||
managed_bytes to_managed_bytes() && {
|
||||
if (_size) {
|
||||
_begin.ptr->size = _size;
|
||||
_current = nullptr;
|
||||
_size = 0;
|
||||
return managed_bytes(std::exchange(_begin.ptr, {}));
|
||||
} else {
|
||||
return managed_bytes();
|
||||
_current = _begin.get();
|
||||
_begin->next.reset();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -470,17 +456,15 @@ public:
|
||||
// the clear() calls then writes will not involve any memory allocations,
|
||||
// except for the first write made on this instance.
|
||||
future<> clear_gently() noexcept {
|
||||
if (!_begin.ptr) {
|
||||
if (!_begin) {
|
||||
return make_ready_future<>();
|
||||
}
|
||||
_begin->frag_size = 0;
|
||||
_current = _begin.ptr;
|
||||
_begin->offset = 0;
|
||||
_size = 0;
|
||||
return do_until([this] { return !_begin.ptr->next; }, [this] {
|
||||
auto second_chunk = _begin.ptr->next;
|
||||
auto next = second_chunk->next;
|
||||
second_chunk->~chunk();
|
||||
::free(second_chunk);
|
||||
return do_until([this] { return !_begin->next; }, [this] {
|
||||
// move next->next first to avoid it being recursively destroyed
|
||||
// in ~chunk when _begin->next is move-assigned.
|
||||
auto next = std::move(_begin->next->next);
|
||||
_begin->next = std::move(next);
|
||||
return make_ready_future<>();
|
||||
});
|
||||
|
||||
@@ -10,19 +10,19 @@
|
||||
|
||||
#include <vector>
|
||||
#include "row_cache.hh"
|
||||
#include "mutation_reader.hh"
|
||||
#include "mutation_fragment.hh"
|
||||
#include "query-request.hh"
|
||||
#include "partition_snapshot_row_cursor.hh"
|
||||
#include "range_tombstone_assembler.hh"
|
||||
#include "read_context.hh"
|
||||
#include "readers/delegating_v2.hh"
|
||||
#include "flat_mutation_reader.hh"
|
||||
#include "clustering_key_filter.hh"
|
||||
|
||||
namespace cache {
|
||||
|
||||
extern logging::logger clogger;
|
||||
|
||||
class cache_flat_mutation_reader final : public flat_mutation_reader_v2::impl {
|
||||
class cache_flat_mutation_reader final : public flat_mutation_reader::impl {
|
||||
enum class state {
|
||||
before_static_row,
|
||||
|
||||
@@ -51,46 +51,6 @@ class cache_flat_mutation_reader final : public flat_mutation_reader_v2::impl {
|
||||
|
||||
end_of_stream
|
||||
};
|
||||
enum class source {
|
||||
cache = 0,
|
||||
underlying = 1,
|
||||
};
|
||||
// Merges range tombstone change streams coming from underlying and the cache.
|
||||
// Ensures no range tombstone change fragment is emitted when there is no
|
||||
// actual change in the effective tombstone.
|
||||
class range_tombstone_change_merger {
|
||||
const schema& _schema;
|
||||
position_in_partition _pos;
|
||||
tombstone _current_tombstone;
|
||||
std::array<tombstone, 2> _tombstones;
|
||||
private:
|
||||
std::optional<range_tombstone_change> do_flush(position_in_partition pos, bool end_of_range) {
|
||||
std::optional<range_tombstone_change> ret;
|
||||
position_in_partition::tri_compare cmp(_schema);
|
||||
const auto res = cmp(_pos, pos);
|
||||
const auto should_flush = end_of_range ? res <= 0 : res < 0;
|
||||
if (should_flush) {
|
||||
auto merged_tomb = std::max(_tombstones.front(), _tombstones.back());
|
||||
if (merged_tomb != _current_tombstone) {
|
||||
_current_tombstone = merged_tomb;
|
||||
ret.emplace(_pos, _current_tombstone);
|
||||
}
|
||||
_pos = std::move(pos);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
public:
|
||||
range_tombstone_change_merger(const schema& s) : _schema(s), _pos(position_in_partition::before_all_clustered_rows()), _tombstones{}
|
||||
{ }
|
||||
std::optional<range_tombstone_change> apply(source src, range_tombstone_change&& rtc) {
|
||||
auto ret = do_flush(rtc.position(), false);
|
||||
_tombstones[static_cast<size_t>(src)] = rtc.tombstone();
|
||||
return ret;
|
||||
}
|
||||
std::optional<range_tombstone_change> flush(position_in_partition_view pos, bool end_of_range) {
|
||||
return do_flush(position_in_partition(pos), end_of_range);
|
||||
}
|
||||
};
|
||||
partition_snapshot_ptr _snp;
|
||||
|
||||
query::clustering_key_filter_ranges _ck_ranges; // Query schema domain, reversed reads use native order
|
||||
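The `range_tombstone_change_merger` removed in the hunk above keeps the most recent tombstone from each of its two sources (cache and underlying) and only emits a change when the combined, i.e. maximal, tombstone actually differs from the one currently in effect. A stripped-down model of that idea follows; it is not the real implementation: tombstones are reduced to a bare timestamp and the position-based flushing is ignored.

```cpp
// Simplified model of merging two range-tombstone-change streams, in the spirit of
// the range_tombstone_change_merger removed above. Illustrative only.
#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>

enum class source : std::size_t { cache = 0, underlying = 1 };

class tombstone_merger {
    std::array<int64_t, 2> _tombstones{0, 0};  // latest tombstone seen per source
    int64_t _current = 0;                      // last effective tombstone emitted
public:
    // Feed a new tombstone from one source; return the new effective tombstone
    // only if it actually changed, mirroring the "no-op change" filtering above.
    std::optional<int64_t> apply(source src, int64_t tomb) {
        _tombstones[static_cast<std::size_t>(src)] = tomb;
        int64_t merged = std::max(_tombstones[0], _tombstones[1]);
        if (merged != _current) {
            _current = merged;
            return merged;
        }
        return std::nullopt;
    }
};

int main() {
    tombstone_merger m;
    auto print = [](std::optional<int64_t> r) {
        std::cout << (r ? "emit " + std::to_string(*r) : std::string("suppressed")) << "\n";
    };
    print(m.apply(source::cache, 5));        // emit 5
    print(m.apply(source::underlying, 3));   // suppressed: the effective tombstone is still 5
    print(m.apply(source::cache, 2));        // emit 3: underlying's tombstone now dominates
}
```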
@@ -106,7 +66,6 @@ class cache_flat_mutation_reader final : public flat_mutation_reader_v2::impl {
|
||||
// range_tombstones with positions <= _lower_bound.
|
||||
position_in_partition _lower_bound; // Query schema domain
|
||||
position_in_partition_view _upper_bound; // Query schema domain
|
||||
std::optional<position_in_partition> _underlying_upper_bound; // Query schema domain
|
||||
|
||||
// cache_flat_mutation_reader may be constructed either
|
||||
// with a read_context&, where it knows that the read_context
|
||||
@@ -121,19 +80,6 @@ class cache_flat_mutation_reader final : public flat_mutation_reader_v2::impl {
|
||||
read_context& _read_context;
|
||||
partition_snapshot_row_cursor _next_row;
|
||||
|
||||
range_tombstone_change_generator _rt_gen; // cache -> reader
|
||||
range_tombstone_assembler _rt_assembler; // underlying -> cache
|
||||
range_tombstone_change_merger _rt_merger; // {cache, underlying} -> reader
|
||||
|
||||
// When the read moves to the underlying, the read range will be
|
||||
// (_lower_bound, x], where x is either _next_row.position() or _upper_bound.
|
||||
// In the former case (x is _next_row.position()), underlying can emit
|
||||
// a range tombstone change for after_key(x), which is outside the range.
|
||||
// We can't push this fragment into the buffer straight away; the cache may
|
||||
// have fragments with a smaller position. So we save it here and flush it when
|
||||
// a fragment with a larger position is seen.
|
||||
std::optional<mutation_fragment_v2> _queued_underlying_fragment;
|
||||
|
||||
state _state = state::before_static_row;
|
||||
|
||||
bool _next_row_in_range = false;
|
||||
@@ -152,8 +98,8 @@ class cache_flat_mutation_reader final : public flat_mutation_reader_v2::impl {
|
||||
|
||||
// Points to the underlying reader conforming to _schema,
|
||||
// either to *_underlying_holder or _read_context.underlying().underlying().
|
||||
flat_mutation_reader_v2* _underlying = nullptr;
|
||||
flat_mutation_reader_v2_opt _underlying_holder;
|
||||
flat_mutation_reader* _underlying = nullptr;
|
||||
flat_mutation_reader_opt _underlying_holder;
|
||||
|
||||
future<> do_fill_buffer();
|
||||
future<> ensure_underlying();
|
||||
@@ -164,13 +110,11 @@ class cache_flat_mutation_reader final : public flat_mutation_reader_v2::impl {
|
||||
void move_to_range(query::clustering_row_ranges::const_iterator);
|
||||
void move_to_next_entry();
|
||||
void maybe_drop_last_entry() noexcept;
|
||||
void flush_tombstones(position_in_partition_view, bool end_of_range = false);
|
||||
void add_to_buffer(const partition_snapshot_row_cursor&);
|
||||
void add_clustering_row_to_buffer(mutation_fragment_v2&&);
|
||||
void add_to_buffer(range_tombstone_change&&, source);
|
||||
void do_add_to_buffer(range_tombstone_change&&);
|
||||
void add_clustering_row_to_buffer(mutation_fragment&&);
|
||||
void add_to_buffer(range_tombstone&&);
|
||||
void add_range_tombstone_to_buffer(range_tombstone&&);
|
||||
void add_to_buffer(mutation_fragment_v2&&);
|
||||
void add_to_buffer(mutation_fragment&&);
|
||||
future<> read_from_underlying();
|
||||
void start_reading_from_underlying();
|
||||
bool after_current_range(position_in_partition_view position);
|
||||
@@ -187,9 +131,9 @@ class cache_flat_mutation_reader final : public flat_mutation_reader_v2::impl {
|
||||
// if !_read_context.is_reversed() then _last_row is valid after this or the population lower bound
|
||||
// is before all rows (so _last_row doesn't point at any entry).
|
||||
bool ensure_population_lower_bound();
|
||||
void maybe_add_to_cache(const mutation_fragment_v2& mf);
|
||||
void maybe_add_to_cache(const mutation_fragment& mf);
|
||||
void maybe_add_to_cache(const clustering_row& cr);
|
||||
void maybe_add_to_cache(const range_tombstone_change& rtc);
|
||||
void maybe_add_to_cache(const range_tombstone& rt);
|
||||
void maybe_add_to_cache(const static_row& sr);
|
||||
void maybe_set_static_row_continuous();
|
||||
void finish_reader() {
|
||||
@@ -233,7 +177,7 @@ public:
|
||||
read_context& ctx,
|
||||
partition_snapshot_ptr snp,
|
||||
row_cache& cache)
|
||||
: flat_mutation_reader_v2::impl(std::move(s), ctx.permit())
|
||||
: flat_mutation_reader::impl(std::move(s), ctx.permit())
|
||||
, _snp(std::move(snp))
|
||||
, _ck_ranges(std::move(crr))
|
||||
, _ck_ranges_curr(_ck_ranges.begin())
|
||||
@@ -244,8 +188,6 @@ public:
|
||||
, _read_context_holder()
|
||||
, _read_context(ctx) // ctx is owned by the caller, who's responsible for closing it.
|
||||
, _next_row(*_schema, *_snp, false, _read_context.is_reversed())
|
||||
, _rt_gen(*_schema)
|
||||
, _rt_merger(*_schema)
|
||||
{
|
||||
clogger.trace("csm {}: table={}.{}, reversed={}, snap={}", fmt::ptr(this), _schema->ks_name(), _schema->cf_name(), _read_context.is_reversed(),
|
||||
fmt::ptr(&*_snp));
|
||||
@@ -296,13 +238,13 @@ future<> cache_flat_mutation_reader::process_static_row() {
|
||||
return _snp->static_row(_read_context.digest_requested());
|
||||
});
|
||||
if (!sr.empty()) {
|
||||
push_mutation_fragment(*_schema, _permit, std::move(sr));
|
||||
push_mutation_fragment(mutation_fragment(*_schema, _permit, std::move(sr)));
|
||||
}
|
||||
return make_ready_future<>();
|
||||
} else {
|
||||
_read_context.cache().on_row_miss();
|
||||
return ensure_underlying().then([this] {
|
||||
return (*_underlying)().then([this] (mutation_fragment_v2_opt&& sr) {
|
||||
return (*_underlying)().then([this] (mutation_fragment_opt&& sr) {
|
||||
if (sr) {
|
||||
assert(sr->is_static_row());
|
||||
maybe_add_to_cache(sr->as_static_row());
|
||||
@@ -352,7 +294,7 @@ future<> cache_flat_mutation_reader::ensure_underlying() {
|
||||
return make_ready_future<>();
|
||||
}
|
||||
return _read_context.ensure_underlying().then([this] {
|
||||
flat_mutation_reader_v2& ctx_underlying = _read_context.underlying().underlying();
|
||||
flat_mutation_reader& ctx_underlying = _read_context.underlying().underlying();
|
||||
if (ctx_underlying.schema() != _schema) {
|
||||
_underlying_holder = make_delegating_reader(ctx_underlying);
|
||||
_underlying_holder->upgrade_schema(_schema);
|
||||
@@ -376,9 +318,9 @@ future<> cache_flat_mutation_reader::do_fill_buffer() {
|
||||
if (!_read_context.partition_exists()) {
|
||||
return read_from_underlying();
|
||||
}
|
||||
_underlying_upper_bound = _next_row_in_range ? position_in_partition(_next_row.position())
|
||||
auto end = _next_row_in_range ? position_in_partition(_next_row.position())
|
||||
: position_in_partition(_upper_bound);
|
||||
return _underlying->fast_forward_to(position_range{_lower_bound, *_underlying_upper_bound}).then([this] {
|
||||
return _underlying->fast_forward_to(position_range{_lower_bound, std::move(end)}).then([this] {
|
||||
return read_from_underlying();
|
||||
});
|
||||
}
|
||||
@@ -421,13 +363,12 @@ inline
|
||||
future<> cache_flat_mutation_reader::read_from_underlying() {
|
||||
return consume_mutation_fragments_until(*_underlying,
|
||||
[this] { return _state != state::reading_from_underlying || is_buffer_full(); },
|
||||
[this] (mutation_fragment_v2 mf) {
|
||||
[this] (mutation_fragment mf) {
|
||||
_read_context.cache().on_row_miss();
|
||||
maybe_add_to_cache(mf);
|
||||
add_to_buffer(std::move(mf));
|
||||
},
|
||||
[this] {
|
||||
_underlying_upper_bound.reset();
|
||||
_state = state::reading_from_cache;
|
||||
_lsa_manager.run_in_update_section([this] {
|
||||
auto same_pos = _next_row.maybe_refresh();
|
||||
@@ -554,9 +495,9 @@ void cache_flat_mutation_reader::maybe_update_continuity() {
|
||||
}
|
||||
|
||||
inline
|
||||
void cache_flat_mutation_reader::maybe_add_to_cache(const mutation_fragment_v2& mf) {
|
||||
if (mf.is_range_tombstone_change()) {
|
||||
maybe_add_to_cache(mf.as_range_tombstone_change());
|
||||
void cache_flat_mutation_reader::maybe_add_to_cache(const mutation_fragment& mf) {
|
||||
if (mf.is_range_tombstone()) {
|
||||
maybe_add_to_cache(mf.as_range_tombstone());
|
||||
} else {
|
||||
assert(mf.is_clustering_row());
|
||||
const clustering_row& cr = mf.as_clustering_row();
|
||||
@@ -572,16 +513,9 @@ void cache_flat_mutation_reader::maybe_add_to_cache(const clustering_row& cr) {
|
||||
_read_context.cache().on_mispopulate();
|
||||
return;
|
||||
}
|
||||
auto rt_opt = _rt_assembler.flush(*_schema, position_in_partition::after_key(cr.key()));
|
||||
clogger.trace("csm {}: populate({})", fmt::ptr(this), clustering_row::printer(*_schema, cr));
|
||||
_lsa_manager.run_in_update_section_with_allocator([this, &cr, &rt_opt] {
|
||||
_lsa_manager.run_in_update_section_with_allocator([this, &cr] {
|
||||
mutation_partition& mp = _snp->version()->partition();
|
||||
|
||||
if (rt_opt) {
|
||||
clogger.trace("csm {}: populate flushed rt({})", fmt::ptr(this), *rt_opt);
|
||||
mp.mutable_row_tombstones().apply_monotonically(table_schema(), to_table_domain(range_tombstone(*rt_opt)));
|
||||
}
|
||||
|
||||
rows_entry::tri_compare cmp(table_schema());
|
||||
|
||||
if (_read_context.digest_requested()) {
|
||||
@@ -637,6 +571,11 @@ void cache_flat_mutation_reader::copy_from_cache_to_buffer() {
|
||||
position_in_partition_view next_lower_bound = _next_row.dummy() ? _next_row.position() : position_in_partition_view::after_key(_next_row.key());
|
||||
auto upper_bound = _next_row_in_range ? next_lower_bound : _upper_bound;
|
||||
if (_snp->range_tombstones(_lower_bound, upper_bound, [&] (range_tombstone rts) {
|
||||
position_in_partition::less_compare less(*_schema);
|
||||
// Avoid emitting overlapping range tombstones for performance reasons.
|
||||
if (less(upper_bound, rts.end_position())) {
|
||||
rts.set_end(*_schema, upper_bound);
|
||||
}
|
||||
add_range_tombstone_to_buffer(std::move(rts));
|
||||
return stop_iteration(_lower_bound_changed && is_buffer_full());
|
||||
}, _read_context.is_reversed()) == stop_iteration::no) {
|
||||
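For illustration, a small self-contained sketch (hypothetical types, not the actual range_tombstone class) of the end-trimming idea used in copy_from_cache_to_buffer() above, where a tombstone is clamped so it never extends past the range currently being emitted:

#include <algorithm>
#include <cassert>

// Hypothetical, simplified stand-in for a range tombstone: a half-open
// interval [start, end) with a deletion timestamp.
struct interval_tombstone {
    int start;
    int end;
    long timestamp;
};

// Clamp the tombstone's end to the current upper bound, mirroring the idea
// of rts.set_end(upper_bound) in the hunk above.
inline interval_tombstone trim_to_upper_bound(interval_tombstone t, int upper_bound) {
    t.end = std::min(t.end, upper_bound);
    return t;
}

int main() {
    interval_tombstone t{5, 42, 1000};
    auto trimmed = trim_to_upper_bound(t, 10);
    assert(trimmed.start == 5 && trimmed.end == 10);
}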
@@ -660,10 +599,6 @@ void cache_flat_mutation_reader::move_to_end() {
|
||||
|
||||
inline
|
||||
void cache_flat_mutation_reader::move_to_next_range() {
|
||||
if (_queued_underlying_fragment) {
|
||||
add_to_buffer(*std::exchange(_queued_underlying_fragment, {}));
|
||||
}
|
||||
flush_tombstones(position_in_partition::for_range_end(*_ck_ranges_curr), true);
|
||||
auto next_it = std::next(_ck_ranges_curr);
|
||||
if (next_it == _ck_ranges_end) {
|
||||
move_to_end();
|
||||
@@ -680,7 +615,6 @@ void cache_flat_mutation_reader::move_to_range(query::clustering_row_ranges::con
|
||||
_last_row = nullptr;
|
||||
_lower_bound = std::move(lb);
|
||||
_upper_bound = std::move(ub);
|
||||
_rt_gen.trim(_lower_bound);
|
||||
_lower_bound_changed = true;
|
||||
_ck_ranges_curr = next_it;
|
||||
auto adjacent = _next_row.advance_to(_lower_bound);
|
||||
@@ -727,7 +661,7 @@ void cache_flat_mutation_reader::maybe_drop_last_entry() noexcept {
|
||||
// This prevents unnecessary dummy entries from accumulating in cache and slowing down scans.
|
||||
//
|
||||
// Eviction can happen only from oldest versions to preserve the continuity non-overlapping rule
|
||||
// (See docs/dev/row_cache.md)
|
||||
// (See docs/design-notes/row_cache.md)
|
||||
//
|
||||
if (_last_row
|
||||
&& !_read_context.is_reversed() // FIXME
|
||||
@@ -772,49 +706,27 @@ void cache_flat_mutation_reader::move_to_next_entry() {
|
||||
}
|
||||
}
|
||||
|
||||
void cache_flat_mutation_reader::flush_tombstones(position_in_partition_view pos, bool end_of_range) {
|
||||
// Ensure position is appropriate for range tombstone bound
|
||||
pos = position_in_partition_view::after_key(pos);
|
||||
clogger.trace("csm {}: flush_tombstones({}) end_of_range: {}", fmt::ptr(this), pos, end_of_range);
|
||||
_rt_gen.flush(pos, [this] (range_tombstone_change&& rtc) {
|
||||
add_to_buffer(std::move(rtc), source::cache);
|
||||
}, end_of_range);
|
||||
if (auto rtc_opt = _rt_merger.flush(pos, end_of_range)) {
|
||||
do_add_to_buffer(std::move(*rtc_opt));
|
||||
}
|
||||
}
|
||||
|
||||
inline
|
||||
void cache_flat_mutation_reader::add_to_buffer(mutation_fragment_v2&& mf) {
|
||||
clogger.trace("csm {}: add_to_buffer({})", fmt::ptr(this), mutation_fragment_v2::printer(*_schema, mf));
|
||||
position_in_partition::less_compare less(*_schema);
|
||||
if (_underlying_upper_bound && less(*_underlying_upper_bound, mf.position())) {
|
||||
_queued_underlying_fragment = std::move(mf);
|
||||
return;
|
||||
}
|
||||
flush_tombstones(mf.position());
|
||||
void cache_flat_mutation_reader::add_to_buffer(mutation_fragment&& mf) {
|
||||
clogger.trace("csm {}: add_to_buffer({})", fmt::ptr(this), mutation_fragment::printer(*_schema, mf));
|
||||
if (mf.is_clustering_row()) {
|
||||
add_clustering_row_to_buffer(std::move(mf));
|
||||
} else {
|
||||
assert(mf.is_range_tombstone_change());
|
||||
add_to_buffer(std::move(mf).as_range_tombstone_change(), source::underlying);
|
||||
assert(mf.is_range_tombstone());
|
||||
add_to_buffer(std::move(mf).as_range_tombstone());
|
||||
}
|
||||
}
|
||||
|
||||
inline
|
||||
void cache_flat_mutation_reader::add_to_buffer(const partition_snapshot_row_cursor& row) {
|
||||
position_in_partition::less_compare less(*_schema);
|
||||
if (_queued_underlying_fragment && less(_queued_underlying_fragment->position(), row.position())) {
|
||||
add_to_buffer(*std::exchange(_queued_underlying_fragment, {}));
|
||||
}
|
||||
if (!row.dummy()) {
|
||||
_read_context.cache().on_row_hit();
|
||||
if (_read_context.digest_requested()) {
|
||||
row.latest_row().cells().prepare_hash(table_schema(), column_kind::regular_column);
|
||||
}
|
||||
flush_tombstones(position_in_partition_view::for_key(row.key()));
|
||||
add_clustering_row_to_buffer(mutation_fragment_v2(*_schema, _permit, row.row()));
|
||||
add_clustering_row_to_buffer(mutation_fragment(*_schema, _permit, row.row()));
|
||||
} else {
|
||||
position_in_partition::less_compare less(*_schema);
|
||||
if (less(_lower_bound, row.position())) {
|
||||
_lower_bound = row.position();
|
||||
_lower_bound_changed = true;
|
||||
@@ -827,8 +739,8 @@ void cache_flat_mutation_reader::add_to_buffer(const partition_snapshot_row_curs
|
||||
// (1) no fragment with position >= _lower_bound was pushed yet
|
||||
// (2) If _lower_bound > mf.position(), mf was emitted
|
||||
inline
|
||||
void cache_flat_mutation_reader::add_clustering_row_to_buffer(mutation_fragment_v2&& mf) {
|
||||
clogger.trace("csm {}: add_clustering_row_to_buffer({})", fmt::ptr(this), mutation_fragment_v2::printer(*_schema, mf));
|
||||
void cache_flat_mutation_reader::add_clustering_row_to_buffer(mutation_fragment&& mf) {
|
||||
clogger.trace("csm {}: add_clustering_row_to_buffer({})", fmt::ptr(this), mutation_fragment::printer(*_schema, mf));
|
||||
auto& row = mf.as_clustering_row();
|
||||
auto new_lower_bound = position_in_partition::after_key(row.key());
|
||||
push_mutation_fragment(std::move(mf));
|
||||
@@ -840,46 +752,32 @@ void cache_flat_mutation_reader::add_clustering_row_to_buffer(mutation_fragment_
|
||||
}
|
||||
|
||||
inline
|
||||
void cache_flat_mutation_reader::add_to_buffer(range_tombstone_change&& rtc, source src) {
|
||||
clogger.trace("csm {}: add_to_buffer({})", fmt::ptr(this), rtc);
|
||||
if (auto rtc_opt = _rt_merger.apply(src, std::move(rtc))) {
|
||||
do_add_to_buffer(std::move(*rtc_opt));
|
||||
}
|
||||
}
|
||||
|
||||
inline
|
||||
void cache_flat_mutation_reader::do_add_to_buffer(range_tombstone_change&& rtc) {
|
||||
clogger.trace("csm {}: push({})", fmt::ptr(this), rtc);
|
||||
void cache_flat_mutation_reader::add_to_buffer(range_tombstone&& rt) {
|
||||
clogger.trace("csm {}: add_to_buffer({})", fmt::ptr(this), rt);
|
||||
// This guarantees that rt starts after any emitted clustering_row
|
||||
// and not before any emitted range tombstone.
|
||||
position_in_partition::less_compare less(*_schema);
|
||||
auto lower_bound_changed = less(_lower_bound, rtc.position());
|
||||
_lower_bound = position_in_partition(rtc.position());
|
||||
_lower_bound_changed = lower_bound_changed;
|
||||
push_mutation_fragment(*_schema, _permit, std::move(rtc));
|
||||
_read_context.cache()._tracker.on_range_tombstone_read();
|
||||
if (less(_lower_bound, rt.end_position())) {
|
||||
add_range_tombstone_to_buffer(std::move(rt));
|
||||
}
|
||||
}
|
||||
|
||||
inline
|
||||
void cache_flat_mutation_reader::add_range_tombstone_to_buffer(range_tombstone&& rt) {
|
||||
position_in_partition::less_compare less(*_schema);
|
||||
if (_queued_underlying_fragment && less(_queued_underlying_fragment->position(), rt.position())) {
|
||||
add_to_buffer(*std::exchange(_queued_underlying_fragment, {}));
|
||||
}
|
||||
clogger.trace("csm {}: add_to_buffer({})", fmt::ptr(this), rt);
|
||||
if (!less(_lower_bound, rt.position())) {
|
||||
rt.set_start(_lower_bound);
|
||||
} else {
|
||||
_lower_bound = position_in_partition(rt.position());
|
||||
_lower_bound_changed = true;
|
||||
}
|
||||
flush_tombstones(rt.position());
|
||||
_rt_gen.consume(std::move(rt));
|
||||
clogger.trace("csm {}: push({})", fmt::ptr(this), rt);
|
||||
push_mutation_fragment(*_schema, _permit, std::move(rt));
|
||||
_read_context.cache()._tracker.on_range_tombstone_read();
|
||||
}
|
||||
|
||||
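A brief standalone sketch (simplified to integer positions instead of clustering positions) of the lower-bound discipline the buffering functions above maintain: emitted fragments never move backwards, so anything starting before the current bound is clipped or dropped, and the bound only advances.

#include <algorithm>
#include <iostream>
#include <utility>
#include <vector>

// Hypothetical emitter: positions come out in non-decreasing order.
struct emitter {
    int lower_bound = 0;
    std::vector<std::pair<int, int>> emitted; // [start, end) intervals

    void emit(int start, int end) {
        start = std::max(start, lower_bound);   // clip to the bound, like rt.set_start(_lower_bound)
        if (start >= end) { return; }           // nothing left after clipping
        lower_bound = start;                    // the bound only moves forward
        emitted.emplace_back(start, end);
    }
};

int main() {
    emitter e;
    e.emit(4, 10);  // emitted as-is, bound becomes 4
    e.emit(2, 6);   // start clipped from 2 up to 4, emits [4, 6)
    e.emit(1, 3);   // entirely behind the bound, dropped
    for (auto [s, en] : e.emitted) {
        std::cout << "[" << s << ", " << en << ")\n";
    }
}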
inline
|
||||
void cache_flat_mutation_reader::maybe_add_to_cache(const range_tombstone_change& rtc) {
|
||||
clogger.trace("csm {}: maybe_add_to_cache({})", fmt::ptr(this), rtc);
|
||||
auto rt_opt = _rt_assembler.consume(*_schema, range_tombstone_change(rtc));
|
||||
if (!rt_opt) {
|
||||
return;
|
||||
}
|
||||
const auto& rt = *rt_opt;
|
||||
void cache_flat_mutation_reader::maybe_add_to_cache(const range_tombstone& rt) {
|
||||
if (can_populate()) {
|
||||
clogger.trace("csm {}: maybe_add_to_cache({})", fmt::ptr(this), rt);
|
||||
_lsa_manager.run_in_update_section_with_allocator([&] {
|
||||
@@ -927,25 +825,25 @@ bool cache_flat_mutation_reader::can_populate() const {
|
||||
|
||||
// pass a reference to ctx to cache_flat_mutation_reader
|
||||
// keeping its ownership at caller's.
|
||||
inline flat_mutation_reader_v2 make_cache_flat_mutation_reader(schema_ptr s,
|
||||
inline flat_mutation_reader make_cache_flat_mutation_reader(schema_ptr s,
|
||||
dht::decorated_key dk,
|
||||
query::clustering_key_filter_ranges crr,
|
||||
row_cache& cache,
|
||||
cache::read_context& ctx,
|
||||
partition_snapshot_ptr snp)
|
||||
{
|
||||
return make_flat_mutation_reader_v2<cache::cache_flat_mutation_reader>(
|
||||
return make_flat_mutation_reader<cache::cache_flat_mutation_reader>(
|
||||
std::move(s), std::move(dk), std::move(crr), ctx, std::move(snp), cache);
|
||||
}
|
||||
|
||||
// transfer ownership of ctx to cache_flat_mutation_reader
|
||||
inline flat_mutation_reader_v2 make_cache_flat_mutation_reader(schema_ptr s,
|
||||
inline flat_mutation_reader make_cache_flat_mutation_reader(schema_ptr s,
|
||||
dht::decorated_key dk,
|
||||
query::clustering_key_filter_ranges crr,
|
||||
row_cache& cache,
|
||||
std::unique_ptr<cache::read_context> unique_ctx,
|
||||
partition_snapshot_ptr snp)
|
||||
{
|
||||
return make_flat_mutation_reader_v2<cache::cache_flat_mutation_reader>(
|
||||
return make_flat_mutation_reader<cache::cache_flat_mutation_reader>(
|
||||
std::move(s), std::move(dk), std::move(crr), std::move(unique_ctx), std::move(snp), cache);
|
||||
}
|
||||
|
||||
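As a side note, a minimal sketch of the ownership convention behind the two factory overloads above: one borrows the read context by reference (the caller keeps ownership), the other takes it by unique_ptr and owns it. The types here are stand-ins, not the ScyllaDB ones.

#include <iostream>
#include <memory>

struct read_context { int id; };

struct reader {
    // Borrow: the caller keeps the context alive for the reader's lifetime.
    explicit reader(read_context& ctx) : _ctx(&ctx) {}
    // Own: the reader takes over the context and destroys it when done.
    explicit reader(std::unique_ptr<read_context> ctx) : _owned(std::move(ctx)), _ctx(_owned.get()) {}

    int context_id() const { return _ctx->id; }
private:
    std::unique_ptr<read_context> _owned; // empty in the borrowing case
    read_context* _ctx;
};

int main() {
    read_context shared{1};
    reader borrowing(shared);                                        // caller still owns `shared`
    reader owning(std::make_unique<read_context>(read_context{2}));  // ownership transferred
    std::cout << borrowing.context_id() << " " << owning.context_id() << "\n";
}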
@@ -14,12 +14,12 @@
#include "cdc/generation.hh"
#include "keys.hh"

static const sstring cdc_partitioner_name = "com.scylladb.dht.CDCPartitioner";

namespace cdc {

const sstring cdc_partitioner::classname = "com.scylladb.dht.CDCPartitioner";

const sstring cdc_partitioner::name() const {
return classname;
return cdc_partitioner_name;
}

static dht::token to_token(int64_t value) {
@@ -48,7 +48,7 @@ cdc_partitioner::get_token(const schema& s, partition_key_view key) const {
}

using registry = class_registrator<dht::i_partitioner, cdc_partitioner>;
static registry registrator(cdc::cdc_partitioner::classname);
static registry registrator(cdc_partitioner_name);
static registry registrator_short_name("CDCPartitioner");

}
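For context, a tiny standalone approximation of the registration pattern above: the same partitioner factory is registered under both its fully qualified name and a short alias, so configuration can refer to either spelling. This is a sketch of the idea, not the real class_registrator.

#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>

struct partitioner {
    virtual ~partitioner() = default;
    virtual std::string name() const = 0;
};

struct cdc_like_partitioner final : partitioner {
    std::string name() const override { return "com.scylladb.dht.CDCPartitioner"; }
};

using factory = std::function<std::unique_ptr<partitioner>()>;

// Global string-keyed registry (miniature of the class registry in the tree).
std::map<std::string, factory>& registry() {
    static std::map<std::string, factory> r;
    return r;
}

// Each registrator instance adds one lookup key for the same factory.
struct registrator {
    explicit registrator(const std::string& key) {
        registry()[key] = [] { return std::make_unique<cdc_like_partitioner>(); };
    }
};

static registrator reg_full("com.scylladb.dht.CDCPartitioner");
static registrator reg_short("CDCPartitioner");

int main() {
    auto p = registry().at("CDCPartitioner")();
    std::cout << p->name() << "\n";
}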
|
||||
@@ -25,8 +25,6 @@ class key_view;
|
||||
namespace cdc {
|
||||
|
||||
struct cdc_partitioner final : public dht::i_partitioner {
|
||||
static const sstring classname;
|
||||
|
||||
cdc_partitioner() = default;
|
||||
virtual const sstring name() const override;
|
||||
virtual dht::token get_token(const schema& s, partition_key_view key) const override;
|
||||
|
||||
@@ -340,7 +340,7 @@ future<cdc::generation_id> generation_service::make_new_generation(const std::un
|
||||
auto normal_token_owners = tmptr->count_normal_token_owners();
|
||||
assert(normal_token_owners);
|
||||
|
||||
if (_feature_service.cdc_generations_v2) {
|
||||
if (_feature_service.cluster_supports_cdc_generations_v2()) {
|
||||
auto uuid = utils::make_random_uuid();
|
||||
cdc_log.info("Inserting new generation data at UUID {}", uuid);
|
||||
// This may take a while.
|
||||
@@ -670,13 +670,11 @@ constexpr char could_not_retrieve_msg_template[]
|
||||
|
||||
generation_service::generation_service(
|
||||
config cfg, gms::gossiper& g, sharded<db::system_distributed_keyspace>& sys_dist_ks,
|
||||
sharded<db::system_keyspace>& sys_ks,
|
||||
abort_source& abort_src, const locator::shared_token_metadata& stm, gms::feature_service& f,
|
||||
replica::database& db)
|
||||
: _cfg(std::move(cfg))
|
||||
, _gossiper(g)
|
||||
, _sys_dist_ks(sys_dist_ks)
|
||||
, _sys_ks(sys_ks)
|
||||
, _abort_src(abort_src)
|
||||
, _token_metadata(stm)
|
||||
, _feature_service(f)
|
||||
@@ -704,7 +702,7 @@ generation_service::~generation_service() {
|
||||
|
||||
future<> generation_service::after_join(std::optional<cdc::generation_id>&& startup_gen_id) {
|
||||
assert_shard_zero(__PRETTY_FUNCTION__);
|
||||
assert(_sys_ks.local().bootstrap_complete());
|
||||
assert(db::system_keyspace::bootstrap_complete());
|
||||
|
||||
_gen_id = std::move(startup_gen_id);
|
||||
_gossiper.register_(shared_from_this());
|
||||
@@ -778,7 +776,7 @@ future<> generation_service::check_and_repair_cdc_streams() {
|
||||
cdc_log.warn("check_and_repair_cdc_streams: no generation observed in gossip");
|
||||
should_regenerate = true;
|
||||
} else if (std::holds_alternative<cdc::generation_id_v1>(*latest)
|
||||
&& _feature_service.cdc_generations_v2) {
|
||||
&& _feature_service.cluster_supports_cdc_generations_v2()) {
|
||||
cdc_log.info(
|
||||
"Cluster still using CDC generation storage format V1 (id: {}), even though it already understands the V2 format."
|
||||
" Creating a new generation using V2.", *latest);
|
||||
@@ -880,7 +878,7 @@ future<> generation_service::handle_cdc_generation(std::optional<cdc::generation
|
||||
co_return;
|
||||
}
|
||||
|
||||
if (!_sys_ks.local().bootstrap_complete() || !_sys_dist_ks.local_is_initialized()
|
||||
if (!db::system_keyspace::bootstrap_complete() || !_sys_dist_ks.local_is_initialized()
|
||||
|| !_sys_dist_ks.local().started()) {
|
||||
// The service should not be listening for generation changes until after the node
|
||||
// is bootstrapped. Therefore we would previously assume that this condition
|
||||
|
||||
@@ -15,7 +15,6 @@
|
||||
|
||||
namespace db {
|
||||
class system_distributed_keyspace;
|
||||
class system_keyspace;
|
||||
}
|
||||
|
||||
namespace gms {
|
||||
@@ -52,7 +51,6 @@ private:
|
||||
config _cfg;
|
||||
gms::gossiper& _gossiper;
|
||||
sharded<db::system_distributed_keyspace>& _sys_dist_ks;
|
||||
sharded<db::system_keyspace>& _sys_ks;
|
||||
abort_source& _abort_src;
|
||||
const locator::shared_token_metadata& _token_metadata;
|
||||
gms::feature_service& _feature_service;
|
||||
@@ -79,9 +77,7 @@ private:
|
||||
future<> _cdc_streams_rewrite_complete = make_ready_future<>();
|
||||
public:
|
||||
generation_service(config cfg, gms::gossiper&,
|
||||
sharded<db::system_distributed_keyspace>&,
|
||||
sharded<db::system_keyspace>& sys_ks,
|
||||
abort_source&, const locator::shared_token_metadata&,
|
||||
sharded<db::system_distributed_keyspace>&, abort_source&, const locator::shared_token_metadata&,
|
||||
gms::feature_service&, replica::database& db);
|
||||
|
||||
future<> stop();
|
||||
|
||||
cdc/log.cc (65 changed lines)
@@ -20,7 +20,6 @@
|
||||
#include "cdc/cdc_options.hh"
|
||||
#include "cdc/change_visitor.hh"
|
||||
#include "cdc/metadata.hh"
|
||||
#include "cdc/cdc_partitioner.hh"
|
||||
#include "bytes.hh"
|
||||
#include "replica/database.hh"
|
||||
#include "db/schema_tables.hh"
|
||||
@@ -31,6 +30,7 @@
|
||||
#include "service/storage_proxy.hh"
|
||||
#include "types/tuple.hh"
|
||||
#include "cql3/statements/select_statement.hh"
|
||||
#include "cql3/multi_column_relation.hh"
|
||||
#include "cql3/untyped_result_set.hh"
|
||||
#include "log.hh"
|
||||
#include "utils/rjson.hh"
|
||||
@@ -169,8 +169,9 @@ public:
|
||||
|
||||
// in seastar thread
|
||||
auto log_schema = create_log_schema(schema);
|
||||
auto& keyspace = db.find_keyspace(schema.ks_name());
|
||||
|
||||
auto log_mut = db::schema_tables::make_create_table_mutations(log_schema, timestamp);
|
||||
auto log_mut = db::schema_tables::make_create_table_mutations(keyspace.metadata(), log_schema, timestamp);
|
||||
|
||||
mutations.insert(mutations.end(), std::make_move_iterator(log_mut.begin()), std::make_move_iterator(log_mut.end()));
|
||||
}
|
||||
@@ -180,36 +181,36 @@ public:
|
||||
bool is_cdc = new_schema.cdc_options().enabled();
|
||||
bool was_cdc = old_schema.cdc_options().enabled();
|
||||
|
||||
// if we are turning off cdc we can skip this, since even if columns change etc,
|
||||
// any writer should see cdc -> off together with any actual schema changes to
|
||||
// base table, so should never try to write to non-existent log column etc.
|
||||
// note that if user has set ttl=0 in cdc options, he is still responsible
|
||||
// for emptying the log.
|
||||
if (is_cdc) {
|
||||
// we need to create or modify the log & stream schemas iff either we changed cdc status (was != is)
|
||||
// or if cdc is on now unconditionally, since then any actual base schema changes will affect the column
|
||||
// etc.
|
||||
if (was_cdc || is_cdc) {
|
||||
auto& db = _ctxt._proxy.get_db().local();
|
||||
auto logname = log_name(old_schema.cf_name());
|
||||
auto& keyspace = db.find_keyspace(old_schema.ks_name());
|
||||
auto has_cdc_log = db.has_schema(old_schema.ks_name(), logname);
|
||||
auto log_schema = has_cdc_log ? db.find_schema(old_schema.ks_name(), logname) : nullptr;
|
||||
|
||||
if (!was_cdc && has_cdc_log) {
|
||||
// make sure the apparent log table really is a cdc log (not user table)
|
||||
// we just check the partitioner - since user tables should _not_ be able to
|
||||
// set/use this.
|
||||
if (log_schema->get_partitioner().name() != cdc::cdc_partitioner::classname) {
|
||||
// will throw
|
||||
check_that_cdc_log_table_does_not_exist(db, old_schema, logname);
|
||||
}
|
||||
if (!was_cdc) {
|
||||
check_that_cdc_log_table_does_not_exist(db, new_schema, log_name(new_schema.cf_name()));
|
||||
}
|
||||
if (is_cdc) {
|
||||
check_for_attempt_to_create_nested_cdc_log(db, new_schema);
|
||||
ensure_that_table_has_no_counter_columns(new_schema);
|
||||
}
|
||||
|
||||
check_for_attempt_to_create_nested_cdc_log(db, new_schema);
|
||||
ensure_that_table_has_no_counter_columns(new_schema);
|
||||
auto logname = log_name(old_schema.cf_name());
|
||||
auto& keyspace = db.find_keyspace(old_schema.ks_name());
|
||||
auto log_schema = was_cdc ? db.find_column_family(old_schema.ks_name(), logname).schema() : nullptr;
|
||||
|
||||
if (!is_cdc) {
|
||||
auto log_mut = db::schema_tables::make_drop_table_mutations(keyspace.metadata(), log_schema, timestamp);
|
||||
|
||||
mutations.insert(mutations.end(), std::make_move_iterator(log_mut.begin()), std::make_move_iterator(log_mut.end()));
|
||||
return;
|
||||
}
|
||||
|
||||
auto new_log_schema = create_log_schema(new_schema, log_schema ? std::make_optional(log_schema->id()) : std::nullopt, log_schema);
|
||||
|
||||
auto log_mut = log_schema
|
||||
? db::schema_tables::make_update_table_mutations(db, keyspace.metadata(), log_schema, new_log_schema, timestamp, false)
|
||||
: db::schema_tables::make_create_table_mutations(new_log_schema, timestamp)
|
||||
: db::schema_tables::make_create_table_mutations(keyspace.metadata(), new_log_schema, timestamp)
|
||||
;
|
||||
|
||||
mutations.insert(mutations.end(), std::make_move_iterator(log_mut.begin()), std::make_move_iterator(log_mut.end()));
|
||||
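The comment block above boils down to a small decision table over (was_cdc, is_cdc). Below is a hedged standalone condensation of that logic; the real code emits schema mutations (create, update, or drop of the log table) rather than returning an enum.

#include <cassert>

enum class log_action { none, create, update, drop };

inline log_action decide(bool was_cdc, bool is_cdc) {
    if (!was_cdc && !is_cdc) { return log_action::none;   } // CDC never involved
    if (!was_cdc &&  is_cdc) { return log_action::create; } // CDC just turned on
    if ( was_cdc && !is_cdc) { return log_action::drop;   } // CDC just turned off
    return log_action::update;                               // stays on: keep the log schema in sync
}

int main() {
    assert(decide(false, false) == log_action::none);
    assert(decide(false, true)  == log_action::create);
    assert(decide(true,  false) == log_action::drop);
    assert(decide(true,  true)  == log_action::update);
}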
@@ -217,16 +218,14 @@ public:
|
||||
}
|
||||
|
||||
void on_before_drop_column_family(const schema& schema, std::vector<mutation>& mutations, api::timestamp_type timestamp) override {
|
||||
auto logname = log_name(schema.cf_name());
|
||||
auto& db = _ctxt._proxy.get_db().local();
|
||||
auto has_cdc_log = db.has_schema(schema.ks_name(), logname);
|
||||
if (has_cdc_log) {
|
||||
auto log_schema = db.find_schema(schema.ks_name(), logname);
|
||||
if (log_schema->get_partitioner().name() != cdc::cdc_partitioner::classname) {
|
||||
return;
|
||||
}
|
||||
if (schema.cdc_options().enabled()) {
|
||||
auto logname = log_name(schema.cf_name());
|
||||
auto& db = _ctxt._proxy.get_db().local();
|
||||
auto& keyspace = db.find_keyspace(schema.ks_name());
|
||||
auto log_schema = db.find_column_family(schema.ks_name(), logname).schema();
|
||||
|
||||
auto log_mut = db::schema_tables::make_drop_table_mutations(keyspace.metadata(), log_schema, timestamp);
|
||||
|
||||
mutations.insert(mutations.end(), std::make_move_iterator(log_mut.begin()), std::make_move_iterator(log_mut.end()));
|
||||
}
|
||||
}
|
||||
@@ -408,7 +407,7 @@ static const sstring cdc_meta_column_prefix = "cdc$";
|
||||
static const sstring cdc_deleted_column_prefix = cdc_meta_column_prefix + "deleted_";
|
||||
static const sstring cdc_deleted_elements_column_prefix = cdc_meta_column_prefix + "deleted_elements_";
|
||||
|
||||
bool is_log_name(const std::string_view& table_name) {
|
||||
static bool is_log_name(const std::string_view& table_name) {
|
||||
return boost::ends_with(table_name, cdc_log_suffix);
|
||||
}
|
||||
|
||||
@@ -487,7 +486,7 @@ bytes log_data_column_deleted_elements_name_bytes(const bytes& column_name) {
|
||||
|
||||
static schema_ptr create_log_schema(const schema& s, std::optional<utils::UUID> uuid, schema_ptr old) {
|
||||
schema_builder b(s.ks_name(), log_name(s.cf_name()));
|
||||
b.with_partitioner(cdc::cdc_partitioner::classname);
|
||||
b.with_partitioner("com.scylladb.dht.CDCPartitioner");
|
||||
b.set_compaction_strategy(sstables::compaction_strategy_type::time_window);
|
||||
b.set_comment(fmt::format("CDC log for {}.{}", s.ks_name(), s.cf_name()));
|
||||
auto ttl_seconds = s.cdc_options().ttl();
|
||||
|
||||
@@ -60,8 +60,6 @@ struct operation_result_tracker;
|
||||
class db_context;
|
||||
class metadata;
|
||||
|
||||
bool is_log_name(const std::string_view& table_name);
|
||||
|
||||
/// \brief CDC service, responsible for schema listeners
|
||||
///
|
||||
/// CDC service will listen for schema changes and iff CDC is enabled/changed
|
||||
|
||||
@@ -8,8 +8,8 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <seastar/core/file.hh>
|
||||
#include <seastar/core/seastar.hh>
|
||||
#include "seastar/core/file.hh"
|
||||
#include "seastar/core/seastar.hh"
|
||||
#include "utils/disk-error-handler.hh"
|
||||
|
||||
#include "seastarx.hh"
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2019-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include "client_data.hh"
|
||||
#include <stdexcept>
|
||||
|
||||
sstring to_string(client_type ct) {
|
||||
switch (ct) {
|
||||
case client_type::cql: return "cql";
|
||||
case client_type::thrift: return "thrift";
|
||||
case client_type::alternator: return "alternator";
|
||||
}
|
||||
throw std::runtime_error("Invalid client_type");
|
||||
}
|
||||
|
||||
sstring to_string(client_connection_stage ccs) {
|
||||
switch (ccs) {
|
||||
case client_connection_stage::established: return "ESTABLISHED";
|
||||
case client_connection_stage::authenticating: return "AUTHENTICATING";
|
||||
case client_connection_stage::ready: return "READY";
|
||||
}
|
||||
throw std::runtime_error("Invalid client_connection_stage");
|
||||
}
|
||||
@@ -1,51 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2019-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
#pragma once
|
||||
|
||||
#include <seastar/net/inet_address.hh>
|
||||
#include <seastar/core/sstring.hh>
|
||||
#include "seastarx.hh"
|
||||
|
||||
#include <optional>
|
||||
|
||||
enum class client_type {
|
||||
cql = 0,
|
||||
thrift,
|
||||
alternator,
|
||||
};
|
||||
|
||||
sstring to_string(client_type ct);
|
||||
|
||||
enum class client_connection_stage {
|
||||
established = 0,
|
||||
authenticating,
|
||||
ready,
|
||||
};
|
||||
|
||||
sstring to_string(client_connection_stage ct);
|
||||
|
||||
// Representation of a row in `system.clients'. std::optionals are for nullable cells.
|
||||
struct client_data {
|
||||
net::inet_address ip;
|
||||
int32_t port;
|
||||
client_type ct;
|
||||
client_connection_stage connection_stage = client_connection_stage::established;
|
||||
int32_t shard_id; /// ID of server-side shard which is processing the connection.
|
||||
|
||||
std::optional<sstring> driver_name;
|
||||
std::optional<sstring> driver_version;
|
||||
std::optional<sstring> hostname;
|
||||
std::optional<int32_t> protocol_version;
|
||||
std::optional<sstring> ssl_cipher_suite;
|
||||
std::optional<bool> ssl_enabled;
|
||||
std::optional<sstring> ssl_protocol;
|
||||
std::optional<sstring> username;
|
||||
|
||||
sstring stage_str() const { return to_string(connection_stage); }
|
||||
sstring client_type_str() const { return to_string(ct); }
|
||||
};
|
||||
@@ -19,7 +19,7 @@ using column_computation_ptr = std::unique_ptr<column_computation>;
|
||||
|
||||
/*
|
||||
* Column computation represents a computation performed in order to obtain a value for a computed column.
|
||||
* Computed columns description is also available at docs/dev/system_schema_keyspace.md. They hold values
|
||||
* Computed columns description is also available at docs/system_schema_keyspace.md. They hold values
|
||||
* not provided directly by the user, but rather computed: from other column values and possibly other sources.
|
||||
* This class is able to serialize/deserialize column computations and perform the computation itself,
|
||||
* based on given schema, partition key and clustering row. Responsibility for providing enough data
|
||||
|
||||
@@ -26,7 +26,6 @@
|
||||
#include <seastar/core/scheduling.hh>
|
||||
#include <seastar/core/coroutine.hh>
|
||||
#include <seastar/util/closeable.hh>
|
||||
#include <seastar/core/shared_ptr.hh>
|
||||
|
||||
#include "sstables/sstables.hh"
|
||||
#include "sstables/sstable_writer.hh"
|
||||
@@ -34,6 +33,7 @@
|
||||
#include "sstables/sstables_manager.hh"
|
||||
#include "compaction.hh"
|
||||
#include "compaction_manager.hh"
|
||||
#include "mutation_reader.hh"
|
||||
#include "schema.hh"
|
||||
#include "db/system_keyspace.hh"
|
||||
#include "service/priority_manager.hh"
|
||||
@@ -41,7 +41,6 @@
|
||||
#include "mutation_compactor.hh"
|
||||
#include "leveled_manifest.hh"
|
||||
#include "dht/token.hh"
|
||||
#include "dht/partition_filter.hh"
|
||||
#include "mutation_writer/shard_based_splitting_writer.hh"
|
||||
#include "mutation_writer/partition_based_splitting_writer.hh"
|
||||
#include "mutation_source_metadata.hh"
|
||||
@@ -49,9 +48,6 @@
|
||||
#include "utils/UUID_gen.hh"
|
||||
#include "utils/utf8.hh"
|
||||
#include "utils/fmt-compat.hh"
|
||||
#include "utils/error_injection.hh"
|
||||
#include "readers/filtering.hh"
|
||||
#include "readers/compacting.hh"
|
||||
#include "tombstone_gc.hh"
|
||||
|
||||
namespace sstables {
|
||||
@@ -90,7 +86,7 @@ compaction_type to_compaction_type(sstring type_name) {
|
||||
throw std::runtime_error("Invalid Compaction Type Name");
|
||||
}
|
||||
|
||||
std::string_view to_string(compaction_type type) {
|
||||
static std::string_view to_string(compaction_type type) {
|
||||
switch (type) {
|
||||
case compaction_type::Compaction: return "Compact";
|
||||
case compaction_type::Cleanup: return "Cleanup";
|
||||
@@ -166,7 +162,7 @@ std::ostream& operator<<(std::ostream& os, pretty_printed_throughput tp) {
|
||||
}
|
||||
|
||||
static api::timestamp_type get_max_purgeable_timestamp(const table_state& table_s, sstable_set::incremental_selector& selector,
const std::unordered_set<shared_sstable>& compacting_set, const dht::decorated_key& dk, uint64_t& bloom_filter_checks) {
const std::unordered_set<shared_sstable>& compacting_set, const dht::decorated_key& dk) {
auto timestamp = table_s.min_memtable_timestamp();
std::optional<utils::hashed_key> hk;
for (auto&& sst : boost::range::join(selector.select(dk).sstables, table_s.compacted_undeleted_sstables())) {
@@ -177,7 +173,6 @@ static api::timestamp_type get_max_purgeable_timestamp(const table_state& table_
hk = sstables::sstable::make_hashed_key(*table_s.schema(), dk.key());
}
if (sst->filter_has_key(*hk)) {
bloom_filter_checks++;
timestamp = std::min(timestamp, sst->get_stats_metadata().min_timestamp);
}
}
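A minimal standalone sketch of the max-purgeable computation above, assuming a plain boolean as a stand-in for the bloom-filter check: a tombstone can only be purged if it is older than any data for the same key that is not taking part in this compaction, so the threshold is the minimum over the memtable timestamp and every other sstable that may still contain the key.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct other_sstable {
    int64_t min_timestamp;
    bool may_contain_key;   // stand-in for sst->filter_has_key(hk)
};

int64_t max_purgeable(int64_t min_memtable_timestamp, const std::vector<other_sstable>& others) {
    int64_t ts = min_memtable_timestamp;
    for (const auto& sst : others) {
        if (sst.may_contain_key) {
            ts = std::min(ts, sst.min_timestamp);
        }
    }
    return ts;  // anything strictly older than this can be purged
}

int main() {
    std::vector<other_sstable> others{{100, true}, {50, false}, {70, true}};
    std::cout << max_purgeable(200, others) << "\n"; // prints 70: the 50 is ruled out by the bloom check
}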
@@ -185,7 +180,7 @@ static api::timestamp_type get_max_purgeable_timestamp(const table_state& table_
|
||||
}
|
||||
|
||||
static std::vector<shared_sstable> get_uncompacting_sstables(const table_state& table_s, std::vector<shared_sstable> sstables) {
|
||||
auto all_sstables = boost::copy_range<std::vector<shared_sstable>>(*table_s.main_sstable_set().all());
|
||||
auto all_sstables = boost::copy_range<std::vector<shared_sstable>>(*table_s.get_sstable_set().all());
|
||||
auto& compacted_undeleted = table_s.compacted_undeleted_sstables();
|
||||
all_sstables.insert(all_sstables.end(), compacted_undeleted.begin(), compacted_undeleted.end());
|
||||
boost::sort(all_sstables, [] (const shared_sstable& x, const shared_sstable& y) {
|
||||
@@ -321,9 +316,9 @@ public:
|
||||
stop_iteration consume(clustering_row&& cr) {
|
||||
return consume(std::move(cr), row_tombstone{}, bool{});
|
||||
}
|
||||
stop_iteration consume(range_tombstone_change&& rtc) {
|
||||
stop_iteration consume(range_tombstone&& rt) {
|
||||
maybe_abort_compaction();
|
||||
return _compaction_writer->writer.consume(std::move(rtc));
|
||||
return _compaction_writer->writer.consume(std::move(rt));
|
||||
}
|
||||
|
||||
stop_iteration consume_end_of_partition();
|
||||
@@ -395,17 +390,14 @@ struct compaction_read_monitor_generator final : public read_monitor_generator {
|
||||
}
|
||||
private:
|
||||
table_state& _table_s;
|
||||
std::unordered_map<generation_type, compaction_read_monitor> _generated_monitors;
|
||||
std::unordered_map<int64_t, compaction_read_monitor> _generated_monitors;
|
||||
};
|
||||
|
||||
class formatted_sstables_list {
|
||||
bool _include_origin = true;
|
||||
std::vector<std::string> _ssts;
|
||||
std::vector<sstring> _ssts;
|
||||
public:
|
||||
formatted_sstables_list() = default;
|
||||
void reserve(size_t n) {
|
||||
_ssts.reserve(n);
|
||||
}
|
||||
explicit formatted_sstables_list(const std::vector<shared_sstable>& ssts, bool include_origin) : _include_origin(include_origin) {
|
||||
_ssts.reserve(ssts.size());
|
||||
for (const auto& sst : ssts) {
|
||||
@@ -424,7 +416,9 @@ public:
|
||||
};
|
||||
|
||||
std::ostream& operator<<(std::ostream& os, const formatted_sstables_list& lst) {
|
||||
fmt::print(os, "[{}]", fmt::join(lst._ssts, ","));
|
||||
os << "[";
|
||||
os << boost::algorithm::join(lst._ssts, ",");
|
||||
os << "]";
|
||||
return os;
|
||||
}
|
||||
|
||||
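Both variants of operator<< above render the sstable list as a bracketed, comma-joined string (one via fmt::join, the other via boost::algorithm::join). A standard-library-only sketch producing the same shape of output:

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

std::string render_list(const std::vector<std::string>& names) {
    std::ostringstream os;
    os << "[";
    for (std::size_t i = 0; i < names.size(); ++i) {
        if (i) { os << ","; }
        os << names[i];
    }
    os << "]";
    return os.str();
}

int main() {
    std::cout << render_list({"md-1-big-Data.db", "md-2-big-Data.db"}) << "\n";
    // prints: [md-1-big-Data.db,md-2-big-Data.db]
}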
@@ -436,7 +430,7 @@ protected:
|
||||
schema_ptr _schema;
|
||||
reader_permit _permit;
|
||||
std::vector<shared_sstable> _sstables;
|
||||
std::vector<generation_type> _input_sstable_generations;
|
||||
std::vector<unsigned long> _input_sstable_generations;
|
||||
// Unused sstables are tracked because if compaction is interrupted we can only delete them.
|
||||
// Deleting used sstables could potentially result in data loss.
|
||||
std::unordered_set<shared_sstable> _new_partial_sstables;
|
||||
@@ -449,7 +443,6 @@ protected:
|
||||
uint64_t _start_size = 0;
|
||||
uint64_t _end_size = 0;
|
||||
uint64_t _estimated_partitions = 0;
|
||||
uint64_t _bloom_filter_checks = 0;
|
||||
db::replay_position _rp;
|
||||
encoding_stats_collector _stats_collector;
|
||||
bool _contains_multi_fragment_runs = false;
|
||||
@@ -559,13 +552,13 @@ protected:
|
||||
return bool(_sstable_set);
|
||||
}
|
||||
|
||||
compaction_writer create_gc_compaction_writer(utils::UUID gc_run) const {
|
||||
compaction_writer create_gc_compaction_writer() const {
|
||||
auto sst = _sstable_creator(this_shard_id());
|
||||
|
||||
auto&& priority = _io_priority;
|
||||
auto monitor = std::make_unique<compaction_write_monitor>(sst, _table_s, maximum_timestamp(), _sstable_level);
|
||||
sstable_writer_config cfg = _table_s.configure_writer("garbage_collection");
|
||||
cfg.run_identifier = gc_run;
|
||||
cfg.run_identifier = _run_identifier;
|
||||
cfg.monitor = monitor.get();
|
||||
auto writer = sst->get_writer(*schema(), partitions_per_sstable(), cfg, get_encoding_stats(), priority);
|
||||
return compaction_writer(std::move(monitor), std::move(writer), std::move(sst));
|
||||
@@ -586,14 +579,8 @@ protected:
|
||||
// When compaction finishes, all the temporary sstables generated here will be deleted and removed
|
||||
// from table's sstable set.
|
||||
compacted_fragments_writer get_gc_compacted_fragments_writer() {
|
||||
// because the temporary sstable run can overlap with the non-gc sstables run created by
|
||||
// get_compacted_fragments_writer(), we have to use a different run_id. the gc_run_id is
|
||||
// created here as:
|
||||
// 1. it can be shared across all sstables created by this writer
|
||||
// 2. it is optional, as gc writer is not always used
|
||||
auto gc_run = utils::make_random_uuid();
|
||||
return compacted_fragments_writer(*this,
|
||||
[this, gc_run] (const dht::decorated_key&) { return create_gc_compaction_writer(gc_run); },
|
||||
[this] (const dht::decorated_key&) { return create_gc_compaction_writer(); },
|
||||
[this] (compaction_writer* cw) { stop_gc_compaction_writer(cw); },
|
||||
_stop_request_observable);
|
||||
}
|
||||
@@ -609,10 +596,6 @@ protected:
|
||||
const std::vector<shared_sstable>& used_garbage_collected_sstables() const {
|
||||
return _used_garbage_collected_sstables;
|
||||
}
|
||||
|
||||
virtual bool enable_garbage_collected_sstable_writer() const noexcept {
|
||||
return _contains_multi_fragment_runs && _max_sstable_size != std::numeric_limits<uint64_t>::max() && bool(_replacer);
|
||||
}
|
||||
public:
|
||||
compaction& operator=(const compaction&) = delete;
|
||||
compaction(const compaction&) = delete;
|
||||
@@ -630,16 +613,13 @@ private:
|
||||
return _table_s.get_compaction_strategy().make_sstable_set(_schema);
|
||||
}
|
||||
|
||||
future<> setup() {
|
||||
void setup() {
|
||||
auto ssts = make_lw_shared<sstables::sstable_set>(make_sstable_set_for_input());
|
||||
formatted_sstables_list formatted_msg;
|
||||
formatted_msg.reserve(_sstables.size());
|
||||
auto fully_expired = _table_s.fully_expired_sstables(_sstables, gc_clock::now());
|
||||
min_max_tracker<api::timestamp_type> timestamp_tracker;
|
||||
|
||||
_input_sstable_generations.reserve(_sstables.size());
|
||||
for (auto& sst : _sstables) {
|
||||
co_await coroutine::maybe_yield();
|
||||
auto& sst_stats = sst->get_stats_metadata();
|
||||
timestamp_tracker.update(sst_stats.min_timestamp);
|
||||
timestamp_tracker.update(sst_stats.max_timestamp);
|
||||
@@ -689,13 +669,13 @@ private:
|
||||
// to be compacted together.
|
||||
future<> consume_without_gc_writer(gc_clock::time_point compaction_time) {
|
||||
auto consumer = make_interposer_consumer([this] (flat_mutation_reader_v2 reader) mutable {
|
||||
return seastar::async([this, reader = std::move(reader)] () mutable {
|
||||
return seastar::async([this, reader = downgrade_to_v1(std::move(reader))] () mutable {
|
||||
auto close_reader = deferred_close(reader);
|
||||
auto cfc = compacted_fragments_writer(get_compacted_fragments_writer());
|
||||
reader.consume_in_thread(std::move(cfc));
|
||||
});
|
||||
});
|
||||
return consumer(make_compacting_reader(make_sstable_reader(), compaction_time, max_purgeable_func()));
|
||||
return consumer(upgrade_to_v2(make_compacting_reader(downgrade_to_v1(make_sstable_reader()), compaction_time, max_purgeable_func())));
|
||||
}
|
||||
|
||||
future<> consume() {
|
||||
@@ -712,7 +692,7 @@ private:
|
||||
auto close_reader = deferred_close(reader);
|
||||
|
||||
if (enable_garbage_collected_sstable_writer()) {
|
||||
using compact_mutations = compact_for_compaction_v2<compacted_fragments_writer, compacted_fragments_writer>;
|
||||
using compact_mutations = compact_for_compaction<compacted_fragments_writer, compacted_fragments_writer>;
|
||||
auto cfc = compact_mutations(*schema(), now,
|
||||
max_purgeable_func(),
|
||||
get_compacted_fragments_writer(),
|
||||
@@ -721,7 +701,7 @@ private:
|
||||
reader.consume_in_thread(std::move(cfc));
|
||||
return;
|
||||
}
|
||||
using compact_mutations = compact_for_compaction_v2<compacted_fragments_writer, noop_compacted_fragments_consumer>;
|
||||
using compact_mutations = compact_for_compaction<compacted_fragments_writer, noop_compacted_fragments_consumer>;
|
||||
auto cfc = compact_mutations(*schema(), now,
|
||||
max_purgeable_func(),
|
||||
get_compacted_fragments_writer(),
|
||||
@@ -739,16 +719,12 @@ private:
|
||||
virtual bool use_interposer_consumer() const {
|
||||
return _table_s.get_compaction_strategy().use_interposer_consumer();
|
||||
}
|
||||
protected:
|
||||
virtual compaction_result finish(std::chrono::time_point<db_clock> started_at, std::chrono::time_point<db_clock> ended_at) {
|
||||
|
||||
compaction_result finish(std::chrono::time_point<db_clock> started_at, std::chrono::time_point<db_clock> ended_at) {
|
||||
compaction_result ret {
|
||||
.new_sstables = std::move(_all_new_sstables),
|
||||
.stats {
|
||||
.ended_at = ended_at,
|
||||
.start_size = _start_size,
|
||||
.end_size = _end_size,
|
||||
.bloom_filter_checks = _bloom_filter_checks,
|
||||
},
|
||||
.ended_at = ended_at,
|
||||
.end_size = _end_size,
|
||||
};
|
||||
|
||||
auto ratio = double(_end_size) / double(_start_size);
|
||||
@@ -767,16 +743,11 @@ protected:
|
||||
log_info("{} {} sstables to {}. {} to {} (~{}% of original) in {}ms = {}. ~{} total partitions merged to {}.",
|
||||
report_finish_desc(),
|
||||
_input_sstable_generations.size(), new_sstables_msg, pretty_printed_data_size(_start_size), pretty_printed_data_size(_end_size), int(ratio * 100),
|
||||
std::chrono::duration_cast<std::chrono::milliseconds>(duration).count(), pretty_printed_throughput(_start_size, duration),
|
||||
std::chrono::duration_cast<std::chrono::milliseconds>(duration).count(), pretty_printed_throughput(_end_size, duration),
|
||||
_cdata.total_partitions, _cdata.total_keys_written);
|
||||
|
||||
return ret;
|
||||
}
|
||||
private:
|
||||
void on_interrupt(std::exception_ptr ex) {
|
||||
log_info("{} of {} sstables interrupted due to: {}", report_start_desc(), _input_sstable_generations.size(), ex);
|
||||
delete_sstables_for_interrupted_compaction();
|
||||
}
|
||||
|
||||
virtual std::string_view report_start_desc() const = 0;
|
||||
virtual std::string_view report_finish_desc() const = 0;
|
||||
@@ -788,7 +759,7 @@ private:
|
||||
};
|
||||
}
|
||||
return [this] (const dht::decorated_key& dk) {
|
||||
return get_max_purgeable_timestamp(_table_s, *_selector, _compacting_for_max_purgeable_func, dk, _bloom_filter_checks);
|
||||
return get_max_purgeable_timestamp(_table_s, *_selector, _compacting_for_max_purgeable_func, dk);
|
||||
};
|
||||
}
|
||||
|
||||
@@ -854,6 +825,10 @@ protected:
|
||||
log(log_level::trace, std::move(fmt), std::forward<Args>(args)...);
|
||||
}
|
||||
public:
|
||||
bool enable_garbage_collected_sstable_writer() const noexcept {
|
||||
return _contains_multi_fragment_runs && _max_sstable_size != std::numeric_limits<uint64_t>::max();
|
||||
}
|
||||
|
||||
static future<compaction_result> run(std::unique_ptr<compaction> c);
|
||||
|
||||
friend class compacted_fragments_writer;
|
||||
@@ -906,6 +881,51 @@ void compacted_fragments_writer::consume_end_of_stream() {
|
||||
}
|
||||
}
|
||||
|
||||
class reshape_compaction : public compaction {
|
||||
public:
|
||||
reshape_compaction(table_state& table_s, compaction_descriptor descriptor, compaction_data& cdata)
|
||||
: compaction(table_s, std::move(descriptor), cdata) {
|
||||
}
|
||||
|
||||
virtual sstables::sstable_set make_sstable_set_for_input() const override {
|
||||
return sstables::make_partitioned_sstable_set(_schema, make_lw_shared<sstable_list>(sstable_list{}), false);
|
||||
}
|
||||
|
||||
flat_mutation_reader_v2 make_sstable_reader() const override {
|
||||
return _compacting->make_local_shard_sstable_reader(_schema,
|
||||
_permit,
|
||||
query::full_partition_range,
|
||||
_schema->full_slice(),
|
||||
_io_priority,
|
||||
tracing::trace_state_ptr(),
|
||||
::streamed_mutation::forwarding::no,
|
||||
::mutation_reader::forwarding::no,
|
||||
default_read_monitor_generator());
|
||||
}
|
||||
|
||||
std::string_view report_start_desc() const override {
|
||||
return "Reshaping";
|
||||
}
|
||||
|
||||
std::string_view report_finish_desc() const override {
|
||||
return "Reshaped";
|
||||
}
|
||||
|
||||
virtual compaction_writer create_compaction_writer(const dht::decorated_key& dk) override {
|
||||
auto sst = _sstable_creator(this_shard_id());
|
||||
setup_new_sstable(sst);
|
||||
|
||||
sstable_writer_config cfg = make_sstable_writer_config(compaction_type::Reshape);
|
||||
return compaction_writer{sst->get_writer(*_schema, partitions_per_sstable(), cfg, get_encoding_stats(), _io_priority), sst};
|
||||
}
|
||||
|
||||
virtual void stop_sstable_writer(compaction_writer* writer) override {
|
||||
if (writer) {
|
||||
finish_new_sstable(writer);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
class regular_compaction : public compaction {
|
||||
// keeps track of monitors for input sstable, which are responsible for adjusting backlog as compaction progresses.
|
||||
mutable compaction_read_monitor_generator _monitor_generator;
|
||||
@@ -994,7 +1014,7 @@ private:
|
||||
_new_unused_sstables.insert(_new_unused_sstables.end(), unused_gc_sstables.begin(), unused_gc_sstables.end());
|
||||
|
||||
auto exhausted_ssts = std::vector<shared_sstable>(exhausted, _sstables.end());
|
||||
log_debug("Replacing earlier exhausted sstable(s) {} by new sstable(s) {}", formatted_sstables_list(exhausted_ssts, false), formatted_sstables_list(_new_unused_sstables, true));
|
||||
log_debug("Replacing earlier exhausted sstable(s) {} by new sstable {}", formatted_sstables_list(exhausted_ssts, false), sst->get_filename());
|
||||
_replacer(get_compaction_completion_desc(exhausted_ssts, std::move(_new_unused_sstables)));
|
||||
_sstables.erase(exhausted, _sstables.end());
|
||||
_monitor_generator.remove_exhausted_sstables(exhausted_ssts);
|
||||
@@ -1015,13 +1035,12 @@ private:
|
||||
}
|
||||
|
||||
void update_pending_ranges() {
|
||||
auto pending_replacements = std::exchange(_cdata.pending_replacements, {});
|
||||
if (!_sstable_set || _sstable_set->all()->empty() || pending_replacements.empty()) { // set can be empty for testing scenario.
|
||||
if (!_sstable_set || _sstable_set->all()->empty() || _cdata.pending_replacements.empty()) { // set can be empty for testing scenario.
|
||||
return;
|
||||
}
|
||||
// Releases reference to sstables compacted by this compaction or another, both of which belongs
|
||||
// to the same column family
|
||||
for (auto& pending_replacement : pending_replacements) {
|
||||
for (auto& pending_replacement : _cdata.pending_replacements) {
|
||||
for (auto& sst : pending_replacement.removed) {
|
||||
// Set may not contain sstable to be removed because this compaction may have started
|
||||
// before the creation of that sstable.
|
||||
@@ -1035,81 +1054,40 @@ private:
|
||||
}
|
||||
}
|
||||
_selector.emplace(_sstable_set->make_incremental_selector());
|
||||
}
|
||||
};
|
||||
|
||||
class reshape_compaction : public regular_compaction {
|
||||
private:
|
||||
bool has_sstable_replacer() const noexcept {
|
||||
return bool(_replacer);
|
||||
}
|
||||
public:
|
||||
reshape_compaction(table_state& table_s, compaction_descriptor descriptor, compaction_data& cdata)
|
||||
: regular_compaction(table_s, std::move(descriptor), cdata) {
|
||||
}
|
||||
|
||||
virtual sstables::sstable_set make_sstable_set_for_input() const override {
|
||||
return sstables::make_partitioned_sstable_set(_schema, false);
|
||||
}
|
||||
|
||||
// Unconditionally enable incremental compaction if the strategy specifies a max output size, e.g. LCS.
|
||||
virtual bool enable_garbage_collected_sstable_writer() const noexcept override {
|
||||
return _max_sstable_size != std::numeric_limits<uint64_t>::max() && bool(_replacer);
|
||||
}
|
||||
|
||||
flat_mutation_reader_v2 make_sstable_reader() const override {
|
||||
return _compacting->make_local_shard_sstable_reader(_schema,
|
||||
_permit,
|
||||
query::full_partition_range,
|
||||
_schema->full_slice(),
|
||||
_io_priority,
|
||||
tracing::trace_state_ptr(),
|
||||
::streamed_mutation::forwarding::no,
|
||||
::mutation_reader::forwarding::no,
|
||||
default_read_monitor_generator());
|
||||
}
|
||||
|
||||
std::string_view report_start_desc() const override {
|
||||
return "Reshaping";
|
||||
}
|
||||
|
||||
std::string_view report_finish_desc() const override {
|
||||
return "Reshaped";
|
||||
}
|
||||
|
||||
virtual compaction_writer create_compaction_writer(const dht::decorated_key& dk) override {
|
||||
auto sst = _sstable_creator(this_shard_id());
|
||||
setup_new_sstable(sst);
|
||||
|
||||
sstable_writer_config cfg = make_sstable_writer_config(compaction_type::Reshape);
|
||||
return compaction_writer{sst->get_writer(*_schema, partitions_per_sstable(), cfg, get_encoding_stats(), _io_priority), sst};
|
||||
}
|
||||
|
||||
virtual void stop_sstable_writer(compaction_writer* writer) override {
|
||||
if (writer) {
|
||||
if (has_sstable_replacer()) {
|
||||
regular_compaction::stop_sstable_writer(writer);
|
||||
} else {
|
||||
finish_new_sstable(writer);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
virtual void on_end_of_compaction() override {
|
||||
if (has_sstable_replacer()) {
|
||||
regular_compaction::on_end_of_compaction();
|
||||
}
|
||||
_cdata.pending_replacements.clear();
|
||||
}
|
||||
};
|
||||
|
||||
class cleanup_compaction final : public regular_compaction {
|
||||
owned_ranges_ptr _owned_ranges;
|
||||
mutable dht::incremental_owned_ranges_checker _owned_ranges_checker;
class incremental_owned_ranges_checker {
const dht::token_range_vector& _sorted_owned_ranges;
mutable dht::token_range_vector::const_iterator _it;
public:
incremental_owned_ranges_checker(const dht::token_range_vector& sorted_owned_ranges)
: _sorted_owned_ranges(sorted_owned_ranges)
, _it(_sorted_owned_ranges.begin()) {
}

// Must be called with increasing token values.
bool belongs_to_current_node(const dht::token& t) const {
// While token T is after a range Rn, advance the iterator.
// iterator will be stopped at a range which either overlaps with T (if T belongs to node),
// or at a range which is after T (if T doesn't belong to this node).
while (_it != _sorted_owned_ranges.end() && _it->after(t, dht::token_comparator())) {
_it++;
}

return _it != _sorted_owned_ranges.end() && _it->contains(t, dht::token_comparator());
}
};
|
||||
const dht::token_range_vector _owned_ranges;
|
||||
incremental_owned_ranges_checker _owned_ranges_checker;
|
||||
private:
|
||||
// Called in a seastar thread
|
||||
dht::partition_range_vector
|
||||
get_ranges_for_invalidation(const std::vector<shared_sstable>& sstables) {
|
||||
auto owned_ranges = dht::to_partition_ranges(*_owned_ranges, utils::can_yield::yes);
|
||||
auto owned_ranges = dht::to_partition_ranges(_owned_ranges, utils::can_yield::yes);
|
||||
|
||||
auto non_owned_ranges = boost::copy_range<dht::partition_range_vector>(sstables
|
||||
| boost::adaptors::transformed([] (const shared_sstable& sst) {
|
||||
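A hedged, integer-only sketch of the incremental_owned_ranges_checker defined above: because the owned ranges are sorted and disjoint and query tokens arrive in non-decreasing order, the scan position only ever advances, so each token is checked in amortized constant time.

#include <cassert>
#include <utility>
#include <vector>

class incremental_range_checker {
    const std::vector<std::pair<int, int>>& _ranges;   // closed [lo, hi] ranges, sorted and disjoint
    std::vector<std::pair<int, int>>::const_iterator _it;
public:
    explicit incremental_range_checker(const std::vector<std::pair<int, int>>& ranges)
        : _ranges(ranges), _it(_ranges.begin()) {}

    // Must be called with non-decreasing values of t.
    bool contains(int t) {
        while (_it != _ranges.end() && _it->second < t) {  // range lies entirely before t: skip it
            ++_it;
        }
        return _it != _ranges.end() && _it->first <= t;    // t falls inside the current range
    }
};

int main() {
    std::vector<std::pair<int, int>> owned{{0, 10}, {20, 30}};
    incremental_range_checker c(owned);
    assert(c.contains(5));
    assert(!c.contains(15));
    assert(c.contains(25));
    assert(!c.contains(40));
}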
@@ -1141,10 +1119,10 @@ protected:
|
||||
}
|
||||
|
||||
private:
|
||||
cleanup_compaction(table_state& table_s, compaction_descriptor descriptor, compaction_data& cdata, owned_ranges_ptr owned_ranges)
|
||||
cleanup_compaction(table_state& table_s, compaction_descriptor descriptor, compaction_data& cdata, dht::token_range_vector owned_ranges)
|
||||
: regular_compaction(table_s, std::move(descriptor), cdata)
|
||||
, _owned_ranges(std::move(owned_ranges))
|
||||
, _owned_ranges_checker(*_owned_ranges)
|
||||
, _owned_ranges_checker(_owned_ranges)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -1166,7 +1144,7 @@ public:
|
||||
return "Cleaned";
|
||||
}
|
||||
|
||||
flat_mutation_reader_v2::filter make_partition_filter() const {
|
||||
flat_mutation_reader::filter make_partition_filter() const {
|
||||
return [this] (const dht::decorated_key& dk) {
|
||||
#ifdef SEASTAR_DEBUG
|
||||
// sstables should never be shared with other shards at this point.
|
||||
@@ -1255,14 +1233,12 @@ private:
|
||||
flat_mutation_reader_v2 _reader;
|
||||
mutation_fragment_stream_validator _validator;
|
||||
bool _skip_to_next_partition = false;
|
||||
uint64_t& _validation_errors;
|
||||
|
||||
private:
|
||||
void maybe_abort_scrub() {
|
||||
if (_scrub_mode == compaction_type_options::scrub::mode::abort) {
|
||||
throw compaction_aborted_exception(_schema->ks_name(), _schema->cf_name(), "scrub compaction found invalid data");
|
||||
}
|
||||
++_validation_errors;
|
||||
}
|
||||
|
||||
void on_unexpected_partition_start(const mutation_fragment_v2& ps) {
|
||||
@@ -1343,7 +1319,6 @@ private:
|
||||
}
|
||||
|
||||
void fill_buffer_from_underlying() {
|
||||
utils::get_local_injector().inject("rest_api_keyspace_scrub_abort", [] { throw compaction_aborted_exception("", "", "scrub compaction found invalid data"); });
|
||||
while (!_reader.is_buffer_empty() && !is_buffer_full()) {
|
||||
auto mf = _reader.pop_mutation_fragment();
|
||||
if (mf.is_partition_start()) {
|
||||
@@ -1383,12 +1358,11 @@ private:
|
||||
}
|
||||
|
||||
public:
|
||||
reader(flat_mutation_reader_v2 underlying, compaction_type_options::scrub::mode scrub_mode, uint64_t& validation_errors)
|
||||
reader(flat_mutation_reader_v2 underlying, compaction_type_options::scrub::mode scrub_mode)
|
||||
: impl(underlying.schema(), underlying.permit())
|
||||
, _scrub_mode(scrub_mode)
|
||||
, _reader(std::move(underlying))
|
||||
, _validator(*_schema)
|
||||
, _validation_errors(validation_errors)
|
||||
{ }
|
||||
virtual future<> fill_buffer() override {
|
||||
if (_end_of_stream) {
|
||||
@@ -1436,7 +1410,6 @@ private:
|
||||
std::string _scrub_start_description;
|
||||
mutable std::string _scrub_finish_description;
|
||||
uint64_t _bucket_count = 0;
|
||||
mutable uint64_t _validation_errors = 0;
|
||||
|
||||
public:
|
||||
scrub_compaction(table_state& table_s, compaction_descriptor descriptor, compaction_data& cdata, compaction_type_options::scrub options)
|
||||
@@ -1459,7 +1432,7 @@ public:
|
||||
|
||||
flat_mutation_reader_v2 make_sstable_reader() const override {
|
||||
auto crawling_reader = _compacting->make_crawling_reader(_schema, _permit, _io_priority, nullptr);
|
||||
return make_flat_mutation_reader_v2<reader>(std::move(crawling_reader), _options.operation_mode, _validation_errors);
|
||||
return make_flat_mutation_reader_v2<reader>(std::move(crawling_reader), _options.operation_mode);
|
||||
}
|
||||
|
||||
uint64_t partitions_per_sstable() const override {
|
||||
@@ -1490,17 +1463,12 @@ public:
|
||||
return _options.operation_mode == compaction_type_options::scrub::mode::segregate;
|
||||
}
|
||||
|
||||
compaction_result finish(std::chrono::time_point<db_clock> started_at, std::chrono::time_point<db_clock> ended_at) override {
|
||||
auto ret = compaction::finish(started_at, ended_at);
|
||||
ret.stats.validation_errors = _validation_errors;
|
||||
return ret;
|
||||
}
|
||||
|
||||
friend flat_mutation_reader_v2 make_scrubbing_reader(flat_mutation_reader_v2 rd, compaction_type_options::scrub::mode scrub_mode, uint64_t& validation_errors);
|
||||
friend flat_mutation_reader_v2 make_scrubbing_reader(flat_mutation_reader_v2 rd, compaction_type_options::scrub::mode scrub_mode);
|
||||
friend flat_mutation_reader make_scrubbing_reader(flat_mutation_reader rd, compaction_type_options::scrub::mode scrub_mode);
|
||||
};
|
||||
|
||||
flat_mutation_reader_v2 make_scrubbing_reader(flat_mutation_reader_v2 rd, compaction_type_options::scrub::mode scrub_mode, uint64_t& validation_errors) {
|
||||
return make_flat_mutation_reader_v2<scrub_compaction::reader>(std::move(rd), scrub_mode, validation_errors);
|
||||
flat_mutation_reader_v2 make_scrubbing_reader(flat_mutation_reader_v2 rd, compaction_type_options::scrub::mode scrub_mode) {
|
||||
return make_flat_mutation_reader_v2<scrub_compaction::reader>(std::move(rd), scrub_mode);
|
||||
}
|
||||
|
||||
class resharding_compaction final : public compaction {
|
||||
@@ -1599,14 +1567,14 @@ public:
|
||||
|
||||
future<compaction_result> compaction::run(std::unique_ptr<compaction> c) {
|
||||
return seastar::async([c = std::move(c)] () mutable {
|
||||
c->setup().get();
|
||||
c->setup();
|
||||
auto consumer = c->consume();
|
||||
|
||||
auto start_time = db_clock::now();
|
||||
try {
|
||||
consumer.get();
|
||||
} catch (...) {
|
||||
c->on_interrupt(std::current_exception());
|
||||
c->delete_sstables_for_interrupted_compaction();
|
||||
c = nullptr; // make sure writers are stopped while running in thread context. This is because of calls to file.close().get();
|
||||
throw;
|
||||
}
|
||||
@@ -1658,10 +1626,10 @@ static std::unique_ptr<compaction> make_compaction(table_state& table_s, sstable
|
||||
return descriptor.options.visit(visitor_factory);
|
||||
}
|
||||
|
||||
future<uint64_t> scrub_validate_mode_validate_reader(flat_mutation_reader_v2 reader, const compaction_data& cdata) {
|
||||
future<bool> scrub_validate_mode_validate_reader(flat_mutation_reader_v2 reader, const compaction_data& cdata) {
|
||||
auto schema = reader.schema();
|
||||
|
||||
uint64_t errors = 0;
|
||||
bool valid = true;
|
||||
std::exception_ptr ex;
|
||||
|
||||
try {
|
||||
@@ -1680,24 +1648,24 @@ future<uint64_t> scrub_validate_mode_validate_reader(flat_mutation_reader_v2 rea
|
||||
if (!validator(mf)) {
|
||||
scrub_compaction::report_invalid_partition_start(compaction_type::Scrub, validator, ps.key());
|
||||
validator.reset(mf);
|
||||
++errors;
|
||||
valid = false;
|
||||
}
|
||||
if (!validator(ps.key())) {
|
||||
scrub_compaction::report_invalid_partition(compaction_type::Scrub, validator, ps.key());
|
||||
validator.reset(ps.key());
|
||||
++errors;
|
||||
valid = false;
|
||||
}
|
||||
} else {
|
||||
if (!validator(mf)) {
|
||||
scrub_compaction::report_invalid_mutation_fragment(compaction_type::Scrub, validator, mf);
|
||||
validator.reset(mf);
|
||||
++errors;
|
||||
valid = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!validator.on_end_of_stream()) {
|
||||
scrub_compaction::report_invalid_end_of_stream(compaction_type::Scrub, validator);
|
||||
++errors;
|
||||
valid = false;
|
||||
}
|
||||
} catch (...) {
|
||||
ex = std::current_exception();
|
||||
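As an illustration of the counting style used above (violations accumulated into a counter instead of collapsing the result into a single valid/invalid bool), a trivial standalone validator that counts ordering violations in a stream:

#include <cstdint>
#include <iostream>
#include <vector>

// Streams through the input once and counts every out-of-order element,
// so the caller can report how many problems were found rather than just
// whether any existed.
uint64_t count_ordering_errors(const std::vector<int>& stream) {
    uint64_t errors = 0;
    for (std::size_t i = 1; i < stream.size(); ++i) {
        if (stream[i] < stream[i - 1]) {
            ++errors;
        }
    }
    return errors;
}

int main() {
    std::cout << count_ordering_errors({1, 2, 5, 4, 7, 3}) << "\n"; // prints 2
}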
@@ -1709,14 +1677,14 @@ future<uint64_t> scrub_validate_mode_validate_reader(flat_mutation_reader_v2 rea
|
||||
co_return coroutine::exception(std::move(ex));
|
||||
}
|
||||
|
||||
co_return errors;
|
||||
co_return valid;
|
||||
}
|
||||
|
||||
static future<compaction_result> scrub_sstables_validate_mode(sstables::compaction_descriptor descriptor, compaction_data& cdata, table_state& table_s) {
|
||||
auto schema = table_s.schema();
|
||||
|
||||
formatted_sstables_list sstables_list_msg;
|
||||
auto sstables = make_lw_shared<sstables::sstable_set>(sstables::make_partitioned_sstable_set(schema, false));
|
||||
auto sstables = make_lw_shared<sstables::sstable_set>(sstables::make_partitioned_sstable_set(schema, make_lw_shared<sstable_list>(sstable_list{}), false));
|
||||
for (const auto& sst : descriptor.sstables) {
|
||||
sstables_list_msg += sst;
|
||||
sstables->insert(sst);
|
||||
@@ -1727,11 +1695,11 @@ static future<compaction_result> scrub_sstables_validate_mode(sstables::compacti
|
||||
auto permit = table_s.make_compaction_reader_permit();
|
||||
auto reader = sstables->make_crawling_reader(schema, permit, descriptor.io_priority, nullptr);
|
||||
|
||||
const auto validation_errors = co_await scrub_validate_mode_validate_reader(std::move(reader), cdata);
|
||||
const auto valid = co_await scrub_validate_mode_validate_reader(std::move(reader), cdata);
|
||||
|
||||
clogger.info("Finished scrubbing in validate mode {} - sstable(s) are {}", sstables_list_msg, validation_errors == 0 ? "valid" : "invalid");
|
||||
clogger.info("Finished scrubbing in validate mode {} - sstable(s) are {}", sstables_list_msg, valid ? "valid" : "invalid");
|
||||
|
||||
if (validation_errors != 0) {
|
||||
if (!valid) {
|
||||
for (auto& sst : *sstables->all()) {
|
||||
co_await sst->move_to_quarantine();
|
||||
}
|
||||
@@ -1739,10 +1707,7 @@ static future<compaction_result> scrub_sstables_validate_mode(sstables::compacti
|
||||
|
||||
co_return compaction_result {
|
||||
.new_sstables = {},
|
||||
.stats = {
|
||||
.ended_at = db_clock::now(),
|
||||
.validation_errors = validation_errors,
|
||||
},
|
||||
.ended_at = db_clock::now(),
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1781,13 +1746,13 @@ get_fully_expired_sstables(const table_state& table_s, const std::vector<sstable
}
}

auto compacted_undeleted_gens = boost::copy_range<std::unordered_set<generation_type>>(table_s.compacted_undeleted_sstables()
auto compacted_undeleted_gens = boost::copy_range<std::unordered_set<int64_t>>(table_s.compacted_undeleted_sstables()
| boost::adaptors::transformed(std::mem_fn(&sstables::sstable::generation)));
auto has_undeleted_ancestor = [&compacted_undeleted_gens] (auto& candidate) {
// Get ancestors from the sstable, which is empty after restart. It works for this purpose because
// we only need to check that an sstable compacted *in this instance* doesn't have an undeleted ancestor.
// We don't get it from the sstable metadata because the mc format doesn't have it available.
return boost::algorithm::any_of(candidate->compaction_ancestors(), [&compacted_undeleted_gens] (const generation_type& gen) {
return boost::algorithm::any_of(candidate->compaction_ancestors(), [&compacted_undeleted_gens] (auto gen) {
return compacted_undeleted_gens.contains(gen);
});
};
@@ -1828,8 +1793,4 @@ unsigned compaction_descriptor::fan_in() const {
return boost::copy_range<std::unordered_set<utils::UUID>>(sstables | boost::adaptors::transformed(std::mem_fn(&sstables::sstable::run_identifier))).size();
}

uint64_t compaction_descriptor::sstables_size() const {
return boost::accumulate(sstables | boost::adaptors::transformed(std::mem_fn(&sstables::sstable::data_size)), uint64_t(0));
}

}

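For readers less familiar with the Boost range one-liners above, the following is a minimal, self-contained sketch of the same two computations in plain STL: fan-in is the number of distinct runs among the input sstables, and sstables_size() is the sum of their data sizes. The `sst_info` struct is a hypothetical stand-in for the two sstable fields the helpers actually read, not the real `sstables::sstable` type.

#include <cstddef>
#include <cstdint>
#include <string>
#include <unordered_set>
#include <vector>

// Hypothetical stand-in for the sstable fields used by the two helpers.
struct sst_info {
    std::string run_identifier; // stands in for utils::UUID
    uint64_t data_size = 0;
};

// fan_in(): number of distinct runs among the input sstables.
std::size_t fan_in(const std::vector<sst_info>& ssts) {
    std::unordered_set<std::string> runs;
    for (const auto& s : ssts) {
        runs.insert(s.run_identifier);
    }
    return runs.size();
}

// sstables_size(): total data size of all input sstables.
uint64_t sstables_size(const std::vector<sst_info>& ssts) {
    uint64_t total = 0;
    for (const auto& s : ssts) {
        total += s.data_size;
    }
    return total;
}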
@@ -17,8 +17,8 @@
#include "utils/UUID.hh"
#include "table_state.hh"
#include <seastar/core/thread.hh>
#include <seastar/core/abort_source.hh>

class flat_mutation_reader;
using namespace compaction;

namespace sstables {
@@ -40,19 +40,9 @@ public:
friend std::ostream& operator<<(std::ostream&, pretty_printed_throughput);
};

// Return the name of the compaction type
// as used over the REST api, e.g. "COMPACTION" or "CLEANUP".
sstring compaction_name(compaction_type type);

// Reverse map the name of the compaction type
// as used over the REST api, e.g. "COMPACTION" or "CLEANUP",
// to the compaction_type enum code.
compaction_type to_compaction_type(sstring type_name);

// Return a string representing the compaction type
// as a verb for logging purposes, e.g. "Compact" or "Cleanup".
std::string_view to_string(compaction_type type);

struct compaction_info {
utils::UUID compaction_uuid;
compaction_type type = compaction_type::Compaction;
@@ -66,7 +56,6 @@ struct compaction_data {
uint64_t total_partitions = 0;
uint64_t total_keys_written = 0;
sstring stop_requested;
abort_source abort;
utils::UUID compaction_uuid;
unsigned compaction_fan_in = 0;
struct replacement {
@@ -80,39 +69,14 @@ struct compaction_data {
}

void stop(sstring reason) {
if (!abort.abort_requested()) {
stop_requested = std::move(reason);
abort.request_abort();
}
}
};

struct compaction_stats {
std::chrono::time_point<db_clock> ended_at;
uint64_t start_size = 0;
uint64_t end_size = 0;
uint64_t validation_errors = 0;
// Bloom filter checks during max purgeable calculation
uint64_t bloom_filter_checks = 0;

compaction_stats& operator+=(const compaction_stats& r) {
ended_at = std::max(ended_at, r.ended_at);
start_size += r.start_size;
end_size += r.end_size;
validation_errors += r.validation_errors;
bloom_filter_checks += r.bloom_filter_checks;
return *this;
}
friend compaction_stats operator+(const compaction_stats& l, const compaction_stats& r) {
auto tmp = l;
tmp += r;
return tmp;
stop_requested = std::move(reason);
}
};

struct compaction_result {
std::vector<sstables::shared_sstable> new_sstables;
compaction_stats stats;
std::chrono::time_point<db_clock> ended_at;
uint64_t end_size = 0;
};

// Compact a list of N sstables into M sstables.
@@ -131,9 +95,9 @@ std::unordered_set<sstables::shared_sstable>
get_fully_expired_sstables(const table_state& table_s, const std::vector<sstables::shared_sstable>& compacting, gc_clock::time_point gc_before);

// For tests, can drop after we virtualize sstables.
flat_mutation_reader_v2 make_scrubbing_reader(flat_mutation_reader_v2 rd, compaction_type_options::scrub::mode scrub_mode, uint64_t& validation_errors);
flat_mutation_reader_v2 make_scrubbing_reader(flat_mutation_reader_v2 rd, compaction_type_options::scrub::mode scrub_mode);

// For tests, can drop after we virtualize sstables.
future<uint64_t> scrub_validate_mode_validate_reader(flat_mutation_reader_v2 rd, const compaction_data& info);
future<bool> scrub_validate_mode_validate_reader(flat_mutation_reader_v2 rd, const compaction_data& info);

}

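To make the merge semantics of the compaction_stats struct above concrete: sizes and counters add up, while ended_at keeps the latest end time. The sketch below is a simplified, self-contained stand-in that mirrors only the fields shown in this hunk; it is not the real sstables::compaction_stats type.

#include <algorithm>
#include <cassert>
#include <cstdint>

// Simplified stand-in mirroring the merge semantics of sstables::compaction_stats.
struct stats {
    int64_t ended_at = 0;            // stands in for db_clock::time_point
    uint64_t start_size = 0;
    uint64_t end_size = 0;
    uint64_t validation_errors = 0;
    uint64_t bloom_filter_checks = 0;

    stats& operator+=(const stats& r) {
        ended_at = std::max(ended_at, r.ended_at); // keep the later end time
        start_size += r.start_size;
        end_size += r.end_size;
        validation_errors += r.validation_errors;
        bloom_filter_checks += r.bloom_filter_checks;
        return *this;
    }
};

int main() {
    stats a{.ended_at = 10, .start_size = 100, .end_size = 40};
    stats b{.ended_at = 12, .start_size = 50, .end_size = 30, .validation_errors = 1};
    a += b;
    // Counters are summed, ended_at takes the later of the two timestamps.
    assert(a.ended_at == 12 && a.start_size == 150 && a.end_size == 70 && a.validation_errors == 1);
    return 0;
}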
@@ -60,7 +60,8 @@ public:
|
||||
using ongoing_compactions = std::unordered_map<sstables::shared_sstable, backlog_read_progress_manager*>;
|
||||
|
||||
struct impl {
|
||||
virtual void replace_sstables(std::vector<sstables::shared_sstable> old_ssts, std::vector<sstables::shared_sstable> new_ssts) = 0;
|
||||
virtual void add_sstable(sstables::shared_sstable sst) = 0;
|
||||
virtual void remove_sstable(sstables::shared_sstable sst) = 0;
|
||||
virtual double backlog(const ongoing_writes& ow, const ongoing_compactions& oc) const = 0;
|
||||
virtual ~impl() { }
|
||||
};
|
||||
@@ -71,21 +72,22 @@ public:
|
||||
~compaction_backlog_tracker();
|
||||
|
||||
double backlog() const;
|
||||
void replace_sstables(const std::vector<sstables::shared_sstable>& old_ssts, const std::vector<sstables::shared_sstable>& new_ssts);
|
||||
void add_sstable(sstables::shared_sstable sst);
|
||||
void remove_sstable(sstables::shared_sstable sst);
|
||||
void register_partially_written_sstable(sstables::shared_sstable sst, backlog_write_progress_manager& wp);
|
||||
void register_compacting_sstable(sstables::shared_sstable sst, backlog_read_progress_manager& rp);
|
||||
void transfer_ongoing_charges(compaction_backlog_tracker& new_bt, bool move_read_charges = true);
|
||||
void revert_charges(sstables::shared_sstable sst);
|
||||
|
||||
void disable() {
|
||||
_impl = {};
|
||||
_ongoing_writes = {};
|
||||
_ongoing_compactions = {};
|
||||
}
|
||||
private:
|
||||
// Returns true if this SSTable can be added or removed from the tracker.
|
||||
bool sstable_belongs_to_tracker(const sstables::shared_sstable& sst);
|
||||
bool disabled() const noexcept { return !_impl; }
|
||||
|
||||
void disable() {
|
||||
_disabled = true;
|
||||
_ongoing_writes = {};
|
||||
_ongoing_compactions = {};
|
||||
}
|
||||
bool _disabled = false;
|
||||
std::unique_ptr<impl> _impl;
|
||||
// We keep track of this so that we can transfer to a new tracker if the compaction strategy is
|
||||
// changed in the middle of a compaction.
|
||||
|
||||
@@ -20,16 +20,6 @@
|
||||
#include "dht/i_partitioner.hh"
|
||||
#include "compaction_weight_registration.hh"
|
||||
|
||||
namespace compaction {
|
||||
|
||||
using owned_ranges_ptr = lw_shared_ptr<const dht::token_range_vector>;
|
||||
|
||||
inline owned_ranges_ptr make_owned_ranges_ptr(dht::token_range_vector&& ranges) {
|
||||
return make_lw_shared<const dht::token_range_vector>(std::move(ranges));
|
||||
}
|
||||
|
||||
} // namespace compaction
|
||||
|
||||
namespace sstables {
|
||||
|
||||
enum class compaction_type {
|
||||
@@ -64,10 +54,10 @@ public:
|
||||
struct regular {
|
||||
};
|
||||
struct cleanup {
|
||||
compaction::owned_ranges_ptr owned_ranges;
|
||||
dht::token_range_vector owned_ranges;
|
||||
};
|
||||
struct upgrade {
|
||||
compaction::owned_ranges_ptr owned_ranges;
|
||||
dht::token_range_vector owned_ranges;
|
||||
};
|
||||
struct scrub {
|
||||
enum class mode {
|
||||
@@ -112,11 +102,11 @@ public:
|
||||
return compaction_type_options(regular{});
|
||||
}
|
||||
|
||||
static compaction_type_options make_cleanup(compaction::owned_ranges_ptr owned_ranges) {
|
||||
static compaction_type_options make_cleanup(dht::token_range_vector&& owned_ranges) {
|
||||
return compaction_type_options(cleanup{std::move(owned_ranges)});
|
||||
}
|
||||
|
||||
static compaction_type_options make_upgrade(compaction::owned_ranges_ptr owned_ranges) {
|
||||
static compaction_type_options make_upgrade(dht::token_range_vector&& owned_ranges) {
|
||||
return compaction_type_options(upgrade{std::move(owned_ranges)});
|
||||
}
|
||||
|
||||
@@ -155,6 +145,8 @@ struct compaction_descriptor {
|
||||
uint64_t max_sstable_bytes;
|
||||
// Run identifier of output sstables.
|
||||
utils::UUID run_identifier;
|
||||
// Calls compaction manager's task for this compaction to release reference to exhausted sstables.
|
||||
std::function<void(const std::vector<shared_sstable>& exhausted_sstables)> release_exhausted;
|
||||
// The options passed down to the compaction code.
|
||||
// This also selects the kind of compaction to do.
|
||||
compaction_type_options options = compaction_type_options::make_regular();
|
||||
@@ -173,12 +165,14 @@ struct compaction_descriptor {
|
||||
static constexpr uint64_t default_max_sstable_bytes = std::numeric_limits<uint64_t>::max();
|
||||
|
||||
explicit compaction_descriptor(std::vector<sstables::shared_sstable> sstables,
|
||||
std::optional<sstables::sstable_set> all_sstables_snapshot,
|
||||
::io_priority_class io_priority,
|
||||
int level = default_level,
|
||||
uint64_t max_sstable_bytes = default_max_sstable_bytes,
|
||||
utils::UUID run_identifier = utils::make_random_uuid(),
|
||||
compaction_type_options options = compaction_type_options::make_regular())
|
||||
: sstables(std::move(sstables))
|
||||
, all_sstables_snapshot(std::move(all_sstables_snapshot))
|
||||
, level(level)
|
||||
, max_sstable_bytes(max_sstable_bytes)
|
||||
, run_identifier(run_identifier)
|
||||
@@ -188,8 +182,10 @@ struct compaction_descriptor {
|
||||
|
||||
explicit compaction_descriptor(sstables::has_only_fully_expired has_only_fully_expired,
|
||||
std::vector<sstables::shared_sstable> sstables,
|
||||
std::optional<sstables::sstable_set> all_sstables_snapshot,
|
||||
::io_priority_class io_priority)
|
||||
: sstables(std::move(sstables))
|
||||
, all_sstables_snapshot(std::move(all_sstables_snapshot))
|
||||
, level(default_level)
|
||||
, max_sstable_bytes(default_max_sstable_bytes)
|
||||
, run_identifier(utils::make_random_uuid())
|
||||
@@ -200,10 +196,6 @@ struct compaction_descriptor {
|
||||
|
||||
// Return fan-in of this job, which is equal to its number of runs.
|
||||
unsigned fan_in() const;
|
||||
// Enables garbage collection for this descriptor, meaning that compaction will be able to purge expired data
|
||||
void enable_garbage_collection(sstables::sstable_set snapshot) { all_sstables_snapshot = std::move(snapshot); }
|
||||
// Returns total size of all sstables contained in this descriptor
|
||||
uint64_t sstables_size() const;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
@@ -20,8 +20,6 @@
|
||||
#include <seastar/core/condition-variable.hh>
|
||||
#include "log.hh"
|
||||
#include "utils/exponential_backoff_retry.hh"
|
||||
#include "utils/updateable_value.hh"
|
||||
#include "utils/serialized_action.hh"
|
||||
#include <vector>
|
||||
#include <list>
|
||||
#include <functional>
|
||||
@@ -29,34 +27,33 @@
|
||||
#include "compaction.hh"
|
||||
#include "compaction_weight_registration.hh"
|
||||
#include "compaction_backlog_manager.hh"
|
||||
#include "compaction/compaction_descriptor.hh"
|
||||
#include "strategy_control.hh"
|
||||
#include "backlog_controller.hh"
|
||||
#include "seastarx.hh"
|
||||
#include "sstables/exceptions.hh"
|
||||
|
||||
namespace replica {
|
||||
class table;
|
||||
}
|
||||
|
||||
class compacting_sstable_registration;
|
||||
|
||||
using throw_if_stopping = bool_class<struct throw_if_stopping_tag>;
|
||||
|
||||
// Compaction manager provides facilities to submit and track compaction jobs on
|
||||
// behalf of existing tables.
|
||||
class compaction_manager {
|
||||
public:
|
||||
using compaction_stats_opt = std::optional<sstables::compaction_stats>;
|
||||
struct stats {
|
||||
int64_t pending_tasks = 0;
|
||||
int64_t completed_tasks = 0;
|
||||
uint64_t active_tasks = 0; // Number of compaction going on.
|
||||
int64_t errors = 0;
|
||||
};
|
||||
using scheduling_group = backlog_controller::scheduling_group;
|
||||
struct config {
|
||||
scheduling_group compaction_sched_group;
|
||||
scheduling_group maintenance_sched_group;
|
||||
size_t available_memory = 0;
|
||||
utils::updateable_value<float> static_shares = utils::updateable_value<float>(0);
|
||||
utils::updateable_value<uint32_t> throughput_mb_per_sec = utils::updateable_value<uint32_t>(0);
|
||||
struct compaction_scheduling_group {
|
||||
seastar::scheduling_group cpu;
|
||||
const ::io_priority_class& io;
|
||||
};
|
||||
struct maintenance_scheduling_group {
|
||||
seastar::scheduling_group cpu;
|
||||
const ::io_priority_class& io;
|
||||
};
|
||||
private:
|
||||
struct compaction_state {
|
||||
@@ -70,177 +67,46 @@ private:
|
||||
// Raised by any function running under run_with_compaction_disabled();
|
||||
long compaction_disabled_counter = 0;
|
||||
|
||||
// Signaled whenever a compaction task completes.
|
||||
condition_variable compaction_done;
|
||||
|
||||
compaction_state() = default;
|
||||
compaction_state(compaction_state&&) = default;
|
||||
~compaction_state();
|
||||
|
||||
bool compaction_disabled() const noexcept {
|
||||
return compaction_disabled_counter > 0;
|
||||
}
|
||||
};
|
||||
|
||||
public:
|
||||
class can_purge_tombstones_tag;
|
||||
using can_purge_tombstones = bool_class<can_purge_tombstones_tag>;
|
||||
struct task {
|
||||
replica::table* compacting_table = nullptr;
|
||||
shared_future<> compaction_done = make_ready_future<>();
|
||||
exponential_backoff_retry compaction_retry = exponential_backoff_retry(std::chrono::seconds(5), std::chrono::seconds(300));
|
||||
bool stopping = false;
|
||||
sstables::compaction_type type = sstables::compaction_type::Compaction;
|
||||
bool compaction_running = false;
|
||||
std::optional<utils::UUID> output_run_identifier;
|
||||
sstables::compaction_data compaction_data;
|
||||
compaction_state& compaction_state;
|
||||
gate::holder gate_holder;
|
||||
|
||||
class task {
|
||||
public:
|
||||
enum class state {
|
||||
none, // initial and final state
|
||||
pending, // task is blocked on a lock, may alternate with active
|
||||
// counted in compaction_manager::stats::pending_tasks
|
||||
active, // task initiated active compaction, may alternate with pending
|
||||
// counted in compaction_manager::stats::active_tasks
|
||||
done, // task completed successfully (may transition only to state::none)
|
||||
// counted in compaction_manager::stats::completed_tasks
|
||||
postponed, // task was postponed (may transition only to state::none)
|
||||
// represented by the postponed_compactions metric
|
||||
failed, // task failed (may transition only to state::none)
|
||||
// counted in compaction_manager::stats::errors
|
||||
};
|
||||
static std::string_view to_string(state);
|
||||
protected:
|
||||
compaction_manager& _cm;
|
||||
compaction::table_state* _compacting_table = nullptr;
|
||||
compaction_state& _compaction_state;
|
||||
sstables::compaction_data _compaction_data;
|
||||
state _state = state::none;
|
||||
|
||||
private:
|
||||
shared_future<compaction_stats_opt> _compaction_done = make_ready_future<compaction_stats_opt>();
|
||||
exponential_backoff_retry _compaction_retry = exponential_backoff_retry(std::chrono::seconds(5), std::chrono::seconds(300));
|
||||
sstables::compaction_type _type;
|
||||
utils::UUID _output_run_identifier;
|
||||
gate::holder _gate_holder;
|
||||
sstring _description;
|
||||
|
||||
public:
|
||||
explicit task(compaction_manager& mgr, compaction::table_state* t, sstables::compaction_type type, sstring desc);
|
||||
explicit task(replica::table* t, sstables::compaction_type type, struct compaction_state& cs)
|
||||
: compacting_table(t)
|
||||
, type(type)
|
||||
, compaction_state(cs)
|
||||
, gate_holder(compaction_state.gate.hold())
|
||||
{}
|
||||
|
||||
task(task&&) = delete;
|
||||
task(const task&) = delete;
|
||||
|
||||
virtual ~task();
|
||||
|
||||
// called when a compaction replaces the exhausted sstables with the new set
|
||||
struct on_replacement {
|
||||
virtual ~on_replacement() {}
|
||||
// called after the replacement completes
|
||||
// @param sstables the old sstable which are replaced in this replacement
|
||||
virtual void on_removal(const std::vector<sstables::shared_sstable>& sstables) = 0;
|
||||
// called before the replacement happens
|
||||
// @param sstables the new sstables to be added to the table's sstable set
|
||||
virtual void on_addition(const std::vector<sstables::shared_sstable>& sstables) = 0;
|
||||
};
|
||||
|
||||
protected:
|
||||
virtual future<compaction_stats_opt> do_run() = 0;
|
||||
|
||||
state switch_state(state new_state);
|
||||
|
||||
future<semaphore_units<named_semaphore_exception_factory>> acquire_semaphore(named_semaphore& sem, size_t units = 1);
|
||||
|
||||
// Return true if the task isn't stopped
|
||||
// and the compaction manager allows proceeding.
|
||||
inline bool can_proceed(throw_if_stopping do_throw_if_stopping = throw_if_stopping::no) const;
|
||||
void setup_new_compaction(utils::UUID output_run_id = utils::null_uuid());
|
||||
void finish_compaction(state finish_state = state::done) noexcept;
|
||||
|
||||
// The compaction manager stops itself if it finds a storage I/O error, since such an error also
// stops the transport services and no progress can be made anyway.
// Returns an exception if the error is judged fatal and the compaction task must be stopped;
// otherwise, returns stop_iteration::no after sleeping for the exponential retry period.
future<stop_iteration> maybe_retry(std::exception_ptr err, bool throw_on_abort = false);

future<sstables::compaction_result> compact_sstables_and_update_history(sstables::compaction_descriptor descriptor, sstables::compaction_data& cdata, on_replacement&,
|
||||
can_purge_tombstones can_purge = can_purge_tombstones::yes);
|
||||
future<sstables::compaction_result> compact_sstables(sstables::compaction_descriptor descriptor, sstables::compaction_data& cdata, on_replacement&,
|
||||
can_purge_tombstones can_purge = can_purge_tombstones::yes, sstables::offstrategy offstrategy = sstables::offstrategy::no);
|
||||
future<> update_history(compaction::table_state& t, const sstables::compaction_result& res, const sstables::compaction_data& cdata);
|
||||
bool should_update_history(sstables::compaction_type ct) {
|
||||
return ct == sstables::compaction_type::Compaction;
|
||||
}
|
||||
public:
|
||||
future<compaction_stats_opt> run() noexcept;
|
||||
|
||||
const compaction::table_state* compacting_table() const noexcept {
|
||||
return _compacting_table;
|
||||
}
|
||||
|
||||
sstables::compaction_type type() const noexcept {
|
||||
return _type;
|
||||
}
|
||||
|
||||
bool compaction_running() const noexcept {
|
||||
return _state == state::active;
|
||||
}
|
||||
|
||||
const sstables::compaction_data& compaction_data() const noexcept {
|
||||
return _compaction_data;
|
||||
}
|
||||
|
||||
sstables::compaction_data& compaction_data() noexcept {
|
||||
return _compaction_data;
|
||||
}
|
||||
void setup_new_compaction();
|
||||
void finish_compaction() noexcept;
|
||||
|
||||
bool generating_output_run() const noexcept {
|
||||
return compaction_running() && _output_run_identifier;
|
||||
return compaction_running && output_run_identifier;
|
||||
}
|
||||
const utils::UUID& output_run_id() const noexcept {
|
||||
return _output_run_identifier;
|
||||
return *output_run_identifier;
|
||||
}
|
||||
|
||||
const sstring& description() const noexcept {
|
||||
return _description;
|
||||
}
|
||||
|
||||
future<compaction_stats_opt> compaction_done() noexcept {
|
||||
return _compaction_done.get_future();
|
||||
}
|
||||
|
||||
bool stopping() const noexcept {
|
||||
return _compaction_data.abort.abort_requested();
|
||||
}
|
||||
|
||||
void stop(sstring reason) noexcept;
|
||||
|
||||
sstables::compaction_stopped_exception make_compaction_stopped_exception() const;
|
||||
|
||||
std::string describe() const;
|
||||
};
|
||||
|
||||
class sstables_task : public task {
|
||||
protected:
|
||||
std::vector<sstables::shared_sstable> _sstables;
|
||||
|
||||
void set_sstables(std::vector<sstables::shared_sstable> new_sstables);
|
||||
sstables::shared_sstable consume_sstable();
|
||||
|
||||
public:
|
||||
explicit sstables_task(compaction_manager& mgr, compaction::table_state* t, sstables::compaction_type compaction_type, sstring desc, std::vector<sstables::shared_sstable> sstables)
|
||||
: task(mgr, t, compaction_type, std::move(desc))
|
||||
{
|
||||
set_sstables(std::move(sstables));
|
||||
}
|
||||
|
||||
virtual ~sstables_task();
|
||||
};
|
||||
|
||||
class major_compaction_task;
|
||||
class custom_compaction_task;
|
||||
class regular_compaction_task;
|
||||
class offstrategy_compaction_task;
|
||||
class rewrite_sstables_compaction_task;
|
||||
class cleanup_sstables_compaction_task;
|
||||
class validate_sstables_compaction_task;
|
||||
class compaction_manager_test_task;
|
||||
|
||||
private:
|
||||
// compaction manager may have N fibers to allow parallel compaction per shard.
|
||||
std::list<shared_ptr<task>> _tasks;
|
||||
std::list<lw_shared_ptr<task>> _tasks;
|
||||
|
||||
// Possible states in which the compaction manager can be found.
|
||||
//
|
||||
@@ -270,51 +136,33 @@ private:
|
||||
future<> _waiting_reevalution = make_ready_future<>();
|
||||
condition_variable _postponed_reevaluation;
|
||||
// tables that wait for compaction but had its submission postponed due to ongoing compaction.
|
||||
std::unordered_set<compaction::table_state*> _postponed;
|
||||
std::unordered_set<replica::table*> _postponed;
|
||||
// tracks taken weights of ongoing compactions, only one compaction per weight is allowed.
|
||||
// weight is value assigned to a compaction job that is log base N of total size of all input sstables.
|
||||
std::unordered_set<int> _weight_tracker;
|
||||
|
||||
std::unordered_map<compaction::table_state*, compaction_state> _compaction_state;
|
||||
std::unordered_map<replica::table*, compaction_state> _compaction_state;
|
||||
|
||||
// Purpose is to serialize all maintenance (non regular) compaction activity to reduce aggressiveness and space requirement.
|
||||
// If the operation must be serialized with regular, then the per-table write lock must be taken.
|
||||
seastar::named_semaphore _maintenance_ops_sem = {1, named_semaphore_exception_factory{"maintenance operation"}};
|
||||
|
||||
// This semaphore ensures that off-strategy compaction will be serialized for
|
||||
// all tables, to limit space requirement and protect against candidates
|
||||
// being picked more than once.
|
||||
seastar::named_semaphore _off_strategy_sem = {1, named_semaphore_exception_factory{"off-strategy compaction"}};
|
||||
seastar::named_semaphore _custom_jobs_sem = {1, named_semaphore_exception_factory{"custom jobs"}};
|
||||
|
||||
std::function<void()> compaction_submission_callback();
|
||||
// all registered tables are reevaluated at a constant interval.
|
||||
// Submission is a NO-OP when there's nothing to do, so it's fine to call it regularly.
|
||||
timer<lowres_clock> _compaction_submission_timer = timer<lowres_clock>(compaction_submission_callback());
|
||||
static constexpr std::chrono::seconds periodic_compaction_submission_interval() { return std::chrono::seconds(3600); }
|
||||
|
||||
config _cfg;
|
||||
timer<lowres_clock> _compaction_submission_timer;
|
||||
compaction_controller _compaction_controller;
|
||||
compaction_backlog_manager _backlog_manager;
|
||||
optimized_optional<abort_source::subscription> _early_abort_subscription;
|
||||
serialized_action _throughput_updater;
|
||||
std::optional<utils::observer<uint32_t>> _throughput_option_observer;
|
||||
serialized_action _update_compaction_static_shares_action;
|
||||
utils::observer<float> _compaction_static_shares_observer;
|
||||
uint64_t _validation_errors = 0;
|
||||
|
||||
class strategy_control;
|
||||
std::unique_ptr<strategy_control> _strategy_control;
|
||||
private:
|
||||
future<compaction_stats_opt> perform_task(shared_ptr<task>, throw_if_stopping do_throw_if_stopping = throw_if_stopping::no);
|
||||
|
||||
future<> stop_tasks(std::vector<shared_ptr<task>> tasks, sstring reason);
|
||||
future<> update_throughput(uint32_t value_mbs);
|
||||
future<> task_stop(lw_shared_ptr<task> task, sstring reason);
|
||||
future<> stop_tasks(std::vector<lw_shared_ptr<task>> tasks, sstring reason);
|
||||
|
||||
// Return the largest fan-in of currently running compactions
|
||||
unsigned current_compaction_fan_in_threshold() const;
|
||||
|
||||
// Return true if compaction can be initiated
|
||||
bool can_register_compaction(compaction::table_state& t, int weight, unsigned fan_in) const;
|
||||
bool can_register_compaction(replica::table* t, int weight, unsigned fan_in) const;
|
||||
// Register weight for a table. Do that only if can_register_weight()
|
||||
// returned true.
|
||||
void register_weight(int weight);
|
||||
@@ -322,84 +170,59 @@ private:
|
||||
void deregister_weight(int weight);
|
||||
|
||||
// Get candidates for compaction strategy, which are all sstables but the ones being compacted.
|
||||
std::vector<sstables::shared_sstable> get_candidates(compaction::table_state& t);
|
||||
std::vector<sstables::shared_sstable> get_candidates(const replica::table& t);
|
||||
|
||||
template <typename Iterator, typename Sentinel>
|
||||
requires std::same_as<Sentinel, Iterator> || std::sentinel_for<Sentinel, Iterator>
|
||||
void register_compacting_sstables(Iterator first, Sentinel last);
|
||||
|
||||
template <typename Iterator, typename Sentinel>
|
||||
requires std::same_as<Sentinel, Iterator> || std::sentinel_for<Sentinel, Iterator>
|
||||
void deregister_compacting_sstables(Iterator first, Sentinel last);
|
||||
void register_compacting_sstables(const std::vector<sstables::shared_sstable>& sstables);
|
||||
void deregister_compacting_sstables(const std::vector<sstables::shared_sstable>& sstables);
|
||||
|
||||
// gets the table's compaction state
|
||||
// throws std::out_of_range exception if not found.
|
||||
compaction_state& get_compaction_state(compaction::table_state* t);
|
||||
compaction_state& get_compaction_state(replica::table* t);
|
||||
|
||||
// Return true if compaction manager is enabled and
|
||||
// table still exists and compaction is not disabled for the table.
|
||||
inline bool can_proceed(compaction::table_state* t) const;
|
||||
// Return true if compaction manager and task weren't asked to stop.
|
||||
inline bool can_proceed(const lw_shared_ptr<task>& task);
|
||||
|
||||
future<> postponed_compactions_reevaluation();
|
||||
void reevaluate_postponed_compactions() noexcept;
|
||||
inline future<> put_task_to_sleep(lw_shared_ptr<task>& task);
|
||||
|
||||
// The compaction manager stops itself if it finds a storage I/O error, since such an error also
// stops the transport services and no progress can be made anyway.
// Returns true if the error is judged fatal and the compaction task must be stopped
inline bool maybe_stop_on_error(std::exception_ptr err, bool can_retry);
|
||||
|
||||
void postponed_compactions_reevaluation();
|
||||
void reevaluate_postponed_compactions();
|
||||
// Postpone compaction for a table that couldn't be executed due to ongoing
|
||||
// similar-sized compaction.
|
||||
void postpone_compaction_for_table(compaction::table_state* t);
|
||||
void postpone_compaction_for_table(replica::table* t);
|
||||
|
||||
future<compaction_stats_opt> perform_sstable_scrub_validate_mode(compaction::table_state& t);
|
||||
future<> update_static_shares(float shares);
|
||||
future<> perform_sstable_scrub_validate_mode(replica::table* t);
|
||||
|
||||
compaction_controller _compaction_controller;
|
||||
compaction_backlog_manager _backlog_manager;
|
||||
maintenance_scheduling_group _maintenance_sg;
|
||||
size_t _available_memory;
|
||||
|
||||
using get_candidates_func = std::function<future<std::vector<sstables::shared_sstable>>()>;
|
||||
class can_purge_tombstones_tag;
|
||||
using can_purge_tombstones = bool_class<can_purge_tombstones_tag>;
|
||||
|
||||
// Guarantees that a maintenance task, e.g. cleanup, will be performed on all files available at the time
|
||||
// by retrieving set of candidates only after all compactions for table T were stopped, if any.
|
||||
template<typename TaskType, typename... Args>
|
||||
requires std::derived_from<TaskType, task>
|
||||
future<compaction_stats_opt> perform_task_on_all_files(compaction::table_state& t, sstables::compaction_type_options options, get_candidates_func, Args... args);
|
||||
future<> rewrite_sstables(replica::table* t, sstables::compaction_type_options options, get_candidates_func, can_purge_tombstones can_purge = can_purge_tombstones::yes);
|
||||
|
||||
future<compaction_stats_opt> rewrite_sstables(compaction::table_state& t, sstables::compaction_type_options options, get_candidates_func, can_purge_tombstones can_purge = can_purge_tombstones::yes);
|
||||
optimized_optional<abort_source::subscription> _early_abort_subscription;
|
||||
|
||||
// Stop all fibers, without waiting. Safe to be called multiple times.
|
||||
void do_stop() noexcept;
|
||||
future<> really_do_stop();
|
||||
|
||||
// Propagate replacement of sstables to all ongoing compaction of a given table
|
||||
void propagate_replacement(compaction::table_state& t, const std::vector<sstables::shared_sstable>& removed, const std::vector<sstables::shared_sstable>& added);
|
||||
|
||||
// This constructor is supposed to be used only for testing, so let's be more explicit
// about invoking it. Ref #10146
compaction_manager();
|
||||
class strategy_control;
|
||||
std::unique_ptr<strategy_control> _strategy_control;
|
||||
public:
|
||||
compaction_manager(config cfg, abort_source& as);
|
||||
compaction_manager(compaction_scheduling_group csg, maintenance_scheduling_group msg, size_t available_memory, abort_source& as);
|
||||
compaction_manager(compaction_scheduling_group csg, maintenance_scheduling_group msg, size_t available_memory, uint64_t shares, abort_source& as);
|
||||
compaction_manager();
|
||||
~compaction_manager();
|
||||
class for_testing_tag{};
|
||||
// An inline constructor for testing
|
||||
compaction_manager(for_testing_tag) : compaction_manager() {}
|
||||
|
||||
const scheduling_group& compaction_sg() const noexcept {
|
||||
return _cfg.compaction_sched_group;
|
||||
}
|
||||
|
||||
const scheduling_group& maintenance_sg() const noexcept {
|
||||
return _cfg.maintenance_sched_group;
|
||||
}
|
||||
|
||||
size_t available_memory() const noexcept {
|
||||
return _cfg.available_memory;
|
||||
}
|
||||
|
||||
float static_shares() const noexcept {
|
||||
return _cfg.static_shares.get();
|
||||
}
|
||||
|
||||
uint32_t throughput_mbs() const noexcept {
|
||||
return _cfg.throughput_mb_per_sec.get();
|
||||
}
|
||||
|
||||
void register_metrics();
|
||||
|
||||
// enable the compaction manager.
|
||||
// enable/disable compaction manager.
|
||||
void enable();
|
||||
void disable();
|
||||
|
||||
// Stop all fibers. Ongoing compactions will be waited. Should only be called
|
||||
// once, from main teardown path.
|
||||
@@ -410,19 +233,15 @@ public:
|
||||
// unless it is moved back to enabled state.
|
||||
future<> drain();
|
||||
|
||||
// Stop all fibers, without waiting. Safe to be called multiple times.
|
||||
void do_stop() noexcept;
|
||||
future<> really_do_stop();
|
||||
|
||||
// Submit a table to be compacted.
|
||||
void submit(compaction::table_state& t);
|
||||
|
||||
// Can regular compaction be performed in the given table
|
||||
bool can_perform_regular_compaction(compaction::table_state& t);
|
||||
|
||||
// Maybe wait before adding more sstables
|
||||
// if there are too many sstables.
|
||||
future<> maybe_wait_for_sstable_count_reduction(compaction::table_state& t);
|
||||
void submit(replica::table* t);
|
||||
|
||||
// Submit a table to be off-strategy compacted.
|
||||
// Returns true iff off-strategy compaction was required and performed.
|
||||
future<bool> perform_offstrategy(compaction::table_state& t);
|
||||
future<> perform_offstrategy(replica::table* t);
|
||||
|
||||
// Submit a table to be cleaned up and wait for its termination.
|
||||
//
|
||||
@@ -431,16 +250,16 @@ public:
|
||||
// Cleanup is about discarding keys that are no longer relevant for a
|
||||
// given sstable, e.g. after node loses part of its token range because
|
||||
// of a newly added node.
|
||||
future<> perform_cleanup(owned_ranges_ptr sorted_owned_ranges, compaction::table_state& t);
|
||||
future<> perform_cleanup(replica::database& db, replica::table* t);
|
||||
|
||||
// Submit a table to be upgraded and wait for its termination.
|
||||
future<> perform_sstable_upgrade(owned_ranges_ptr sorted_owned_ranges, compaction::table_state& t, bool exclude_current_version);
|
||||
future<> perform_sstable_upgrade(replica::database& db, replica::table* t, bool exclude_current_version);
|
||||
|
||||
// Submit a table to be scrubbed and wait for its termination.
|
||||
future<compaction_stats_opt> perform_sstable_scrub(compaction::table_state& t, sstables::compaction_type_options::scrub opts);
|
||||
future<> perform_sstable_scrub(replica::table* t, sstables::compaction_type_options::scrub opts);
|
||||
|
||||
// Submit a table for major compaction.
|
||||
future<> perform_major_compaction(compaction::table_state& t);
|
||||
future<> perform_major_compaction(replica::table* t);
|
||||
|
||||
|
||||
// Run a custom job for a given table, defined by a function
|
||||
@@ -450,61 +269,67 @@ public:
|
||||
// parameter type is the compaction type the operation can most closely be
|
||||
// associated with, use compaction_type::Compaction, if none apply.
|
||||
// parameter job is a function that will carry the operation
|
||||
future<> run_custom_job(compaction::table_state& s, sstables::compaction_type type, const char *desc, noncopyable_function<future<>(sstables::compaction_data&)> job, throw_if_stopping do_throw_if_stopping);
|
||||
future<> run_custom_job(replica::table* t, sstables::compaction_type type, noncopyable_function<future<>(sstables::compaction_data&)> job);
|
||||
|
||||
class compaction_reenabler {
|
||||
compaction_manager& _cm;
|
||||
compaction::table_state* _table;
|
||||
compaction_manager::compaction_state& _compaction_state;
|
||||
replica::table* _table;
|
||||
compaction_state& _compaction_state;
|
||||
gate::holder _holder;
|
||||
|
||||
public:
|
||||
compaction_reenabler(compaction_manager&, compaction::table_state&);
|
||||
compaction_reenabler(compaction_manager&, replica::table*);
|
||||
compaction_reenabler(compaction_reenabler&&) noexcept;
|
||||
|
||||
~compaction_reenabler();
|
||||
|
||||
compaction::table_state* compacting_table() const noexcept {
|
||||
replica::table* compacting_table() const noexcept {
|
||||
return _table;
|
||||
}
|
||||
|
||||
const compaction_manager::compaction_state& compaction_state() const noexcept {
|
||||
const compaction_state& compaction_state() const noexcept {
|
||||
return _compaction_state;
|
||||
}
|
||||
};
|
||||
|
||||
// Disable compaction temporarily for a table t.
|
||||
// Caller should call the compaction_reenabler::reenable
|
||||
future<compaction_reenabler> stop_and_disable_compaction(compaction::table_state& t);
|
||||
future<compaction_reenabler> stop_and_disable_compaction(replica::table* t);
|
||||
|
||||
// Run a function with compaction temporarily disabled for a table T.
|
||||
future<> run_with_compaction_disabled(compaction::table_state& t, std::function<future<> ()> func);
|
||||
future<> run_with_compaction_disabled(replica::table* t, std::function<future<> ()> func);
|
||||
|
||||
// Adds a table to the compaction manager.
|
||||
// Creates a compaction_state structure that can be used for submitting
|
||||
// compaction jobs of all types.
|
||||
void add(compaction::table_state& t);
|
||||
void add(replica::table* t);
|
||||
|
||||
// Remove a table from the compaction manager.
|
||||
// Cancel requests on table and wait for possible ongoing compactions.
|
||||
future<> remove(compaction::table_state& t);
|
||||
future<> remove(replica::table* t);
|
||||
|
||||
const stats& get_stats() const {
|
||||
return _stats;
|
||||
}
|
||||
|
||||
const std::vector<sstables::compaction_info> get_compactions(compaction::table_state* t = nullptr) const;
|
||||
const std::vector<sstables::compaction_info> get_compactions(replica::table* t = nullptr) const;
|
||||
|
||||
// Returns true if table has an ongoing compaction, running on its behalf
|
||||
bool has_table_ongoing_compaction(const compaction::table_state& t) const;
|
||||
bool has_table_ongoing_compaction(const replica::table* t) const {
|
||||
return std::any_of(_tasks.begin(), _tasks.end(), [t] (const lw_shared_ptr<task>& task) {
|
||||
return task->compacting_table == t && task->compaction_running;
|
||||
});
|
||||
};
|
||||
|
||||
bool compaction_disabled(compaction::table_state& t) const;
|
||||
bool compaction_disabled(replica::table* t) const {
|
||||
return _compaction_state.contains(t) && _compaction_state.at(t).compaction_disabled();
|
||||
}
|
||||
|
||||
// Stops ongoing compaction of a given type.
|
||||
future<> stop_compaction(sstring type, compaction::table_state* table = nullptr);
|
||||
future<> stop_compaction(sstring type, replica::table* table = nullptr);
|
||||
|
||||
// Stops ongoing compaction of a given table and/or compaction_type.
|
||||
future<> stop_ongoing_compactions(sstring reason, compaction::table_state* t = nullptr, std::optional<sstables::compaction_type> type_opt = {});
|
||||
future<> stop_ongoing_compactions(sstring reason, replica::table* t = nullptr, std::optional<sstables::compaction_type> type_opt = {});
|
||||
|
||||
double backlog() {
|
||||
return _backlog_manager.backlog();
|
||||
@@ -514,6 +339,9 @@ public:
|
||||
_backlog_manager.register_backlog_tracker(backlog_tracker);
|
||||
}
|
||||
|
||||
// Propagate replacement of sstables to all ongoing compaction of a given table
|
||||
void propagate_replacement(replica::table* t, const std::vector<sstables::shared_sstable>& removed, const std::vector<sstables::shared_sstable>& added);
|
||||
|
||||
static sstables::compaction_data create_compaction_data();
|
||||
|
||||
compaction::strategy_control& get_strategy_control() const noexcept;
|
||||
@@ -525,8 +353,3 @@ public:
|
||||
|
||||
bool needs_cleanup(const sstables::shared_sstable& sst, const dht::token_range_vector& owned_ranges, schema_ptr s);
|
||||
|
||||
// Return all sstables but those that are off-strategy like the ones in maintenance set and staging dir.
|
||||
std::vector<sstables::shared_sstable> in_strategy_sstables(compaction::table_state& table_s);
|
||||
|
||||
std::ostream& operator<<(std::ostream& os, compaction_manager::task::state s);
|
||||
std::ostream& operator<<(std::ostream& os, const compaction_manager::task& task);
|
||||
|
||||
@@ -36,18 +36,8 @@ logging::logger leveled_manifest::logger("LeveledManifest");
|
||||
|
||||
namespace sstables {
|
||||
|
||||
compaction_descriptor compaction_strategy_impl::make_major_compaction_job(std::vector<sstables::shared_sstable> candidates, int level, uint64_t max_sstable_bytes) {
|
||||
// run major compaction in maintenance priority
|
||||
return compaction_descriptor(std::move(candidates), service::get_local_streaming_priority(), level, max_sstable_bytes);
|
||||
}
|
||||
|
||||
std::vector<compaction_descriptor> compaction_strategy_impl::get_cleanup_compaction_jobs(table_state& table_s, std::vector<shared_sstable> candidates) const {
|
||||
// The default implementation is suboptimal and causes the writeamp problem described issue in #10097.
|
||||
// The compaction strategy relying on it should strive to implement its own method, to make cleanup bucket aware.
|
||||
return boost::copy_range<std::vector<compaction_descriptor>>(candidates | boost::adaptors::transformed([] (const shared_sstable& sst) {
|
||||
return compaction_descriptor({ sst }, service::get_local_compaction_priority(),
|
||||
sst->get_sstable_level(), sstables::compaction_descriptor::default_max_sstable_bytes, sst->run_identifier());
|
||||
}));
|
||||
compaction_descriptor compaction_strategy_impl::get_major_compaction_job(table_state& table_s, std::vector<sstables::shared_sstable> candidates) {
|
||||
return compaction_descriptor(std::move(candidates), table_s.get_sstable_set(), service::get_local_compaction_priority());
|
||||
}
|
||||
|
||||
bool compaction_strategy_impl::worth_dropping_tombstones(const shared_sstable& sst, gc_clock::time_point compaction_time) {
|
||||
@@ -101,16 +91,23 @@ compaction_strategy_impl::compaction_strategy_impl(const std::map<sstring, sstri
|
||||
|
||||
} // namespace sstables
|
||||
|
||||
size_tiered_backlog_tracker::inflight_component
size_tiered_backlog_tracker::partial_backlog(const compaction_backlog_tracker::ongoing_writes& ongoing_writes) const {
inflight_component in;
for (auto const& swp : ongoing_writes) {
auto written = swp.second->written();
if (written > 0) {
in.total_bytes += written;
in.contribution += written * log4(written);
}
}
return in;
}

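The contribution accumulated above is written * log4(written), i.e. each in-flight byte count weighted by its own log base 4. Below is a minimal self-contained sketch of that accumulation; the log4 helper here is an assumption about the usual change-of-base definition (the real helper is defined elsewhere in this file and is not visible in this hunk).

#include <cmath>
#include <cstdint>
#include <utility>
#include <vector>

// Assumed definition: log base 4, matching how the trackers weigh sizes.
static double log4(double x) {
    return std::log(x) / std::log(4.0);
}

// Accumulate (total bytes, sum of bytes * log4(bytes)) over a set of partial writes,
// mirroring what size_tiered_backlog_tracker::partial_backlog() does above.
std::pair<uint64_t, double> accumulate_inflight(const std::vector<uint64_t>& written_bytes) {
    uint64_t total = 0;
    double contribution = 0.0;
    for (auto written : written_bytes) {
        if (written > 0) {
            total += written;
            contribution += written * log4(double(written));
        }
    }
    return {total, contribution};
}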
size_tiered_backlog_tracker::inflight_component
|
||||
size_tiered_backlog_tracker::compacted_backlog(const compaction_backlog_tracker::ongoing_compactions& ongoing_compactions) const {
|
||||
inflight_component in;
|
||||
for (auto const& crp : ongoing_compactions) {
|
||||
// A SSTable being compacted may not contribute to backlog if compaction strategy decided
|
||||
// to perform a low-efficiency compaction when system is under little load, or when user
|
||||
// performs major even though strategy is completely satisfied
|
||||
if (!_sstables_contributing_backlog.contains(crp.first)) {
|
||||
continue;
|
||||
}
|
||||
auto compacted = crp.second->compacted();
|
||||
in.total_bytes += compacted;
|
||||
in.contribution += compacted * log4(crp.first->data_size());
|
||||
@@ -118,75 +115,34 @@ size_tiered_backlog_tracker::compacted_backlog(const compaction_backlog_tracker:
|
||||
return in;
|
||||
}
|
||||
|
||||
void size_tiered_backlog_tracker::refresh_sstables_backlog_contribution() {
|
||||
_sstables_backlog_contribution = 0.0f;
|
||||
_sstables_contributing_backlog = {};
|
||||
if (_all.empty()) {
|
||||
return;
|
||||
}
|
||||
using namespace sstables;
|
||||
|
||||
// Deduce the threshold from the last SSTable added to the set.
// Low-efficiency jobs, whose fan-in is smaller than min-threshold, will not have their backlog accounted.
// That's because they can only run when the system is under little load, and accounting for them would result
// in efficient jobs acting more aggressively than they really have to.
// TODO: potentially switch to the compaction manager's fan-in threshold, so as to account for the dynamic
// fan-in threshold behavior.
const auto& newest_sst = std::ranges::max(_all, std::less<generation_type>(), std::mem_fn(&sstable::generation));
|
||||
auto threshold = newest_sst->get_schema()->min_compaction_threshold();
|
||||
|
||||
for (auto& bucket : size_tiered_compaction_strategy::get_buckets(boost::copy_range<std::vector<shared_sstable>>(_all), _stcs_options)) {
|
||||
if (!size_tiered_compaction_strategy::is_bucket_interesting(bucket, threshold)) {
|
||||
continue;
|
||||
}
|
||||
_sstables_backlog_contribution += boost::accumulate(bucket | boost::adaptors::transformed([this] (const shared_sstable& sst) -> double {
|
||||
return sst->data_size() * log4(sst->data_size());
|
||||
}), double(0.0f));
|
||||
// Controller is disabled if exception is caught during add / remove calls, so not making any effort to make this exception safe
|
||||
_sstables_contributing_backlog.insert(bucket.begin(), bucket.end());
|
||||
}
|
||||
}
|
||||
|
||||
double size_tiered_backlog_tracker::backlog(const compaction_backlog_tracker::ongoing_writes& ow, const compaction_backlog_tracker::ongoing_compactions& oc) const {
inflight_component partial = partial_backlog(ow);
inflight_component compacted = compacted_backlog(oc);

auto total_backlog_bytes = boost::accumulate(_sstables_contributing_backlog | boost::adaptors::transformed(std::mem_fn(&sstables::sstable::data_size)), uint64_t(0));

// Bail out if effective backlog is zero, which happens in a small window where ongoing compaction exhausted
// input files but is still sealing output files or doing managerial stuff like updating history table
if (total_backlog_bytes <= compacted.total_bytes) {
auto effective_total_size = _total_bytes + partial.total_bytes - compacted.total_bytes;
if ((effective_total_size <= 0)) {
return 0;
}

// Formula for each SSTable is (Si - Ci) * log(T / Si)
// Which can be rewritten as: ((Si - Ci) * log(T)) - ((Si - Ci) * log(Si))
//
// For the meaning of each variable, please refer to the doc in size_tiered_backlog_tracker.hh

// Sum of (Si - Ci) for all SSTables contributing backlog
auto effective_backlog_bytes = total_backlog_bytes - compacted.total_bytes;

// Sum of (Si - Ci) * log (Si) for all SSTables contributing backlog
auto sstables_contribution = _sstables_backlog_contribution - compacted.contribution;
// This is subtracting ((Si - Ci) * log (Si)) from ((Si - Ci) * log(T)), yielding the final backlog
auto b = (effective_backlog_bytes * log4(_total_bytes)) - sstables_contribution;
if (_total_bytes == 0) {
return 0;
}
auto sstables_contribution = _sstables_backlog_contribution + partial.contribution - compacted.contribution;
auto b = (effective_total_size * log4(_total_bytes)) - sstables_contribution;
return b > 0 ? b : 0;
}

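Putting the in-code comments above into one formula (this is only a restatement of those comments, not an independent specification): with $S_i$ the size of sstable $i$, $C_i$ the bytes of it already consumed by an ongoing compaction, and $T$ the total bytes tracked,

$$ B \;=\; \sum_i (S_i - C_i)\,\log_4\!\frac{T}{S_i} \;=\; \Big(\sum_i (S_i - C_i)\Big)\log_4 T \;-\; \sum_i (S_i - C_i)\log_4 S_i $$

The second form is what the function computes: the incrementally maintained _sstables_backlog_contribution holds the $\sum_i S_i \log_4 S_i$ term, and the contribution of bytes already compacted is subtracted from it before the difference is taken.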
void size_tiered_backlog_tracker::replace_sstables(std::vector<sstables::shared_sstable> old_ssts, std::vector<sstables::shared_sstable> new_ssts) {
|
||||
for (auto& sst : old_ssts) {
|
||||
if (sst->data_size() > 0) {
|
||||
_total_bytes -= sst->data_size();
|
||||
_all.erase(sst);
|
||||
}
|
||||
void size_tiered_backlog_tracker::add_sstable(sstables::shared_sstable sst) {
|
||||
if (sst->data_size() > 0) {
|
||||
_total_bytes += sst->data_size();
|
||||
_sstables_backlog_contribution += sst->data_size() * log4(sst->data_size());
|
||||
}
|
||||
for (auto& sst : new_ssts) {
|
||||
if (sst->data_size() > 0) {
|
||||
_total_bytes += sst->data_size();
|
||||
_all.insert(std::move(sst));
|
||||
}
|
||||
}
|
||||
|
||||
void size_tiered_backlog_tracker::remove_sstable(sstables::shared_sstable sst) {
|
||||
if (sst->data_size() > 0) {
|
||||
_total_bytes -= sst->data_size();
|
||||
_sstables_backlog_contribution -= sst->data_size() * log4(sst->data_size());
|
||||
}
|
||||
refresh_sstables_backlog_contribution();
|
||||
}
|
||||
|
||||
namespace sstables {
|
||||
@@ -203,7 +159,6 @@ extern logging::logger clogger;
|
||||
// a new object for the partial write at this time.
|
||||
class time_window_backlog_tracker final : public compaction_backlog_tracker::impl {
|
||||
time_window_compaction_strategy_options _twcs_options;
|
||||
size_tiered_compaction_strategy_options _stcs_options;
|
||||
std::unordered_map<api::timestamp_type, size_tiered_backlog_tracker> _windows;
|
||||
|
||||
api::timestamp_type lower_bound_of(api::timestamp_type timestamp) const {
|
||||
@@ -211,9 +166,8 @@ class time_window_backlog_tracker final : public compaction_backlog_tracker::imp
|
||||
return time_window_compaction_strategy::get_window_lower_bound(_twcs_options.sstable_window_size, ts);
|
||||
}
|
||||
public:
|
||||
time_window_backlog_tracker(time_window_compaction_strategy_options twcs_options, size_tiered_compaction_strategy_options stcs_options)
|
||||
: _twcs_options(twcs_options)
|
||||
, _stcs_options(stcs_options)
|
||||
time_window_backlog_tracker(time_window_compaction_strategy_options options)
|
||||
: _twcs_options(options)
|
||||
{}
|
||||
|
||||
virtual double backlog(const compaction_backlog_tracker::ongoing_writes& ow, const compaction_backlog_tracker::ongoing_compactions& oc) const override {
|
||||
@@ -259,39 +213,23 @@ public:
|
||||
|
||||
// Partial writes that don't belong to any window are accounted here.
|
||||
for (auto& current : writes_per_window) {
|
||||
b += size_tiered_backlog_tracker(_stcs_options).backlog(current.second, no_oc);
|
||||
b += size_tiered_backlog_tracker().backlog(current.second, no_oc);
|
||||
}
|
||||
return b;
|
||||
}
|
||||
|
||||
virtual void replace_sstables(std::vector<sstables::shared_sstable> old_ssts, std::vector<sstables::shared_sstable> new_ssts) override {
|
||||
struct replacement {
|
||||
std::vector<sstables::shared_sstable> old_ssts;
|
||||
std::vector<sstables::shared_sstable> new_ssts;
|
||||
};
|
||||
std::unordered_map<api::timestamp_type, replacement> per_window_replacement;
|
||||
virtual void add_sstable(sstables::shared_sstable sst) override {
|
||||
auto bound = lower_bound_of(sst->get_stats_metadata().max_timestamp);
|
||||
_windows[bound].add_sstable(sst);
|
||||
}
|
||||
|
||||
for (auto& sst : new_ssts) {
|
||||
auto bound = lower_bound_of(sst->get_stats_metadata().max_timestamp);
|
||||
if (!_windows.contains(bound)) {
|
||||
_windows.emplace(bound, size_tiered_backlog_tracker(_stcs_options));
|
||||
}
|
||||
per_window_replacement[bound].new_ssts.push_back(std::move(sst));
|
||||
}
|
||||
for (auto& sst : old_ssts) {
|
||||
auto bound = lower_bound_of(sst->get_stats_metadata().max_timestamp);
|
||||
if (_windows.contains(bound)) {
|
||||
per_window_replacement[bound].old_ssts.push_back(std::move(sst));
|
||||
}
|
||||
}
|
||||
|
||||
for (auto& [bound, r] : per_window_replacement) {
|
||||
// All windows must exist here, as windows are created for new files and will
|
||||
// remain alive as long as there's a single file in them
|
||||
auto& w = _windows.at(bound);
|
||||
w.replace_sstables(std::move(r.old_ssts), std::move(r.new_ssts));
|
||||
if (w.total_bytes() <= 0) {
|
||||
_windows.erase(bound);
|
||||
virtual void remove_sstable(sstables::shared_sstable sst) override {
|
||||
auto bound = lower_bound_of(sst->get_stats_metadata().max_timestamp);
|
||||
auto it = _windows.find(bound);
|
||||
if (it != _windows.end()) {
|
||||
it->second.remove_sstable(sst);
|
||||
if (it->second.total_bytes() <= 0) {
|
||||
_windows.erase(it);
|
||||
}
|
||||
}
|
||||
}
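For readers unfamiliar with the per-window trackers used above: each sstable is assigned to a time window keyed by the lower bound of its maximum timestamp, and each window keeps its own size-tiered tracker. The sketch below illustrates that bucketing; window_lower_bound is an assumption about what get_window_lower_bound() computes (rounding the timestamp down to the start of its window), not a copy of the real implementation.

#include <cstdint>
#include <map>

// Assumed behavior of get_window_lower_bound(): round the timestamp
// (microseconds) down to the start of its window.
static int64_t window_lower_bound(int64_t window_size_us, int64_t ts_us) {
    return ts_us - (ts_us % window_size_us);
}

int main() {
    // Map from window lower bound to total bytes in that window; a stand-in for the
    // per-window size_tiered_backlog_tracker map kept by time_window_backlog_tracker.
    std::map<int64_t, uint64_t> windows;
    const int64_t one_day_us = int64_t(24) * 60 * 60 * 1000000;

    auto add = [&] (int64_t max_timestamp_us, uint64_t bytes) {
        windows[window_lower_bound(one_day_us, max_timestamp_us)] += bytes;
    };

    add(3 * one_day_us + 123, 1000); // lands in the window starting at day 3
    add(3 * one_day_us + 456, 2000); // same window
    add(5 * one_day_us, 4000);       // a different window
    return 0;
}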
|
||||
@@ -304,9 +242,8 @@ class leveled_compaction_backlog_tracker final : public compaction_backlog_track
|
||||
std::vector<uint64_t> _size_per_level;
|
||||
uint64_t _max_sstable_size;
|
||||
public:
|
||||
leveled_compaction_backlog_tracker(int32_t max_sstable_size_in_mb, size_tiered_compaction_strategy_options stcs_options)
|
||||
: _l0_scts(stcs_options)
|
||||
, _size_per_level(leveled_manifest::MAX_LEVELS, uint64_t(0))
|
||||
leveled_compaction_backlog_tracker(int32_t max_sstable_size_in_mb)
|
||||
: _size_per_level(leveled_manifest::MAX_LEVELS, uint64_t(0))
|
||||
, _max_sstable_size(max_sstable_size_in_mb * 1024 * 1024)
|
||||
{}
|
||||
|
||||
@@ -332,85 +269,38 @@ public:
|
||||
}
|
||||
|
||||
double b = _l0_scts.backlog(l0_partial_writes, l0_compacted);
|
||||
|
||||
size_t max_populated_level = [&effective_size_per_level] () -> size_t {
|
||||
auto it = std::find_if(effective_size_per_level.rbegin(), effective_size_per_level.rend(), [] (uint64_t s) {
|
||||
return s != 0;
|
||||
});
|
||||
if (it == effective_size_per_level.rend()) {
|
||||
return 0;
|
||||
}
|
||||
return std::distance(it, effective_size_per_level.rend()) - 1;
|
||||
}();
|
||||
|
||||
// The LCS goal is to achieve a layout where for every level L, sizeof(L+1) >= (sizeof(L) * fan_out)
// If table size is S, which is the sum of size of all levels, the target size of the highest level
// is S / 1.111, where 1.111 refers to the strategy's space amplification goal.
// As level L is fan_out times smaller than L+1, level L-1 is fan_out^2 times smaller than L+1,
// and so on, the target size of any level can be easily calculated.

static constexpr auto fan_out = leveled_manifest::leveled_fan_out;
static constexpr double space_amplification_goal = 1.111;
uint64_t total_size = std::accumulate(effective_size_per_level.begin(), effective_size_per_level.end(), uint64_t(0));
uint64_t target_max_level_size = std::ceil(total_size / space_amplification_goal);

auto target_level_size = [&] (size_t level) {
auto r = std::ceil(target_max_level_size / std::pow(fan_out, max_populated_level - level));
return std::max(uint64_t(r), _max_sstable_size);
};

// The backlog for a level L is the amount of bytes to be compacted, such that:
// sizeof(L) <= sizeof(L+1) * fan_out
// If we start from L0, then L0 backlog is (sizeof(L0) - target_sizeof(L0)) * fan_out, where
// (sizeof(L0) - target_sizeof(L0)) is the amount of data to be promoted into the next level.
// By summing the backlog for each level, we get the total amount of work for all levels to
// reach their target size.
for (size_t level = 0; level < max_populated_level; ++level) {
// Backlog for a level: size_of_level * (max_level - n) * fan_out
//
// The fan_out is usually 10. But if the level above us is not
// fully populated -- which can happen when a level is still being born, we don't want that
// to jump abruptly. So what we will do instead is to define the fan out as the minimum
// between 10 and the number of sstables that are estimated to be there.
//
// Because of that, it's easier to write this code as an accumulator loop. If we are level
// L, for each level L + n, n > 0, we accumulate sizeof(L) * fan_out_of(L+n)
for (size_t level = 0; level < _size_per_level.size() - 1; ++level) {
auto lsize = effective_size_per_level[level];
auto target_lsize = target_level_size(level);

// Current level satisfies the goal, skip to the next one.
if (lsize <= target_lsize) {
continue;
for (size_t next = level + 1; next < _size_per_level.size() - 1; ++next) {
auto lsize_next = effective_size_per_level[next];
b += std::min(double(leveled_manifest::leveled_fan_out), double(lsize_next) / _max_sstable_size) * lsize;
}
auto next_level = level + 1;
auto bytes_for_next_level = lsize - target_lsize;

// The fan_out is usually 10. But if the level above us is not fully populated -- which
// can happen when a level is still being born, we don't want that to jump abruptly.
// So what we will do instead is to define the fan out as the minimum between 10
// and the number of sstables that are estimated to be there.
unsigned estimated_next_level_ssts = (effective_size_per_level[next_level] + _max_sstable_size - 1) / _max_sstable_size;
auto estimated_fan_out = std::min(fan_out, estimated_next_level_ssts);

b += bytes_for_next_level * estimated_fan_out;

// Update size of next level, as data from current level can be promoted as many times
// as needed, and therefore needs to be included in backlog calculation for the next
// level, if needed.
effective_size_per_level[next_level] += bytes_for_next_level;
}
return b;
}

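A small worked example of the target sizes used in the backlog calculation above, with the stated space-amplification goal of 1.111 and an illustrative fan-out of 10 (the numbers are made up): for a table holding $T = 1000$ GB in total, the highest populated level gets a target of $\lceil 1000 / 1.111 \rceil \approx 900$ GB, the level below it targets about 90 GB, the next one about 9 GB, and so on, each clamped from below by the maximum sstable size:

$$ \mathrm{target}(L) \;=\; \max\!\Big(\Big\lceil \tfrac{T}{1.111 \cdot \mathrm{fan\_out}^{\,L_{max}-L}} \Big\rceil,\ \mathrm{max\_sstable\_size}\Big) $$

A level holding more than its target contributes roughly $(\mathrm{size} - \mathrm{target}) \times \mathrm{fan\_out}$ bytes of backlog (with the fan-out capped by the estimated number of sstables in the next level), and the promoted bytes are added to the next level before that level's own excess is evaluated.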
virtual void replace_sstables(std::vector<sstables::shared_sstable> old_ssts, std::vector<sstables::shared_sstable> new_ssts) override {
std::vector<sstables::shared_sstable> l0_old_ssts, l0_new_ssts;
for (auto& sst : new_ssts) {
auto level = sst->get_sstable_level();
_size_per_level[level] += sst->data_size();
if (level == 0) {
l0_new_ssts.push_back(std::move(sst));
}
virtual void add_sstable(sstables::shared_sstable sst) override {
auto level = sst->get_sstable_level();
_size_per_level[level] += sst->data_size();
if (level == 0) {
_l0_scts.add_sstable(sst);
}
for (auto& sst : old_ssts) {
auto level = sst->get_sstable_level();
_size_per_level[level] -= sst->data_size();
if (level == 0) {
l0_old_ssts.push_back(std::move(sst));
}
}
if (l0_old_ssts.size() || l0_new_ssts.size()) {
_l0_scts.replace_sstables(std::move(l0_old_ssts), std::move(l0_new_ssts));
}

virtual void remove_sstable(sstables::shared_sstable sst) override {
auto level = sst->get_sstable_level();
_size_per_level[level] -= sst->data_size();
if (level == 0) {
_l0_scts.remove_sstable(sst);
}
}
};
@@ -419,14 +309,16 @@ struct unimplemented_backlog_tracker final : public compaction_backlog_tracker::
virtual double backlog(const compaction_backlog_tracker::ongoing_writes& ow, const compaction_backlog_tracker::ongoing_compactions& oc) const override {
return compaction_controller::disable_backlog;
}
virtual void replace_sstables(std::vector<sstables::shared_sstable> old_ssts, std::vector<sstables::shared_sstable> new_ssts) override {}
virtual void add_sstable(sstables::shared_sstable sst) override { }
virtual void remove_sstable(sstables::shared_sstable sst) override { }
};

struct null_backlog_tracker final : public compaction_backlog_tracker::impl {
virtual double backlog(const compaction_backlog_tracker::ongoing_writes& ow, const compaction_backlog_tracker::ongoing_compactions& oc) const override {
return 0;
}
virtual void replace_sstables(std::vector<sstables::shared_sstable> old_ssts, std::vector<sstables::shared_sstable> new_ssts) override {}
virtual void add_sstable(sstables::shared_sstable sst) override { }
virtual void remove_sstable(sstables::shared_sstable sst) override { }
};

// Just so that if we have more than one CF with NullStrategy, we don't create a lot
@@ -464,7 +356,7 @@ leveled_compaction_strategy::leveled_compaction_strategy(const std::map<sstring,
: compaction_strategy_impl(options)
, _max_sstable_size_in_mb(calculate_max_sstable_size_in_mb(compaction_strategy_impl::get_value(options, SSTABLE_SIZE_OPTION)))
, _stcs_options(options)
, _backlog_tracker(std::make_unique<leveled_compaction_backlog_tracker>(_max_sstable_size_in_mb, _stcs_options))
, _backlog_tracker(std::make_unique<leveled_compaction_backlog_tracker>(_max_sstable_size_in_mb))
{
_compaction_counter.resize(leveled_manifest::MAX_LEVELS);
}
@@ -488,7 +380,7 @@ time_window_compaction_strategy::time_window_compaction_strategy(const std::map<
: compaction_strategy_impl(options)
, _options(options)
, _stcs_options(options)
, _backlog_tracker(std::make_unique<time_window_backlog_tracker>(_options, _stcs_options))
, _backlog_tracker(std::make_unique<time_window_backlog_tracker>(_options))
{
if (!options.contains(TOMBSTONE_COMPACTION_INTERVAL_OPTION) && !options.contains(TOMBSTONE_THRESHOLD_OPTION)) {
_disable_tombstone_compaction = true;
@@ -503,7 +395,7 @@ time_window_compaction_strategy::time_window_compaction_strategy(const std::map<
std::vector<sstables::shared_sstable>
date_tiered_manifest::get_next_sstables(table_state& table_s, std::vector<sstables::shared_sstable>& uncompacting, gc_clock::time_point compaction_time) {
if (table_s.main_sstable_set().all()->empty()) {
if (table_s.get_sstable_set().all()->empty()) {
return {};
}

@@ -524,11 +416,11 @@ date_tiered_manifest::get_next_sstables(table_state& table_s, std::vector<sstabl

int64_t date_tiered_manifest::get_estimated_tasks(table_state& table_s) const {
int base = table_s.schema()->min_compaction_threshold();
int64_t now = get_now(table_s.main_sstable_set().all());
int64_t now = get_now(table_s.get_sstable_set().all());
std::vector<sstables::shared_sstable> sstables;
int64_t n = 0;

auto all_sstables = table_s.main_sstable_set().all();
auto all_sstables = table_s.get_sstable_set().all();
sstables.reserve(all_sstables->size());
for (auto& entry : *all_sstables) {
sstables.push_back(entry);
@@ -547,7 +439,7 @@ int64_t date_tiered_manifest::get_estimated_tasks(table_state& table_s) const {
std::vector<sstables::shared_sstable>
date_tiered_manifest::get_next_non_expired_sstables(table_state& table_s, std::vector<sstables::shared_sstable>& non_expiring_sstables, gc_clock::time_point compaction_time) {
int base = table_s.schema()->min_compaction_threshold();
int64_t now = get_now(table_s.main_sstable_set().all());
int64_t now = get_now(table_s.get_sstable_set().all());
auto most_interesting = get_compaction_candidates(table_s, non_expiring_sstables, now, base);

return most_interesting;
@@ -668,7 +560,7 @@ compaction_descriptor date_tiered_compaction_strategy::get_sstables_for_compacti

if (!sstables.empty()) {
date_tiered_manifest::logger.debug("datetiered: Compacting {} out of {} sstables", sstables.size(), candidates.size());
return sstables::compaction_descriptor(std::move(sstables), service::get_local_compaction_priority());
return sstables::compaction_descriptor(std::move(sstables), table_s.get_sstable_set(), service::get_local_compaction_priority());
}

// filter out sstables which droppable tombstone ratio isn't greater than the defined threshold.
@@ -684,18 +576,18 @@ compaction_descriptor date_tiered_compaction_strategy::get_sstables_for_compacti
auto it = std::min_element(candidates.begin(), candidates.end(), [] (auto& i, auto& j) {
return i->get_stats_metadata().min_timestamp < j->get_stats_metadata().min_timestamp;
});
return sstables::compaction_descriptor({ *it }, service::get_local_compaction_priority());
return sstables::compaction_descriptor({ *it }, table_s.get_sstable_set(), service::get_local_compaction_priority());
}

size_tiered_compaction_strategy::size_tiered_compaction_strategy(const std::map<sstring, sstring>& options)
: compaction_strategy_impl(options)
, _options(options)
, _backlog_tracker(std::make_unique<size_tiered_backlog_tracker>(_options))
, _backlog_tracker(std::make_unique<size_tiered_backlog_tracker>())
{}

size_tiered_compaction_strategy::size_tiered_compaction_strategy(const size_tiered_compaction_strategy_options& options)
: _options(options)
, _backlog_tracker(std::make_unique<size_tiered_backlog_tracker>(_options))
, _backlog_tracker(std::make_unique<size_tiered_backlog_tracker>())
{}

compaction_strategy::compaction_strategy(::shared_ptr<compaction_strategy_impl> impl)
@@ -718,10 +610,6 @@ compaction_descriptor compaction_strategy::get_major_compaction_job(table_state&
return _compaction_strategy_impl->get_major_compaction_job(table_s, std::move(candidates));
}

std::vector<compaction_descriptor> compaction_strategy::get_cleanup_compaction_jobs(table_state& table_s, std::vector<shared_sstable> candidates) const {
return _compaction_strategy_impl->get_cleanup_compaction_jobs(table_s, std::move(candidates));
}

void compaction_strategy::notify_completion(const std::vector<shared_sstable>& removed, const std::vector<shared_sstable>& added) {
_compaction_strategy_impl->notify_completion(removed, added);
}
@@ -16,6 +16,7 @@
#include "sstables/shared_sstable.hh"
#include "exceptions/exceptions.hh"
#include "compaction_strategy_type.hh"
#include "flat_mutation_reader.hh"
#include "table_state.hh"
#include "strategy_control.hh"

@@ -48,8 +49,6 @@ public:

compaction_descriptor get_major_compaction_job(table_state& table_s, std::vector<shared_sstable> candidates);

std::vector<compaction_descriptor> get_cleanup_compaction_jobs(table_state& table_s, std::vector<shared_sstable> candidates) const;

// Some strategies may look at the compacted and resulting sstables to
// get some useful information for subsequent compactions.
void notify_completion(const std::vector<shared_sstable>& removed, const std::vector<shared_sstable>& added);

@@ -12,7 +12,6 @@
#include "compaction_backlog_manager.hh"
#include "compaction_strategy.hh"
#include "db_clock.hh"
#include "compaction_descriptor.hh"

namespace compaction {
class table_state;
@@ -24,6 +23,7 @@ namespace sstables {
compaction_backlog_tracker& get_unimplemented_backlog_tracker();

class sstable_set_impl;
class compaction_descriptor;
class resharding_descriptor;

class compaction_strategy_impl {
@@ -43,16 +43,10 @@ public:
protected:
compaction_strategy_impl() = default;
explicit compaction_strategy_impl(const std::map<sstring, sstring>& options);
static compaction_descriptor make_major_compaction_job(std::vector<sstables::shared_sstable> candidates,
int level = compaction_descriptor::default_level,
uint64_t max_sstable_bytes = compaction_descriptor::default_max_sstable_bytes);
public:
virtual ~compaction_strategy_impl() {}
virtual compaction_descriptor get_sstables_for_compaction(table_state& table_s, strategy_control& control, std::vector<sstables::shared_sstable> candidates) = 0;
virtual compaction_descriptor get_major_compaction_job(table_state& table_s, std::vector<sstables::shared_sstable> candidates) {
return make_major_compaction_job(std::move(candidates));
}
virtual std::vector<compaction_descriptor> get_cleanup_compaction_jobs(table_state& table_s, std::vector<shared_sstable> candidates) const;
virtual compaction_descriptor get_major_compaction_job(table_state& table_s, std::vector<sstables::shared_sstable> candidates);
virtual void notify_completion(const std::vector<shared_sstable>& removed, const std::vector<shared_sstable>& added) { }
virtual compaction_strategy_type type() const = 0;
virtual bool parallel_compaction() const {
@@ -1,3 +1,6 @@
/*
*/

/*
* Copyright (C) 2016-present-2017 ScyllaDB
*

@@ -27,7 +27,7 @@ compaction_descriptor leveled_compaction_strategy::get_sstables_for_compaction(t
auto candidate = manifest.get_compaction_candidates(*_last_compacted_keys, _compaction_counter);

if (!candidate.sstables.empty()) {
leveled_manifest::logger.debug("leveled: Compacting {} out of {} sstables", candidate.sstables.size(), table_s.main_sstable_set().all()->size());
leveled_manifest::logger.debug("leveled: Compacting {} out of {} sstables", candidate.sstables.size(), table_s.get_sstable_set().all()->size());
return candidate;
}

@@ -51,7 +51,7 @@ compaction_descriptor leveled_compaction_strategy::get_sstables_for_compaction(t
auto gc_before2 = j->get_gc_before_for_drop_estimation(compaction_time);
return i->estimate_droppable_tombstone_ratio(gc_before1) < j->estimate_droppable_tombstone_ratio(gc_before2);
});
return sstables::compaction_descriptor({ sst }, service::get_local_compaction_priority(), sst->get_sstable_level());
return sstables::compaction_descriptor({ sst }, table_s.get_sstable_set(), service::get_local_compaction_priority(), sst->get_sstable_level());
}
return {};
}
@@ -61,10 +61,11 @@ compaction_descriptor leveled_compaction_strategy::get_major_compaction_job(tabl
return compaction_descriptor();
}

auto max_sstable_size_in_bytes = _max_sstable_size_in_mb*1024*1024;
auto ideal_level = ideal_level_for_input(candidates, max_sstable_size_in_bytes);
return make_major_compaction_job(std::move(candidates),
ideal_level, max_sstable_size_in_bytes);
auto& sst = *std::max_element(candidates.begin(), candidates.end(), [&] (sstables::shared_sstable& sst1, sstables::shared_sstable& sst2) {
return sst1->get_sstable_level() < sst2->get_sstable_level();
});
return compaction_descriptor(std::move(candidates), table_s.get_sstable_set(), service::get_local_compaction_priority(),
sst->get_sstable_level(), _max_sstable_size_in_mb*1024*1024);
}

void leveled_compaction_strategy::notify_completion(const std::vector<shared_sstable>& removed, const std::vector<shared_sstable>& added) {
@@ -125,7 +126,7 @@ void leveled_compaction_strategy::generate_last_compacted_keys(leveled_manifest&

int64_t leveled_compaction_strategy::estimated_pending_compactions(table_state& table_s) const {
std::vector<sstables::shared_sstable> sstables;
auto all_sstables = table_s.main_sstable_set().all();
auto all_sstables = table_s.get_sstable_set().all();
sstables.reserve(all_sstables->size());
for (auto& entry : *all_sstables) {
sstables.push_back(entry);
@@ -144,16 +145,13 @@ leveled_compaction_strategy::get_reshaping_job(std::vector<shared_sstable> input

auto max_sstable_size_in_bytes = _max_sstable_size_in_mb * 1024 * 1024;

leveled_manifest::logger.debug("get_reshaping_job: mode={} input.size={} max_sstable_size_in_bytes={}", mode == reshape_mode::relaxed ? "relaxed" : "strict", input.size(), max_sstable_size_in_bytes);

for (auto& sst : input) {
auto sst_level = sst->get_sstable_level();
if (sst_level > leveled_manifest::MAX_LEVELS - 1) {
leveled_manifest::logger.warn("Found SSTable with level {}, higher than the maximum {}. This is unexpected, but will fix", sst_level, leveled_manifest::MAX_LEVELS - 1);

// This is really unexpected, so we'll just compact it all to fix it
auto ideal_level = ideal_level_for_input(input, max_sstable_size_in_bytes);
compaction_descriptor desc(std::move(input), iop, ideal_level, max_sstable_size_in_bytes);
compaction_descriptor desc(std::move(input), std::optional<sstables::sstable_set>(), iop, leveled_manifest::MAX_LEVELS - 1, max_sstable_size_in_bytes);
desc.options = compaction_type_options::make_reshape();
return desc;
}
@@ -168,6 +166,8 @@ leveled_compaction_strategy::get_reshaping_job(std::vector<shared_sstable> input
});
}

unsigned max_filled_level = 0;

size_t offstrategy_threshold = (mode == reshape_mode::strict) ? std::max(schema->min_compaction_threshold(), 4) : std::max(schema->max_compaction_threshold(), 32);
size_t max_sstables = std::max(schema->max_compaction_threshold(), int(offstrategy_threshold));
auto tolerance = [mode] (unsigned level) -> unsigned {
@@ -182,10 +182,16 @@ leveled_compaction_strategy::get_reshaping_job(std::vector<shared_sstable> input
// The best possible level can be calculated with the formula: log (base fan_out) of (L0_total_bytes / max_sstable_size)
auto [l0_disjoint, _] = is_disjoint(level_info[0], 0);
if (mode == reshape_mode::strict && level_info[0].size() >= offstrategy_threshold && level_info[0].size() == input.size() && l0_disjoint) {
unsigned ideal_level = ideal_level_for_input(level_info[0], max_sstable_size_in_bytes);
auto log_fanout = [fanout = leveled_manifest::leveled_fan_out] (double x) {
double inv_log_fanout = 1.0f / std::log(fanout);
return log(x) * inv_log_fanout;
};

auto total_bytes = std::max(leveled_manifest::get_total_bytes(level_info[0]), uint64_t(max_sstable_size_in_bytes));
unsigned ideal_level = std::ceil(log_fanout(total_bytes / max_sstable_size_in_bytes));

leveled_manifest::logger.info("Reshaping {} disjoint sstables in level 0 into level {}", level_info[0].size(), ideal_level);
compaction_descriptor desc(std::move(input), iop, ideal_level, max_sstable_size_in_bytes);
compaction_descriptor desc(std::move(input), std::optional<sstables::sstable_set>(), iop, ideal_level, max_sstable_size_in_bytes);
desc.options = compaction_type_options::make_reshape();
return desc;
}
@@ -199,11 +205,13 @@ leveled_compaction_strategy::get_reshaping_job(std::vector<shared_sstable> input
if (level_info[level].empty()) {
continue;
}
max_filled_level = std::max(max_filled_level, level);

auto [disjoint, overlapping_sstables] = is_disjoint(level_info[level], tolerance(level));
if (!disjoint) {
leveled_manifest::logger.warn("Turns out that level {} is not disjoint, found {} overlapping SSTables, so the level will be entirely compacted on behalf of {}.{}", level, overlapping_sstables, schema->ks_name(), schema->cf_name());
compaction_descriptor desc(std::move(level_info[level]), iop, level, max_sstable_size_in_bytes);
leveled_manifest::logger.warn("Turns out that level {} is not disjoint, found {} overlapping SSTables, so compacting everything on behalf of {}.{}", level, overlapping_sstables, schema->ks_name(), schema->cf_name());
// Unfortunately no good limit to limit input size to max_sstables for LCS major
compaction_descriptor desc(std::move(input), std::optional<sstables::sstable_set>(), iop, max_filled_level, max_sstable_size_in_bytes);
desc.options = compaction_type_options::make_reshape();
return desc;
}
@@ -212,32 +220,4 @@ leveled_compaction_strategy::get_reshaping_job(std::vector<shared_sstable> input
return compaction_descriptor();
}

std::vector<compaction_descriptor>
leveled_compaction_strategy::get_cleanup_compaction_jobs(table_state& table_s, std::vector<shared_sstable> candidates) const {
std::vector<compaction_descriptor> ret;

auto levels = leveled_manifest::get_levels(candidates);

ret = size_tiered_compaction_strategy(_stcs_options).get_cleanup_compaction_jobs(table_s, std::move(levels[0]));
for (size_t level = 1; level < levels.size(); level++) {
if (levels[level].empty()) {
continue;
}
ret.push_back(compaction_descriptor(std::move(levels[level]), service::get_local_compaction_priority(), level, _max_sstable_size_in_mb * 1024 * 1024));
}
return ret;
}

unsigned leveled_compaction_strategy::ideal_level_for_input(const std::vector<sstables::shared_sstable>& input, uint64_t max_sstable_size) {
if (!max_sstable_size) {
return 1;
}
auto log_fanout = [fanout = leveled_manifest::leveled_fan_out] (double x) {
double inv_log_fanout = 1.0f / std::log(fanout);
return log(x) * inv_log_fanout;
};
uint64_t total_bytes = std::max(leveled_manifest::get_total_bytes(input), max_sstable_size);
return std::ceil(log_fanout((total_bytes + max_sstable_size - 1) / max_sstable_size));
}

}
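The functions above rely on the formula noted in the comment: the ideal level for a set of input sstables is log, base fan_out, of (total_bytes / max_sstable_size), rounded up. A quick standalone check with made-up numbers (the fan-out of 10 and the 160 MiB max sstable size are assumed example values, not read from any configuration):

#include <cmath>
#include <iostream>

int main() {
    double fan_out = 10.0;
    double max_sstable_size = 160.0 * 1024 * 1024;           // assumed 160 MiB
    double total_bytes = 1024.0 * 1024 * 1024 * 1024;        // 1 TiB of reshaped input
    double sstable_count = total_bytes / max_sstable_size;   // ~6553.6 sstables worth of data
    unsigned ideal_level = std::ceil(std::log(sstable_count) / std::log(fan_out));
    std::cout << ideal_level << "\n";                        // prints 4, since 10^3 < 6553.6 <= 10^4
}

With a fan-out of 10, L1 holds roughly 10 sstables, L2 roughly 100, and so on, so the first level able to hold all the data in this example is L4.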
@@ -38,13 +38,9 @@ class leveled_compaction_strategy : public compaction_strategy_impl {
compaction_backlog_tracker _backlog_tracker;
int32_t calculate_max_sstable_size_in_mb(std::optional<sstring> option_value) const;
public:
static unsigned ideal_level_for_input(const std::vector<sstables::shared_sstable>& input, uint64_t max_sstable_size);

leveled_compaction_strategy(const std::map<sstring, sstring>& options);
virtual compaction_descriptor get_sstables_for_compaction(table_state& table_s, strategy_control& control, std::vector<sstables::shared_sstable> candidates) override;

virtual std::vector<compaction_descriptor> get_cleanup_compaction_jobs(table_state& table_s, std::vector<shared_sstable> candidates) const override;

virtual compaction_descriptor get_major_compaction_job(table_state& table_s, std::vector<sstables::shared_sstable> candidates) override;

virtual void notify_completion(const std::vector<shared_sstable>& removed, const std::vector<shared_sstable>& added) override;

@@ -1,3 +1,6 @@
/*
*/

/*
* Copyright (C) 2015-present ScyllaDB
*
@@ -141,7 +144,7 @@ public:

sstables::compaction_descriptor get_descriptor_for_level(int level, const std::vector<std::optional<dht::decorated_key>>& last_compacted_keys,
const std::vector<int>& compaction_counter) {
std::vector<int>& compaction_counter) {
auto info = get_candidates_for(level, last_compacted_keys);
if (!info.candidates.empty()) {
int next_level = get_next_level(info.candidates, info.can_promote);
@@ -149,7 +152,7 @@ public:
if (info.can_promote) {
info.candidates = get_overlapping_starved_sstables(next_level, std::move(info.candidates), compaction_counter);
}
return sstables::compaction_descriptor(std::move(info.candidates),
return sstables::compaction_descriptor(std::move(info.candidates), _table_s.get_sstable_set(),
service::get_local_compaction_priority(), next_level, _max_sstable_size_in_bytes);
} else {
logger.debug("No compaction candidates for L{}", level);
@@ -162,7 +165,21 @@ public:
* If no compactions are necessary, will return null
*/
sstables::compaction_descriptor get_compaction_candidates(const std::vector<std::optional<dht::decorated_key>>& last_compacted_keys,
const std::vector<int>& compaction_counter) {
std::vector<int>& compaction_counter) {
#if 0
// during bootstrap we only do size tiering in L0 to make sure
// the streamed files can be placed in their original levels
if (StorageService.instance.isBootstrapMode())
{
List<SSTableReader> mostInteresting = getSSTablesForSTCS(getLevel(0));
if (!mostInteresting.isEmpty())
{
logger.info("Bootstrapping - doing STCS in L0");
return new CompactionCandidate(mostInteresting, 0, Long.MAX_VALUE);
}
return null;
}
#endif
// LevelDB gives each level a score of how much data it contains vs its ideal amount, and
// compacts the level with the highest score. But this falls apart spectacularly once you
// get behind. Consider this set of levels:
@@ -214,7 +231,7 @@ public:
_table_s.min_compaction_threshold(), _schema->max_compaction_threshold(), _stcs_options);
if (!most_interesting.empty()) {
logger.debug("L0 is too far behind, performing size-tiering there first");
return sstables::compaction_descriptor(std::move(most_interesting),
return sstables::compaction_descriptor(std::move(most_interesting), _table_s.get_sstable_set(),
service::get_local_compaction_priority());
}
}
@@ -229,7 +246,7 @@ public:
auto info = get_candidates_for(0, last_compacted_keys);
if (!info.candidates.empty()) {
auto next_level = get_next_level(info.candidates, info.can_promote);
return sstables::compaction_descriptor(std::move(info.candidates),
return sstables::compaction_descriptor(std::move(info.candidates), _table_s.get_sstable_set(),
service::get_local_compaction_priority(), next_level, _max_sstable_size_in_bytes);
}
}
@@ -7,7 +7,6 @@

#pragma once
#include "compaction_backlog_manager.hh"
#include "size_tiered_compaction_strategy.hh"
#include <cmath>
#include <ctgmath>

@@ -64,33 +63,30 @@
// certain point in time, whose size is the amount of bytes currently written. So all we need
// to do is keep track of them too, and add the current estimate to the static part of (4).
class size_tiered_backlog_tracker final : public compaction_backlog_tracker::impl {
sstables::size_tiered_compaction_strategy_options _stcs_options;
int64_t _total_bytes = 0;
double _sstables_backlog_contribution = 0.0f;
std::unordered_set<sstables::shared_sstable> _sstables_contributing_backlog;
std::unordered_set<sstables::shared_sstable> _all;

struct inflight_component {
uint64_t total_bytes = 0;
int64_t total_bytes = 0;
double contribution = 0;
};

inflight_component partial_backlog(const compaction_backlog_tracker::ongoing_writes& ongoing_writes) const;

inflight_component compacted_backlog(const compaction_backlog_tracker::ongoing_compactions& ongoing_compactions) const;

double log4(double x) const {
double inv_log_4 = 1.0f / std::log(4);
return log(x) * inv_log_4;
}

void refresh_sstables_backlog_contribution();
public:
size_tiered_backlog_tracker(sstables::size_tiered_compaction_strategy_options stcs_options) : _stcs_options(stcs_options) {}

virtual double backlog(const compaction_backlog_tracker::ongoing_writes& ow, const compaction_backlog_tracker::ongoing_compactions& oc) const override;

virtual void add_sstable(sstables::shared_sstable sst) override;

// Removing could be the result of a failure of an in progress write, successful finish of a
// compaction, or some one-off operation, like drop
virtual void replace_sstables(std::vector<sstables::shared_sstable> old_ssts, std::vector<sstables::shared_sstable> new_ssts) override;
virtual void remove_sstable(sstables::shared_sstable sst) override;

int64_t total_bytes() const {
return _total_bytes;
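The log4 helper above fits the size-tiered picture: with the default min_threshold of 4, each compaction round merges roughly four similar-sized sstables, so an sstable's size grows by about 4x per round and log4(total_bytes / sstable_size) approximates how many rounds it still has ahead of it. A standalone sketch with made-up sizes (an illustration of the idea, not the tracker's exact backlog formula):

#include <cmath>
#include <iostream>

int main() {
    auto log4 = [] (double x) { return std::log(x) / std::log(4.0); };
    double total_bytes = 64.0 * 1024 * 1024 * 1024;   // 64 GiB in the table (example value)
    double sstable_bytes = 1.0 * 1024 * 1024 * 1024;  // one 1 GiB sstable (example value)
    // 1 GiB -> 4 GiB -> 16 GiB -> 64 GiB: three rounds of size-tiered compaction remain.
    std::cout << log4(total_bytes / sstable_bytes) << "\n"; // prints 3
}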
@@ -112,9 +112,8 @@ std::vector<sstables::shared_sstable>
size_tiered_compaction_strategy::most_interesting_bucket(std::vector<std::vector<sstables::shared_sstable>> buckets,
unsigned min_threshold, unsigned max_threshold)
{
using bucket_t = std::vector<sstables::shared_sstable>;
std::vector<bucket_t> pruned_buckets;
pruned_buckets.reserve(buckets.size());
std::vector<std::pair<std::vector<sstables::shared_sstable>, uint64_t>> pruned_buckets_and_hotness;
pruned_buckets_and_hotness.reserve(buckets.size());

// FIXME: add support to get hotness for each bucket.

@@ -122,23 +121,26 @@ size_tiered_compaction_strategy::most_interesting_bucket(std::vector<std::vector
// FIXME: the coldest sstables will be trimmed to meet the threshold, so we must add support to this feature
// by converting SizeTieredCompactionStrategy::trimToThresholdWithHotness.
// By the time being, we will only compact buckets that meet the threshold.
if (!is_bucket_interesting(bucket, min_threshold)) {
continue;
}
bucket.resize(std::min(bucket.size(), size_t(max_threshold)));
pruned_buckets.push_back(std::move(bucket));
if (is_bucket_interesting(bucket, min_threshold)) {
auto avg = avg_size(bucket);
pruned_buckets_and_hotness.push_back({ std::move(bucket), avg });
}
}

if (pruned_buckets.empty()) {
if (pruned_buckets_and_hotness.empty()) {
return std::vector<sstables::shared_sstable>();
}

// Pick the bucket with more elements, as efficiency of same-tier compactions increases with number of files.
auto& max = *std::max_element(pruned_buckets.begin(), pruned_buckets.end(), [] (const bucket_t& i, const bucket_t& j) {
auto& min = *std::min_element(pruned_buckets_and_hotness.begin(), pruned_buckets_and_hotness.end(), [] (auto& i, auto& j) {
// FIXME: ignoring hotness by the time being.
return i.size() < j.size();

return i.first.size() > j.first.size();
});
return std::move(max);
auto hottest = std::move(min.first);

return hottest;
}
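Both variants above implement the same policy: keep only buckets that meet min_threshold, cap each at max_threshold, and prefer the bucket with the most sstables, since same-tier compaction gets more efficient as more similarly sized files are merged at once. A minimal standalone sketch of that selection (buckets here are just lists of sstable sizes, and all numbers are made up):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
    using bucket_t = std::vector<uint64_t>;
    std::vector<bucket_t> buckets = {
        {100, 110, 95},                   // 3 similar-sized sstables: below min_threshold
        {1000},                           // a lone large sstable: never interesting
        {10, 12, 11, 9, 13, 10, 12, 11},  // 8 small sstables: the most interesting tier
    };
    unsigned min_threshold = 4, max_threshold = 32;

    std::erase_if(buckets, [&] (const bucket_t& b) { return b.size() < size_t(min_threshold); });
    for (auto& b : buckets) {
        b.resize(std::min(b.size(), size_t(max_threshold)));
    }
    if (buckets.empty()) {
        return 0;
    }
    auto& best = *std::max_element(buckets.begin(), buckets.end(), [] (const bucket_t& i, const bucket_t& j) {
        return i.size() < j.size();
    });
    std::cout << "compacting a bucket of " << best.size() << " sstables\n"; // prints 8
}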
compaction_descriptor
@@ -154,13 +156,13 @@ size_tiered_compaction_strategy::get_sstables_for_compaction(table_state& table_

if (is_any_bucket_interesting(buckets, min_threshold)) {
std::vector<sstables::shared_sstable> most_interesting = most_interesting_bucket(std::move(buckets), min_threshold, max_threshold);
return sstables::compaction_descriptor(std::move(most_interesting), service::get_local_compaction_priority());
return sstables::compaction_descriptor(std::move(most_interesting), table_s.get_sstable_set(), service::get_local_compaction_priority());
}

// If we are not enforcing min_threshold explicitly, try any pair of SStables in the same tier.
if (!table_s.compaction_enforce_min_threshold() && is_any_bucket_interesting(buckets, 2)) {
std::vector<sstables::shared_sstable> most_interesting = most_interesting_bucket(std::move(buckets), 2, max_threshold);
return sstables::compaction_descriptor(std::move(most_interesting), service::get_local_compaction_priority());
return sstables::compaction_descriptor(std::move(most_interesting), table_s.get_sstable_set(), service::get_local_compaction_priority());
}

// if there is no sstable to compact in standard way, try compacting single sstable whose droppable tombstone
@@ -180,7 +182,7 @@ size_tiered_compaction_strategy::get_sstables_for_compaction(table_state& table_
auto it = std::min_element(sstables.begin(), sstables.end(), [] (auto& i, auto& j) {
return i->get_stats_metadata().min_timestamp < j->get_stats_metadata().min_timestamp;
});
return sstables::compaction_descriptor({ *it }, service::get_local_compaction_priority());
return sstables::compaction_descriptor({ *it }, table_s.get_sstable_set(), service::get_local_compaction_priority());
}
return sstables::compaction_descriptor();
}
@@ -201,7 +203,7 @@ int64_t size_tiered_compaction_strategy::estimated_pending_compactions(table_sta
int max_threshold = table_s.schema()->max_compaction_threshold();
std::vector<sstables::shared_sstable> sstables;

auto all_sstables = table_s.main_sstable_set().all();
auto all_sstables = table_s.get_sstable_set().all();
sstables.reserve(all_sstables->size());
for (auto& entry : *all_sstables) {
sstables.push_back(entry);
@@ -240,7 +242,7 @@ size_tiered_compaction_strategy::get_reshaping_job(std::vector<shared_sstable> i
// All sstables can be reshaped at once if the amount of overlapping will not cause memory usage to be high,
// which is possible because partitioned set is able to incrementally open sstables during compaction
if (sstable_set_overlapping_count(schema, input) <= max_sstables) {
compaction_descriptor desc(std::move(input), iop);
compaction_descriptor desc(std::move(input), std::optional<sstables::sstable_set>(), iop);
desc.options = compaction_type_options::make_reshape();
return desc;
}
@@ -256,7 +258,7 @@ size_tiered_compaction_strategy::get_reshaping_job(std::vector<shared_sstable> i
});
bucket.resize(max_sstables);
}
compaction_descriptor desc(std::move(bucket), iop);
compaction_descriptor desc(std::move(bucket), std::optional<sstables::sstable_set>(), iop);
desc.options = compaction_type_options::make_reshape();
return desc;
}
@@ -265,30 +267,4 @@ size_tiered_compaction_strategy::get_reshaping_job(std::vector<shared_sstable> i
return compaction_descriptor();
}

std::vector<compaction_descriptor>
size_tiered_compaction_strategy::get_cleanup_compaction_jobs(table_state& table_s, std::vector<shared_sstable> candidates) const {
std::vector<compaction_descriptor> ret;
const auto& schema = table_s.schema();
unsigned max_threshold = schema->max_compaction_threshold();

for (auto& bucket : get_buckets(candidates)) {
if (bucket.size() > max_threshold) {
// preserve token contiguity
std::ranges::sort(bucket, [&schema] (const shared_sstable& a, const shared_sstable& b) {
return a->get_first_decorated_key().tri_compare(*schema, b->get_first_decorated_key()) < 0;
});
}
auto it = bucket.begin();
while (it != bucket.end()) {
unsigned remaining = std::distance(it, bucket.end());
unsigned needed = std::min(remaining, max_threshold);
std::vector<shared_sstable> sstables;
std::move(it, it + needed, std::back_inserter(sstables));
ret.push_back(compaction_descriptor(std::move(sstables), service::get_local_compaction_priority()));
std::advance(it, needed);
}
}
return ret;
}

}
@@ -13,8 +13,6 @@
#include "sstables/sstables.hh"
#include <boost/algorithm/cxx11/any_of.hpp>

class size_tiered_backlog_tracker;

namespace sstables {

class size_tiered_compaction_strategy_options {
@@ -96,7 +94,19 @@ class size_tiered_compaction_strategy : public compaction_strategy_impl {
std::vector<sstables::shared_sstable>
most_interesting_bucket(std::vector<std::vector<sstables::shared_sstable>> buckets, unsigned min_threshold, unsigned max_threshold);

static bool is_bucket_interesting(const std::vector<sstables::shared_sstable>& bucket, int min_threshold) {
// Return the average size of a given list of sstables.
uint64_t avg_size(std::vector<sstables::shared_sstable> const& sstables) const {
assert(sstables.size() > 0); // this should never fail
uint64_t n = 0;

for (auto const& sstable : sstables) {
n += sstable->data_size();
}

return n / sstables.size();
}

bool is_bucket_interesting(const std::vector<sstables::shared_sstable>& bucket, int min_threshold) const {
return bucket.size() >= size_t(min_threshold);
}

@@ -113,8 +123,6 @@ public:

virtual compaction_descriptor get_sstables_for_compaction(table_state& table_s, strategy_control& control, std::vector<sstables::shared_sstable> candidates) override;

virtual std::vector<compaction_descriptor> get_cleanup_compaction_jobs(table_state& table_s, std::vector<shared_sstable> candidates) const override;

static int64_t estimated_pending_compactions(const std::vector<sstables::shared_sstable>& sstables,
int min_threshold, int max_threshold, size_tiered_compaction_strategy_options options);
virtual int64_t estimated_pending_compactions(table_state& table_s) const override;
@@ -134,7 +142,6 @@ public:

virtual compaction_descriptor get_reshaping_job(std::vector<shared_sstable> input, schema_ptr schema, const ::io_priority_class& iop, reshape_mode mode) override;

friend class ::size_tiered_backlog_tracker;
};

}

@@ -11,8 +11,6 @@

#include "schema_fwd.hh"
#include "sstables/sstable_set.hh"
#include "sstables/sstables_manager.hh"
#include "compaction_descriptor.hh"

class reader_permit;

@@ -30,19 +28,13 @@ public:
// min threshold as defined by table.
virtual unsigned min_compaction_threshold() const noexcept = 0;
virtual bool compaction_enforce_min_threshold() const noexcept = 0;
virtual const sstables::sstable_set& main_sstable_set() const = 0;
virtual const sstables::sstable_set& maintenance_sstable_set() const = 0;
virtual const sstables::sstable_set& get_sstable_set() const = 0;
virtual std::unordered_set<sstables::shared_sstable> fully_expired_sstables(const std::vector<sstables::shared_sstable>& sstables, gc_clock::time_point compaction_time) const = 0;
virtual const std::vector<sstables::shared_sstable>& compacted_undeleted_sstables() const noexcept = 0;
virtual sstables::compaction_strategy& get_compaction_strategy() const noexcept = 0;
virtual reader_permit make_compaction_reader_permit() const = 0;
virtual sstables::sstables_manager& get_sstables_manager() noexcept = 0;
virtual sstables::shared_sstable make_sstable() const = 0;
virtual sstables::sstable_writer_config configure_writer(sstring origin) const = 0;
virtual api::timestamp_type min_memtable_timestamp() const = 0;
virtual future<> update_compaction_history(utils::UUID compaction_id, sstring ks_name, sstring cf_name, std::chrono::milliseconds ended_at, int64_t bytes_in, int64_t bytes_out) = 0;
virtual future<> on_compaction_completion(sstables::compaction_completion_desc desc, sstables::offstrategy offstrategy) = 0;
virtual bool is_auto_compaction_disabled_by_user() const noexcept = 0;
};

}
@@ -178,7 +178,7 @@ time_window_compaction_strategy::get_reshaping_job(std::vector<shared_sstable> i
});
multi_window.resize(max_sstables);
}
compaction_descriptor desc(std::move(multi_window), iop);
compaction_descriptor desc(std::move(multi_window), std::optional<sstables::sstable_set>(), iop);
desc.options = compaction_type_options::make_reshape();
return desc;
}
@@ -204,7 +204,7 @@ time_window_compaction_strategy::get_reshaping_job(std::vector<shared_sstable> i
}
}
if (!single_window.empty()) {
compaction_descriptor desc(std::move(single_window), iop);
compaction_descriptor desc(std::move(single_window), std::optional<sstables::sstable_set>(), iop);
desc.options = compaction_type_options::make_reshape();
return desc;
}
@@ -221,27 +221,24 @@ time_window_compaction_strategy::get_sstables_for_compaction(table_state& table_
return compaction_descriptor();
}

auto now = db_clock::now();
if (now - _last_expired_check > _options.expired_sstable_check_frequency) {
clogger.debug("[{}] TWCS expired check sufficiently far in the past, checking for fully expired SSTables", fmt::ptr(this));
// Find fully expired SSTables. Those will be included no matter what.
std::unordered_set<shared_sstable> expired;

// Find fully expired SSTables. Those will be included no matter what.
auto expired = table_s.fully_expired_sstables(candidates, compaction_time);
if (!expired.empty()) {
clogger.debug("[{}] Going to compact {} expired sstables", fmt::ptr(this), expired.size());
return compaction_descriptor(has_only_fully_expired::yes, std::vector<shared_sstable>(expired.begin(), expired.end()), service::get_local_compaction_priority());
}
// Keep checking for fully_expired_sstables until we don't find
// any among the candidates, meaning they are either already compacted
// or registered for compaction.
_last_expired_check = now;
if (db_clock::now() - _last_expired_check > _options.expired_sstable_check_frequency) {
clogger.debug("TWCS expired check sufficiently far in the past, checking for fully expired SSTables");
expired = table_s.fully_expired_sstables(candidates, compaction_time);
_last_expired_check = db_clock::now();
} else {
clogger.debug("[{}] TWCS skipping check for fully expired SSTables", fmt::ptr(this));
clogger.debug("TWCS skipping check for fully expired SSTables");
}

if (!expired.empty()) {
clogger.debug("Going to compact {} expired sstables", expired.size());
return compaction_descriptor(has_only_fully_expired::yes, std::vector<shared_sstable>(expired.begin(), expired.end()), table_s.get_sstable_set(), service::get_local_compaction_priority());
}

auto compaction_candidates = get_next_non_expired_sstables(table_s, control, std::move(candidates), compaction_time);
clogger.debug("[{}] Going to compact {} non-expired sstables", fmt::ptr(this), compaction_candidates.size());
return compaction_descriptor(std::move(compaction_candidates), service::get_local_compaction_priority());
return compaction_descriptor(std::move(compaction_candidates), table_s.get_sstable_set(), service::get_local_compaction_priority());
}

time_window_compaction_strategy::bucket_compaction_mode
@@ -307,7 +304,7 @@ time_window_compaction_strategy::get_window_lower_bound(std::chrono::seconds sst
}

std::pair<std::map<timestamp_type, std::vector<shared_sstable>>, timestamp_type>
time_window_compaction_strategy::get_buckets(std::vector<shared_sstable> files, const time_window_compaction_strategy_options& options) {
time_window_compaction_strategy::get_buckets(std::vector<shared_sstable> files, time_window_compaction_strategy_options& options) {
std::map<timestamp_type, std::vector<shared_sstable>> buckets;

timestamp_type max_timestamp = 0;
@@ -412,14 +409,4 @@ void time_window_compaction_strategy::update_estimated_compaction_by_tasks(std::
_estimated_remaining_tasks = n;
}

std::vector<compaction_descriptor>
time_window_compaction_strategy::get_cleanup_compaction_jobs(table_state& table_s, std::vector<shared_sstable> candidates) const {
std::vector<compaction_descriptor> ret;
for (auto&& [_, sstables] : get_buckets(std::move(candidates), _options).first) {
auto per_window_jobs = size_tiered_compaction_strategy(_stcs_options).get_cleanup_compaction_jobs(table_s, std::move(sstables));
std::move(per_window_jobs.begin(), per_window_jobs.end(), std::back_inserter(ret));
}
return ret;
}

}
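get_window_lower_bound and get_buckets group sstables into buckets keyed by a per-window timestamp, so sstables written within the same window end up in the same bucket. A minimal sketch of that mapping (the 1-day window and the timestamp are made-up example values, and this illustrates the idea rather than the strategy's exact code):

#include <chrono>
#include <cstdint>
#include <iostream>

int main() {
    using namespace std::chrono;
    // Assume a 1-day compaction window and microsecond-resolution write timestamps.
    const int64_t window_us = duration_cast<microseconds>(hours(24)).count();
    const int64_t write_timestamp_us = 1'700'003'000'000'000;
    // The bucket key is the lower bound of the window containing the timestamp.
    const int64_t window_lower_bound = (write_timestamp_us / window_us) * window_us;
    std::cout << window_lower_bound << "\n"; // writes from the same day share this bucket key
}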
@@ -1,3 +1,6 @@
/*
*/

/*
* Copyright (C) 2017-present ScyllaDB
*
@@ -85,8 +88,6 @@ public:
public:
time_window_compaction_strategy(const std::map<sstring, sstring>& options);
virtual compaction_descriptor get_sstables_for_compaction(table_state& table_s, strategy_control& control, std::vector<shared_sstable> candidates) override;

virtual std::vector<compaction_descriptor> get_cleanup_compaction_jobs(table_state& table_s, std::vector<shared_sstable> candidates) const override;
private:
static timestamp_type
to_timestamp_type(time_window_compaction_strategy_options::timestamp_resolutions resolution, int64_t timestamp_from_sstable) {
@@ -122,7 +123,7 @@ public:
// @return A pair, where the left element is the bucket representation (map of timestamp to sstablereader),
// and the right is the highest timestamp seen
static std::pair<std::map<timestamp_type, std::vector<shared_sstable>>, timestamp_type>
get_buckets(std::vector<shared_sstable> files, const time_window_compaction_strategy_options& options);
get_buckets(std::vector<shared_sstable> files, time_window_compaction_strategy_options& options);

std::vector<shared_sstable>
newest_bucket(table_state& table_s, strategy_control& control, std::map<timestamp_type, std::vector<shared_sstable>> buckets,
Some files were not shown because too many files have changed in this diff.