Compare commits
256 Commits
next-5.4
...
branch-5.0
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f6c2624c86 | ||
|
|
f7d9afd209 | ||
|
|
b011cc2e78 | ||
|
|
fb466dd7b7 | ||
|
|
697e090659 | ||
|
|
2c518f3131 | ||
|
|
e941a5ac34 | ||
|
|
3a7ce5e8aa | ||
|
|
efa4f312f5 | ||
|
|
fb4b71ea02 | ||
|
|
7387922a29 | ||
|
|
cb78c3bf2c | ||
|
|
aeac63a3ee | ||
|
|
e7b50fb8d3 | ||
|
|
6b21f2a351 | ||
|
|
0db8e627a5 | ||
|
|
f1121d2149 | ||
|
|
a0ca8abe42 | ||
|
|
8bceac1713 | ||
|
|
6bcc7c6ed5 | ||
|
|
67f85875cc | ||
|
|
8b874cd4e4 | ||
|
|
b08c582134 | ||
|
|
41556b5f63 | ||
|
|
23e7e594c0 | ||
|
|
e6ac13314d | ||
|
|
382d815459 | ||
|
|
a867b2c0e5 | ||
|
|
846edf78c6 | ||
|
|
0ccc07322b | ||
|
|
0b170192a1 | ||
|
|
fd4b2a3319 | ||
|
|
416929fb2a | ||
|
|
9d8d7048eb | ||
|
|
bae4155ab2 | ||
|
|
d6e2a326cf | ||
|
|
15645ff40b | ||
|
|
a808fc7172 | ||
|
|
dd260bfa82 | ||
|
|
c46935ed5c | ||
|
|
985d6bc4c2 | ||
|
|
7673ff4ae3 | ||
|
|
c441eebf46 | ||
|
|
bf4fa80dd7 | ||
|
|
2010231fe9 | ||
|
|
0a51eb55e3 | ||
|
|
d9c6c6283b | ||
|
|
90a5344261 | ||
|
|
68da667288 | ||
|
|
9adb1a8fdd | ||
|
|
7623fe01b7 | ||
|
|
3b0a0c4876 | ||
|
|
019d5cde1b | ||
|
|
a2e255833a | ||
|
|
f4aa5cacb1 | ||
|
|
8ea9a16f9e | ||
|
|
1aa5283a38 | ||
|
|
2e7b1858ad | ||
|
|
2542b57ddc | ||
|
|
01a9871fc3 | ||
|
|
6bb7fac8d8 | ||
|
|
5dff7489b1 | ||
|
|
2775b1d136 | ||
|
|
2ae5675c0f | ||
|
|
d507ad9424 | ||
|
|
413af945c0 | ||
|
|
9a71680dc7 | ||
|
|
94b8baa797 | ||
|
|
e372a5fe0a | ||
|
|
692e5ed175 | ||
|
|
5a299f65ff | ||
|
|
f4ae2fa5f9 | ||
|
|
07c20bdfea | ||
|
|
8a36c4be54 | ||
|
|
bf92c2b44c | ||
|
|
0e388d2140 | ||
|
|
288eb9d231 | ||
|
|
9219a59802 | ||
|
|
f9cea4dc51 | ||
|
|
081b2b76cc | ||
|
|
dfb229a18a | ||
|
|
60da855c2d | ||
|
|
1718861e94 | ||
|
|
e03e9b1abe | ||
|
|
26c51025c1 | ||
|
|
5c39a4524a | ||
|
|
9823e8d9c5 | ||
|
|
b48c9cae95 | ||
|
|
14077d2def | ||
|
|
25508705a8 | ||
|
|
347da028e9 | ||
|
|
874fa15202 | ||
|
|
99c03cb2af | ||
|
|
6c35d3c5cd | ||
|
|
707622ce15 | ||
|
|
bab36b604c | ||
|
|
8840711e79 | ||
|
|
af18bb3fe9 | ||
|
|
6003cba7a8 | ||
|
|
e9afd076eb | ||
|
|
c5f732d42a | ||
|
|
13a1408135 | ||
|
|
6685e00dd4 | ||
|
|
350bb57291 | ||
|
|
e186ad5b6c | ||
|
|
139e9afc89 | ||
|
|
a42c6f190c | ||
|
|
2b8f0cbd97 | ||
|
|
a2a762e18d | ||
|
|
aa973e2b9e | ||
|
|
e0777f1112 | ||
|
|
cc6311cbc7 | ||
|
|
0354e13718 | ||
|
|
2750d2e94b | ||
|
|
b4383a389b | ||
|
|
f667c5923a | ||
|
|
e4ba0c56df | ||
|
|
329d55cc4f | ||
|
|
b956293f47 | ||
|
|
6a8c2d3f56 | ||
|
|
27a35c7f98 | ||
|
|
d83134a245 | ||
|
|
b844d14829 | ||
|
|
184df0393e | ||
|
|
1b550dd301 | ||
|
|
01ce53d7fb | ||
|
|
e9c7f89b32 | ||
|
|
93f468c12c | ||
|
|
e54ae9efd9 | ||
|
|
ef40e59c0e | ||
|
|
8c56b0b268 | ||
|
|
fc78d88783 | ||
|
|
31a20c4c54 | ||
|
|
7e42bcfd61 | ||
|
|
2107ffe2d2 | ||
|
|
5a97a1060e | ||
|
|
2b0487c900 | ||
|
|
d3b3c53d9f | ||
|
|
50c2c1b1d4 | ||
|
|
aa647a637a | ||
|
|
2c0040fcb3 | ||
|
|
54564adb7c | ||
|
|
839876e8f2 | ||
|
|
36002e2b7c | ||
|
|
91a8f9e09b | ||
|
|
bc29f350dd | ||
|
|
4fe571f470 | ||
|
|
ebf38eaead | ||
|
|
1c82766f33 | ||
|
|
e1f78c33b4 | ||
|
|
0634b5f734 | ||
|
|
6f020b26e1 | ||
|
|
7f8dcc5657 | ||
|
|
20451760fe | ||
|
|
51b031d04e | ||
|
|
82d1446ca9 | ||
|
|
e0acb0766d | ||
|
|
4f26d489a0 | ||
|
|
43cbc5c836 | ||
|
|
f0c521efdf | ||
|
|
b9a61c8e9a | ||
|
|
32aa1e5287 | ||
|
|
da6a126d79 | ||
|
|
d07e902983 | ||
|
|
3c0fc42f84 | ||
|
|
964ccf9192 | ||
|
|
dfdc128faf | ||
|
|
299122e78d | ||
|
|
23a34d7e42 | ||
|
|
67a2f3aa67 | ||
|
|
66e8cf8cea | ||
|
|
35b66c844c | ||
|
|
9e7a1340b9 | ||
|
|
d5a0750ef3 | ||
|
|
618c483c73 | ||
|
|
f10fd1bc12 | ||
|
|
1891f10141 | ||
|
|
b177dacd36 | ||
|
|
283a722923 | ||
|
|
522d0a81e7 | ||
|
|
cd13911db4 | ||
|
|
32423ebc38 | ||
|
|
97054ee691 | ||
|
|
34085c364f | ||
|
|
323521f4c8 | ||
|
|
1ad59d6a7b | ||
|
|
d3045df9c9 | ||
|
|
be48b7aa8b | ||
|
|
3c4688bcfa | ||
|
|
cc22021876 | ||
|
|
c9e79cb4a3 | ||
|
|
f28542a71e | ||
|
|
527a75a4c0 | ||
|
|
df00f8fcfb | ||
|
|
41a00c744f | ||
|
|
2d7b6cd702 | ||
|
|
ff79228178 | ||
|
|
1803124cc6 | ||
|
|
6fcbf66bfb | ||
|
|
e9a3dee234 | ||
|
|
279cd44c7f | ||
|
|
c99f768381 | ||
|
|
89a540d54a | ||
|
|
338edcc02e | ||
|
|
a8eb5164b2 | ||
|
|
9accb44f9c | ||
|
|
8878007106 | ||
|
|
9da666e778 | ||
|
|
aca355dec1 | ||
|
|
efbb2efd3f | ||
|
|
44dc5c4a1d | ||
|
|
6b34ba3a4f | ||
|
|
f1e25cb4a6 | ||
|
|
c9798746ae | ||
|
|
7f70ffc5ce | ||
|
|
551636ec89 | ||
|
|
e1130a01e7 | ||
|
|
b0233cb7c5 | ||
|
|
e480c5bf4d | ||
|
|
7d90f7e93f | ||
|
|
3e6e8579c6 | ||
|
|
3e98e17d18 | ||
|
|
a214f8cf6e | ||
|
|
e8b92fe34d | ||
|
|
fa479c84ac | ||
|
|
40c26dd2c5 | ||
|
|
2c6f069fd1 | ||
|
|
e27dff0c50 | ||
|
|
3f03260ffb | ||
|
|
1315135fca | ||
|
|
f92622e0de | ||
|
|
3bca608db5 | ||
|
|
a93b72d5dd | ||
|
|
d58ca2edbd | ||
|
|
75740ace2a | ||
|
|
d7a1bf6331 | ||
|
|
bbd7d657cc | ||
|
|
f5bf4c81d1 | ||
|
|
02e8336659 | ||
|
|
601812e11b | ||
|
|
ea466320d2 | ||
|
|
25ea831a15 | ||
|
|
8648c79c9e | ||
|
|
7ae4d0e6f8 | ||
|
|
f3564db941 | ||
|
|
97caf12836 | ||
|
|
839d9ef41a | ||
|
|
782bd50f92 | ||
|
|
0a4d971b4a | ||
|
|
22562f767f | ||
|
|
eb80dd1db5 | ||
|
|
51d699ee21 | ||
|
|
83a33bff8c | ||
|
|
273563b9ad | ||
|
|
891990ec09 | ||
|
|
da0cd2b107 |
1
.gitattributes
vendored
1
.gitattributes
vendored
@@ -1,3 +1,2 @@
|
||||
*.cc diff=cpp
|
||||
*.hh diff=cpp
|
||||
*.svg binary
|
||||
|
||||
42
.github/CODEOWNERS
vendored
42
.github/CODEOWNERS
vendored
@@ -2,17 +2,17 @@
|
||||
auth/* @elcallio @vladzcloudius
|
||||
|
||||
# CACHE
|
||||
row_cache* @tgrabiec
|
||||
*mutation* @tgrabiec
|
||||
test/boost/mvcc* @tgrabiec
|
||||
row_cache* @tgrabiec @haaawk
|
||||
*mutation* @tgrabiec @haaawk
|
||||
test/boost/mvcc* @tgrabiec @haaawk
|
||||
|
||||
# CDC
|
||||
cdc/* @kbr- @elcallio @piodul @jul-stas
|
||||
test/cql/cdc_* @kbr- @elcallio @piodul @jul-stas
|
||||
test/boost/cdc_* @kbr- @elcallio @piodul @jul-stas
|
||||
cdc/* @haaawk @kbr- @elcallio @piodul @jul-stas
|
||||
test/cql/cdc_* @haaawk @kbr- @elcallio @piodul @jul-stas
|
||||
test/boost/cdc_* @haaawk @kbr- @elcallio @piodul @jul-stas
|
||||
|
||||
# COMMITLOG / BATCHLOG
|
||||
db/commitlog/* @elcallio @eliransin
|
||||
db/commitlog/* @elcallio
|
||||
db/batch* @elcallio
|
||||
|
||||
# COORDINATOR
|
||||
@@ -25,15 +25,11 @@ compaction/* @raphaelsc @nyh
|
||||
transport/*
|
||||
|
||||
# CQL QUERY LANGUAGE
|
||||
cql3/* @tgrabiec @cvybhu @nyh
|
||||
cql3/* @tgrabiec @psarna @cvybhu
|
||||
|
||||
# COUNTERS
|
||||
counters* @jul-stas
|
||||
tests/counter_test* @jul-stas
|
||||
|
||||
# DOCS
|
||||
docs/* @annastuchlik @tzach
|
||||
docs/alternator @annastuchlik @tzach @nyh @havaker @nuivall
|
||||
counters* @haaawk @jul-stas
|
||||
tests/counter_test* @haaawk @jul-stas
|
||||
|
||||
# GOSSIP
|
||||
gms/* @tgrabiec @asias
|
||||
@@ -45,9 +41,9 @@ dist/docker/*
|
||||
utils/logalloc* @tgrabiec
|
||||
|
||||
# MATERIALIZED VIEWS
|
||||
db/view/* @nyh @cvybhu @piodul
|
||||
cql3/statements/*view* @nyh @cvybhu @piodul
|
||||
test/boost/view_* @nyh @cvybhu @piodul
|
||||
db/view/* @nyh @psarna
|
||||
cql3/statements/*view* @nyh @psarna
|
||||
test/boost/view_* @nyh @psarna
|
||||
|
||||
# PACKAGING
|
||||
dist/* @syuu1228
|
||||
@@ -62,9 +58,9 @@ service/migration* @tgrabiec @nyh
|
||||
schema* @tgrabiec @nyh
|
||||
|
||||
# SECONDARY INDEXES
|
||||
index/* @nyh @cvybhu @piodul
|
||||
cql3/statements/*index* @nyh @cvybhu @piodul
|
||||
test/boost/*index* @nyh @cvybhu @piodul
|
||||
db/index/* @nyh @psarna
|
||||
cql3/statements/*index* @nyh @psarna
|
||||
test/boost/*index* @nyh @psarna
|
||||
|
||||
# SSTABLES
|
||||
sstables/* @tgrabiec @raphaelsc @nyh
|
||||
@@ -74,11 +70,11 @@ streaming/* @tgrabiec @asias
|
||||
service/storage_service.* @tgrabiec @asias
|
||||
|
||||
# ALTERNATOR
|
||||
alternator/* @nyh @havaker @nuivall
|
||||
test/alternator/* @nyh @havaker @nuivall
|
||||
alternator/* @nyh @psarna
|
||||
test/alternator/* @nyh @psarna
|
||||
|
||||
# HINTED HANDOFF
|
||||
db/hints/* @piodul @vladzcloudius @eliransin
|
||||
db/hints/* @haaawk @piodul @vladzcloudius
|
||||
|
||||
# REDIS
|
||||
redis/* @nyh @syuu1228
|
||||
|
||||
87
.github/scripts/label_promoted_commits.py
vendored
87
.github/scripts/label_promoted_commits.py
vendored
@@ -1,87 +0,0 @@
|
||||
from github import Github
|
||||
import argparse
|
||||
import re
|
||||
import sys
|
||||
import os
|
||||
|
||||
try:
|
||||
github_token = os.environ["GITHUB_TOKEN"]
|
||||
except KeyError:
|
||||
print("Please set the 'GITHUB_TOKEN' environment variable")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def parser():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('--repository', type=str, required=True,
|
||||
help='Github repository name (e.g., scylladb/scylladb)')
|
||||
parser.add_argument('--commit_before_merge', type=str, required=True, help='Git commit ID to start labeling from ('
|
||||
'newest commit).')
|
||||
parser.add_argument('--commit_after_merge', type=str, required=True,
|
||||
help='Git commit ID to end labeling at (oldest '
|
||||
'commit, exclusive).')
|
||||
parser.add_argument('--update_issue', type=bool, default=False, help='Set True to update issues when backport was '
|
||||
'done')
|
||||
parser.add_argument('--ref', type=str, required=True, help='PR target branch')
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def add_comment_and_close_pr(pr, comment):
|
||||
if pr.state == 'open':
|
||||
pr.create_issue_comment(comment)
|
||||
pr.edit(state="closed")
|
||||
|
||||
|
||||
def mark_backport_done(repo, ref_pr_number, branch):
|
||||
pr = repo.get_pull(int(ref_pr_number))
|
||||
label_to_remove = f'backport/{branch}'
|
||||
label_to_add = f'{label_to_remove}-done'
|
||||
current_labels = [label.name for label in pr.get_labels()]
|
||||
if label_to_remove in current_labels:
|
||||
pr.remove_from_labels(label_to_remove)
|
||||
if label_to_add not in current_labels:
|
||||
pr.add_to_labels(label_to_add)
|
||||
|
||||
|
||||
def main():
|
||||
# This script is triggered by a push event to either the master branch or a branch named branch-x.y (where x and y represent version numbers). Based on the pushed branch, the script performs the following actions:
|
||||
# - When ref branch is `master`, it will add the `promoted-to-master` label, which we need later for the auto backport process
|
||||
# - When ref branch is `branch-x.y` (which means we backported a patch), it will replace in the original PR the `backport/x.y` label with `backport/x.y-done` and will close the backport PR (Since GitHub close only the one referring to default branch)
|
||||
args = parser()
|
||||
pr_pattern = re.compile(r'Closes .*#([0-9]+)')
|
||||
target_branch = re.search(r'branch-(\d+\.\d+)', args.ref)
|
||||
g = Github(github_token)
|
||||
repo = g.get_repo(args.repository, lazy=False)
|
||||
commits = repo.compare(head=args.commit_after_merge, base=args.commit_before_merge)
|
||||
processed_prs = set()
|
||||
# Print commit information
|
||||
for commit in commits.commits:
|
||||
print(f'Commit sha is: {commit.sha}')
|
||||
match = pr_pattern.search(commit.commit.message)
|
||||
if match:
|
||||
pr_number = int(match.group(1))
|
||||
if pr_number in processed_prs:
|
||||
continue
|
||||
if target_branch:
|
||||
pr = repo.get_pull(pr_number)
|
||||
branch_name = target_branch[1]
|
||||
refs_pr = re.findall(r'Refs (?:#|https.*?)(\d+)', pr.body)
|
||||
if refs_pr:
|
||||
print(f'branch-{target_branch.group(1)}, pr number is: {pr_number}')
|
||||
# 1. change the backport label of the parent PR to note that
|
||||
# we've merge the corresponding backport PR
|
||||
# 2. close the backport PR and leave a comment on it to note
|
||||
# that it has been merged with a certain git commit,
|
||||
ref_pr_number = refs_pr[0]
|
||||
mark_backport_done(repo, ref_pr_number, branch_name)
|
||||
comment = f'Closed via {commit.sha}'
|
||||
add_comment_and_close_pr(pr, comment)
|
||||
else:
|
||||
print(f'master branch, pr number is: {pr_number}')
|
||||
pr = repo.get_pull(pr_number)
|
||||
pr.add_to_labels('promoted-to-master')
|
||||
processed_prs.add(pr_number)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
36
.github/workflows/add-label-when-promoted.yaml
vendored
36
.github/workflows/add-label-when-promoted.yaml
vendored
@@ -1,36 +0,0 @@
|
||||
name: Check if commits are promoted
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- branch-*.*
|
||||
|
||||
env:
|
||||
DEFAULT_BRANCH: 'master'
|
||||
|
||||
jobs:
|
||||
check-commit:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
pull-requests: write
|
||||
issues: write
|
||||
steps:
|
||||
- name: Dump GitHub context
|
||||
env:
|
||||
GITHUB_CONTEXT: ${{ toJson(github) }}
|
||||
run: echo "$GITHUB_CONTEXT"
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
repository: ${{ github.repository }}
|
||||
ref: ${{ env.DEFAULT_BRANCH }}
|
||||
fetch-depth: 0 # Fetch all history for all tags and branches
|
||||
|
||||
- name: Install dependencies
|
||||
run: sudo apt-get install -y python3-github
|
||||
|
||||
- name: Run python script
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: python .github/scripts/label_promoted_commits.py --commit_before_merge ${{ github.event.before }} --commit_after_merge ${{ github.event.after }} --repository ${{ github.repository }} --ref ${{ github.ref }}
|
||||
17
.github/workflows/docs-amplify-enhanced.yaml
vendored
17
.github/workflows/docs-amplify-enhanced.yaml
vendored
@@ -1,17 +0,0 @@
|
||||
name: "Docs / Amplify enhanced"
|
||||
|
||||
on: issue_comment
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.event.issue.pull_request }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Amplify enhanced
|
||||
env:
|
||||
TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
uses: scylladb/sphinx-scylladb-theme/.github/actions/amplify-enhanced@master
|
||||
40
.github/workflows/docs-pages.yaml
vendored
40
.github/workflows/docs-pages.yaml
vendored
@@ -1,40 +0,0 @@
|
||||
name: "Docs / Publish"
|
||||
# For more information,
|
||||
# see https://sphinx-theme.scylladb.com/stable/deployment/production.html#available-workflows
|
||||
|
||||
env:
|
||||
FLAG: ${{ github.repository == 'scylladb/scylla-enterprise' && 'enterprise' || 'opensource' }}
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- 'master'
|
||||
- 'enterprise'
|
||||
paths:
|
||||
- "docs/**"
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
release:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
persist-credentials: false
|
||||
fetch-depth: 0
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v3
|
||||
with:
|
||||
python-version: 3.7
|
||||
- name: Set up env
|
||||
run: make -C docs FLAG="${{ env.FLAG }}" setupenv
|
||||
- name: Build docs
|
||||
run: make -C docs FLAG="${{ env.FLAG }}" multiversion
|
||||
- name: Build redirects
|
||||
run: make -C docs FLAG="${{ env.FLAG }}" redirects
|
||||
- name: Deploy docs to GitHub Pages
|
||||
run: ./docs/_utils/deploy.sh
|
||||
if: (github.ref_name == 'master' && env.FLAG == 'opensource') || (github.ref_name == 'enterprise' && env.FLAG == 'enterprise')
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
29
.github/workflows/docs-pages@v2.yaml
vendored
Normal file
29
.github/workflows/docs-pages@v2.yaml
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
name: "Docs / Publish"
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
paths:
|
||||
- "docs/**"
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
release:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
persist-credentials: false
|
||||
fetch-depth: 0
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v1
|
||||
with:
|
||||
python-version: 3.7
|
||||
- name: Build docs
|
||||
run: make -C docs multiversion
|
||||
- name: Deploy
|
||||
run: ./docs/_utils/deploy.sh
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
32
.github/workflows/docs-pr.yaml
vendored
32
.github/workflows/docs-pr.yaml
vendored
@@ -1,32 +0,0 @@
|
||||
name: "Docs / Build PR"
|
||||
# For more information,
|
||||
# see https://sphinx-theme.scylladb.com/stable/deployment/production.html#available-workflows
|
||||
|
||||
env:
|
||||
FLAG: ${{ github.repository == 'scylladb/scylla-enterprise' && 'enterprise' || 'opensource' }}
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
- enterprise
|
||||
paths:
|
||||
- "docs/**"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
persist-credentials: false
|
||||
fetch-depth: 0
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v3
|
||||
with:
|
||||
python-version: 3.7
|
||||
- name: Set up env
|
||||
run: make -C docs FLAG="${{ env.FLAG }}" setupenv
|
||||
- name: Build docs
|
||||
run: make -C docs FLAG="${{ env.FLAG }}" test
|
||||
25
.github/workflows/docs-pr@v1.yaml
vendored
Normal file
25
.github/workflows/docs-pr@v1.yaml
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
name: "Docs / Build PR"
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
paths:
|
||||
- "docs/**"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
persist-credentials: false
|
||||
fetch-depth: 0
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v1
|
||||
with:
|
||||
python-version: 3.7
|
||||
- name: Build docs
|
||||
run: make -C docs test
|
||||
5
.gitignore
vendored
5
.gitignore
vendored
@@ -22,11 +22,10 @@ resources
|
||||
.pytest_cache
|
||||
/expressions.tokens
|
||||
tags
|
||||
!db/tags/
|
||||
testlog
|
||||
test/*/*.reject
|
||||
.vscode
|
||||
docs/_build
|
||||
docs/poetry.lock
|
||||
compile_commands.json
|
||||
.ccls-cache/
|
||||
.mypy_cache
|
||||
.envrc
|
||||
|
||||
9
.gitmodules
vendored
9
.gitmodules
vendored
@@ -6,6 +6,12 @@
|
||||
path = swagger-ui
|
||||
url = ../scylla-swagger-ui
|
||||
ignore = dirty
|
||||
[submodule "libdeflate"]
|
||||
path = libdeflate
|
||||
url = ../libdeflate
|
||||
[submodule "abseil"]
|
||||
path = abseil
|
||||
url = ../abseil-cpp
|
||||
[submodule "scylla-jmx"]
|
||||
path = tools/jmx
|
||||
url = ../scylla-jmx
|
||||
@@ -15,6 +21,3 @@
|
||||
[submodule "scylla-python3"]
|
||||
path = tools/python3
|
||||
url = ../scylla-python3
|
||||
[submodule "tools/cqlsh"]
|
||||
path = tools/cqlsh
|
||||
url = ../scylla-cqlsh
|
||||
|
||||
3
.mailmap
3
.mailmap
@@ -1,3 +0,0 @@
|
||||
Avi Kivity <avi@scylladb.com> Avi Kivity' via ScyllaDB development <scylladb-dev@googlegroups.com>
|
||||
Raphael S. Carvalho <raphaelsc@scylladb.com> Raphael S. Carvalho' via ScyllaDB development <scylladb-dev@googlegroups.com>
|
||||
Pavel Emelyanov <xemul@scylladb.com> Pavel Emelyanov' via ScyllaDB development <scylladb-dev@googlegroups.com>
|
||||
912
CMakeLists.txt
912
CMakeLists.txt
@@ -1,229 +1,797 @@
|
||||
cmake_minimum_required(VERSION 3.27)
|
||||
cmake_minimum_required(VERSION 3.18)
|
||||
|
||||
project(scylla)
|
||||
|
||||
include(CTest)
|
||||
|
||||
list(APPEND CMAKE_MODULE_PATH
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/cmake
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/seastar/cmake)
|
||||
|
||||
# Set the possible values of build type for cmake-gui
|
||||
set(scylla_build_types
|
||||
"Debug" "Release" "Dev" "Sanitize" "Coverage")
|
||||
set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS
|
||||
${scylla_build_types})
|
||||
if(NOT CMAKE_BUILD_TYPE)
|
||||
set(CMAKE_BUILD_TYPE "Release" CACHE
|
||||
STRING "Choose the type of build." FORCE)
|
||||
message(WARNING "CMAKE_BUILD_TYPE not specified, Using 'Release'")
|
||||
elseif(NOT CMAKE_BUILD_TYPE IN_LIST scylla_build_types)
|
||||
message(FATAL_ERROR "Unknown CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}. "
|
||||
"Following types are supported: ${scylla_build_types}")
|
||||
if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
|
||||
message(STATUS "Setting build type to 'Release' as none was specified.")
|
||||
set(CMAKE_BUILD_TYPE "Release" CACHE
|
||||
STRING "Choose the type of build." FORCE)
|
||||
# Set the possible values of build type for cmake-gui
|
||||
set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS
|
||||
"Debug" "Release" "Dev" "Sanitize")
|
||||
endif()
|
||||
string(TOUPPER "${CMAKE_BUILD_TYPE}" build_mode)
|
||||
include(mode.${build_mode})
|
||||
include(mode.common)
|
||||
add_compile_definitions(
|
||||
${Seastar_DEFINITIONS_${build_mode}}
|
||||
FMT_DEPRECATED_OSTREAM)
|
||||
include(limit_jobs)
|
||||
# Configure Seastar compile options to align with Scylla
|
||||
set(CMAKE_CXX_STANDARD "20" CACHE INTERNAL "")
|
||||
set(CMAKE_CXX_EXTENSIONS ON CACHE INTERNAL "")
|
||||
set(CMAKE_CXX_VISIBILITY_PRESET hidden)
|
||||
|
||||
set(Seastar_TESTING ON CACHE BOOL "" FORCE)
|
||||
set(Seastar_API_LEVEL 7 CACHE STRING "" FORCE)
|
||||
set(Seastar_APPS ON CACHE BOOL "" FORCE)
|
||||
set(Seastar_EXCLUDE_APPS_FROM_ALL ON CACHE BOOL "" FORCE)
|
||||
if(CMAKE_BUILD_TYPE)
|
||||
string(TOLOWER "${CMAKE_BUILD_TYPE}" BUILD_TYPE)
|
||||
else()
|
||||
set(BUILD_TYPE "release")
|
||||
endif()
|
||||
|
||||
function(default_target_arch arch)
|
||||
set(x86_instruction_sets i386 i686 x86_64)
|
||||
if(CMAKE_SYSTEM_PROCESSOR IN_LIST x86_instruction_sets)
|
||||
set(${arch} "westmere" PARENT_SCOPE)
|
||||
elseif(CMAKE_SYSTEM_PROCESSOR EQUAL "aarch64")
|
||||
set(${arch} "armv8-a+crc+crypto" PARENT_SCOPE)
|
||||
else()
|
||||
set(${arch} "" PARENT_SCOPE)
|
||||
endif()
|
||||
endfunction()
|
||||
default_target_arch(target_arch)
|
||||
if(target_arch)
|
||||
set(target_arch_flag "-march=${target_arch}")
|
||||
endif()
|
||||
|
||||
set(cxx_coro_flag)
|
||||
if (CMAKE_CXX_COMPILER_ID MATCHES GNU)
|
||||
set(cxx_coro_flag -fcoroutines)
|
||||
endif()
|
||||
|
||||
# Configure Seastar compile options to align with Scylla
|
||||
set(Seastar_CXX_FLAGS ${cxx_coro_flag} ${target_arch_flag} CACHE INTERNAL "" FORCE)
|
||||
set(Seastar_CXX_DIALECT gnu++20 CACHE INTERNAL "" FORCE)
|
||||
|
||||
add_subdirectory(seastar)
|
||||
add_subdirectory(abseil)
|
||||
# Exclude absl::strerror from the default "all" target since it's not
|
||||
# used in Scylla build and, moreover, makes use of deprecated glibc APIs,
|
||||
# such as sys_nerr, which are not exposed from "stdio.h" since glibc 2.32,
|
||||
# which happens to be the case for recent Fedora distribution versions.
|
||||
#
|
||||
# Need to use the internal "absl_strerror" target name instead of namespaced
|
||||
# variant because `set_target_properties` does not understand the latter form,
|
||||
# unfortunately.
|
||||
set_target_properties(absl_strerror PROPERTIES EXCLUDE_FROM_ALL TRUE)
|
||||
|
||||
# System libraries dependencies
|
||||
find_package(Boost REQUIRED
|
||||
COMPONENTS filesystem program_options system thread regex unit_test_framework)
|
||||
find_package(Boost COMPONENTS filesystem program_options system thread regex REQUIRED)
|
||||
find_package(Lua REQUIRED)
|
||||
find_package(ZLIB REQUIRED)
|
||||
find_package(ICU COMPONENTS uc i18n REQUIRED)
|
||||
find_package(absl COMPONENTS hash raw_hash_set REQUIRED)
|
||||
find_package(libdeflate REQUIRED)
|
||||
find_package(libxcrypt REQUIRED)
|
||||
find_package(Snappy REQUIRED)
|
||||
find_package(RapidJSON REQUIRED)
|
||||
find_package(Thrift REQUIRED)
|
||||
find_package(xxHash REQUIRED)
|
||||
find_package(ICU COMPONENTS uc REQUIRED)
|
||||
|
||||
set(scylla_gen_build_dir "${CMAKE_BINARY_DIR}/gen")
|
||||
file(MAKE_DIRECTORY "${scylla_gen_build_dir}")
|
||||
set(scylla_build_dir "${CMAKE_BINARY_DIR}/build/${BUILD_TYPE}")
|
||||
set(scylla_gen_build_dir "${scylla_build_dir}/gen")
|
||||
file(MAKE_DIRECTORY "${scylla_build_dir}" "${scylla_gen_build_dir}")
|
||||
|
||||
include(add_version_library)
|
||||
generate_scylla_version()
|
||||
# Place libraries, executables and archives in ${buildroot}/build/${mode}/
|
||||
foreach(mode RUNTIME LIBRARY ARCHIVE)
|
||||
set(CMAKE_${mode}_OUTPUT_DIRECTORY "${scylla_build_dir}")
|
||||
endforeach()
|
||||
|
||||
add_library(scylla-main STATIC)
|
||||
target_sources(scylla-main
|
||||
PRIVATE
|
||||
# Generate C++ source files from thrift definitions
|
||||
function(scylla_generate_thrift)
|
||||
set(one_value_args TARGET VAR IN_FILE OUT_DIR SERVICE)
|
||||
cmake_parse_arguments(args "" "${one_value_args}" "" ${ARGN})
|
||||
|
||||
get_filename_component(in_file_name ${args_IN_FILE} NAME_WE)
|
||||
|
||||
set(aux_out_file_name ${args_OUT_DIR}/${in_file_name})
|
||||
set(outputs
|
||||
${aux_out_file_name}_types.cpp
|
||||
${aux_out_file_name}_types.h
|
||||
${aux_out_file_name}_constants.cpp
|
||||
${aux_out_file_name}_constants.h
|
||||
${args_OUT_DIR}/${args_SERVICE}.cpp
|
||||
${args_OUT_DIR}/${args_SERVICE}.h)
|
||||
|
||||
add_custom_command(
|
||||
DEPENDS
|
||||
${args_IN_FILE}
|
||||
thrift
|
||||
OUTPUT ${outputs}
|
||||
COMMAND ${CMAKE_COMMAND} -E make_directory ${args_OUT_DIR}
|
||||
COMMAND thrift -gen cpp:cob_style,no_skeleton -out "${args_OUT_DIR}" "${args_IN_FILE}")
|
||||
|
||||
add_custom_target(${args_TARGET}
|
||||
DEPENDS ${outputs})
|
||||
|
||||
set(${args_VAR} ${outputs} PARENT_SCOPE)
|
||||
endfunction()
|
||||
|
||||
scylla_generate_thrift(
|
||||
TARGET scylla_thrift_gen_cassandra
|
||||
VAR scylla_thrift_gen_cassandra_files
|
||||
IN_FILE "${CMAKE_SOURCE_DIR}/interface/cassandra.thrift"
|
||||
OUT_DIR ${scylla_gen_build_dir}
|
||||
SERVICE Cassandra)
|
||||
|
||||
# Parse antlr3 grammar files and generate C++ sources
|
||||
function(scylla_generate_antlr3)
|
||||
set(one_value_args TARGET VAR IN_FILE OUT_DIR)
|
||||
cmake_parse_arguments(args "" "${one_value_args}" "" ${ARGN})
|
||||
|
||||
get_filename_component(in_file_pure_name ${args_IN_FILE} NAME)
|
||||
get_filename_component(stem ${in_file_pure_name} NAME_WE)
|
||||
|
||||
set(outputs
|
||||
"${args_OUT_DIR}/${stem}Lexer.hpp"
|
||||
"${args_OUT_DIR}/${stem}Lexer.cpp"
|
||||
"${args_OUT_DIR}/${stem}Parser.hpp"
|
||||
"${args_OUT_DIR}/${stem}Parser.cpp")
|
||||
|
||||
add_custom_command(
|
||||
DEPENDS
|
||||
${args_IN_FILE}
|
||||
OUTPUT ${outputs}
|
||||
# Remove #ifdef'ed code from the grammar source code
|
||||
COMMAND sed -e "/^#if 0/,/^#endif/d" "${args_IN_FILE}" > "${args_OUT_DIR}/${in_file_pure_name}"
|
||||
COMMAND antlr3 "${args_OUT_DIR}/${in_file_pure_name}"
|
||||
# We replace many local `ExceptionBaseType* ex` variables with a single function-scope one.
|
||||
# Because we add such a variable to every function, and because `ExceptionBaseType` is not a global
|
||||
# name, we also add a global typedef to avoid compilation errors.
|
||||
COMMAND sed -i -e "/^.*On :.*$/d" "${args_OUT_DIR}/${stem}Lexer.hpp"
|
||||
COMMAND sed -i -e "/^.*On :.*$/d" "${args_OUT_DIR}/${stem}Lexer.cpp"
|
||||
COMMAND sed -i -e "/^.*On :.*$/d" "${args_OUT_DIR}/${stem}Parser.hpp"
|
||||
COMMAND sed -i
|
||||
-e "s/^\\( *\\)\\(ImplTraits::CommonTokenType\\* [a-zA-Z0-9_]* = NULL;\\)$/\\1const \\2/"
|
||||
-e "/^.*On :.*$/d"
|
||||
-e "1i using ExceptionBaseType = int;"
|
||||
-e "s/^{/{ ExceptionBaseType\\* ex = nullptr;/; s/ExceptionBaseType\\* ex = new/ex = new/; s/exceptions::syntax_exception e/exceptions::syntax_exception\\& e/"
|
||||
"${args_OUT_DIR}/${stem}Parser.cpp"
|
||||
VERBATIM)
|
||||
|
||||
add_custom_target(${args_TARGET}
|
||||
DEPENDS ${outputs})
|
||||
|
||||
set(${args_VAR} ${outputs} PARENT_SCOPE)
|
||||
endfunction()
|
||||
|
||||
set(antlr3_grammar_files
|
||||
cql3/Cql.g
|
||||
alternator/expressions.g)
|
||||
|
||||
set(antlr3_gen_files)
|
||||
|
||||
foreach(f ${antlr3_grammar_files})
|
||||
get_filename_component(grammar_file_name "${f}" NAME_WE)
|
||||
get_filename_component(f_dir "${f}" DIRECTORY)
|
||||
scylla_generate_antlr3(
|
||||
TARGET scylla_antlr3_gen_${grammar_file_name}
|
||||
VAR scylla_antlr3_gen_${grammar_file_name}_files
|
||||
IN_FILE "${CMAKE_SOURCE_DIR}/${f}"
|
||||
OUT_DIR ${scylla_gen_build_dir}/${f_dir})
|
||||
list(APPEND antlr3_gen_files "${scylla_antlr3_gen_${grammar_file_name}_files}")
|
||||
endforeach()
|
||||
|
||||
# Generate C++ sources from ragel grammar files
|
||||
seastar_generate_ragel(
|
||||
TARGET scylla_ragel_gen_protocol_parser
|
||||
VAR scylla_ragel_gen_protocol_parser_file
|
||||
IN_FILE "${CMAKE_SOURCE_DIR}/redis/protocol_parser.rl"
|
||||
OUT_FILE ${scylla_gen_build_dir}/redis/protocol_parser.hh)
|
||||
|
||||
# Generate C++ sources from Swagger definitions
|
||||
set(swagger_files
|
||||
api/api-doc/cache_service.json
|
||||
api/api-doc/collectd.json
|
||||
api/api-doc/column_family.json
|
||||
api/api-doc/commitlog.json
|
||||
api/api-doc/compaction_manager.json
|
||||
api/api-doc/config.json
|
||||
api/api-doc/endpoint_snitch_info.json
|
||||
api/api-doc/error_injection.json
|
||||
api/api-doc/failure_detector.json
|
||||
api/api-doc/gossiper.json
|
||||
api/api-doc/hinted_handoff.json
|
||||
api/api-doc/lsa.json
|
||||
api/api-doc/messaging_service.json
|
||||
api/api-doc/storage_proxy.json
|
||||
api/api-doc/storage_service.json
|
||||
api/api-doc/stream_manager.json
|
||||
api/api-doc/system.json
|
||||
api/api-doc/utils.json)
|
||||
|
||||
set(swagger_gen_files)
|
||||
|
||||
foreach(f ${swagger_files})
|
||||
get_filename_component(fname "${f}" NAME_WE)
|
||||
get_filename_component(dir "${f}" DIRECTORY)
|
||||
seastar_generate_swagger(
|
||||
TARGET scylla_swagger_gen_${fname}
|
||||
VAR scylla_swagger_gen_${fname}_files
|
||||
IN_FILE "${CMAKE_SOURCE_DIR}/${f}"
|
||||
OUT_DIR "${scylla_gen_build_dir}/${dir}")
|
||||
list(APPEND swagger_gen_files "${scylla_swagger_gen_${fname}_files}")
|
||||
endforeach()
|
||||
|
||||
# Create C++ bindings for IDL serializers
|
||||
function(scylla_generate_idl_serializer)
|
||||
set(one_value_args TARGET VAR IN_FILE OUT_FILE)
|
||||
cmake_parse_arguments(args "" "${one_value_args}" "" ${ARGN})
|
||||
get_filename_component(out_dir ${args_OUT_FILE} DIRECTORY)
|
||||
set(idl_compiler "${CMAKE_SOURCE_DIR}/idl-compiler.py")
|
||||
|
||||
find_package(Python3 COMPONENTS Interpreter)
|
||||
|
||||
add_custom_command(
|
||||
DEPENDS
|
||||
${args_IN_FILE}
|
||||
${idl_compiler}
|
||||
OUTPUT ${args_OUT_FILE}
|
||||
COMMAND ${CMAKE_COMMAND} -E make_directory ${out_dir}
|
||||
COMMAND Python3::Interpreter ${idl_compiler} --ns ser -f ${args_IN_FILE} -o ${args_OUT_FILE})
|
||||
|
||||
add_custom_target(${args_TARGET}
|
||||
DEPENDS ${args_OUT_FILE})
|
||||
|
||||
set(${args_VAR} ${args_OUT_FILE} PARENT_SCOPE)
|
||||
endfunction()
|
||||
|
||||
set(idl_serializers
|
||||
idl/cache_temperature.idl.hh
|
||||
idl/commitlog.idl.hh
|
||||
idl/consistency_level.idl.hh
|
||||
idl/frozen_mutation.idl.hh
|
||||
idl/frozen_schema.idl.hh
|
||||
idl/gossip_digest.idl.hh
|
||||
idl/hinted_handoff.idl.hh
|
||||
idl/idl_test.idl.hh
|
||||
idl/keys.idl.hh
|
||||
idl/messaging_service.idl.hh
|
||||
idl/mutation.idl.hh
|
||||
idl/paging_state.idl.hh
|
||||
idl/partition_checksum.idl.hh
|
||||
idl/paxos.idl.hh
|
||||
idl/query.idl.hh
|
||||
idl/raft.idl.hh
|
||||
idl/range.idl.hh
|
||||
idl/read_command.idl.hh
|
||||
idl/reconcilable_result.idl.hh
|
||||
idl/replay_position.idl.hh
|
||||
idl/result.idl.hh
|
||||
idl/ring_position.idl.hh
|
||||
idl/streaming.idl.hh
|
||||
idl/token.idl.hh
|
||||
idl/tracing.idl.hh
|
||||
idl/truncation_record.idl.hh
|
||||
idl/uuid.idl.hh
|
||||
idl/view.idl.hh)
|
||||
|
||||
set(idl_gen_files)
|
||||
|
||||
foreach(f ${idl_serializers})
|
||||
get_filename_component(idl_name "${f}" NAME)
|
||||
get_filename_component(idl_target "${idl_name}" NAME_WE)
|
||||
get_filename_component(idl_dir "${f}" DIRECTORY)
|
||||
string(REPLACE ".idl.hh" ".dist.hh" idl_out_hdr_name "${idl_name}")
|
||||
scylla_generate_idl_serializer(
|
||||
TARGET scylla_idl_gen_${idl_target}
|
||||
VAR scylla_idl_gen_${idl_target}_files
|
||||
IN_FILE "${CMAKE_SOURCE_DIR}/${f}"
|
||||
OUT_FILE ${scylla_gen_build_dir}/${idl_dir}/${idl_out_hdr_name})
|
||||
list(APPEND idl_gen_files "${scylla_idl_gen_${idl_target}_files}")
|
||||
endforeach()
|
||||
|
||||
set(scylla_sources
|
||||
absl-flat_hash_map.cc
|
||||
alternator/auth.cc
|
||||
alternator/conditions.cc
|
||||
alternator/controller.cc
|
||||
alternator/executor.cc
|
||||
alternator/expressions.cc
|
||||
alternator/serialization.cc
|
||||
alternator/server.cc
|
||||
alternator/stats.cc
|
||||
alternator/streams.cc
|
||||
api/api.cc
|
||||
api/cache_service.cc
|
||||
api/collectd.cc
|
||||
api/column_family.cc
|
||||
api/commitlog.cc
|
||||
api/compaction_manager.cc
|
||||
api/config.cc
|
||||
api/endpoint_snitch.cc
|
||||
api/error_injection.cc
|
||||
api/failure_detector.cc
|
||||
api/gossiper.cc
|
||||
api/hinted_handoff.cc
|
||||
api/lsa.cc
|
||||
api/messaging_service.cc
|
||||
api/storage_proxy.cc
|
||||
api/storage_service.cc
|
||||
api/stream_manager.cc
|
||||
api/system.cc
|
||||
atomic_cell.cc
|
||||
auth/allow_all_authenticator.cc
|
||||
auth/allow_all_authorizer.cc
|
||||
auth/authenticated_user.cc
|
||||
auth/authentication_options.cc
|
||||
auth/authenticator.cc
|
||||
auth/common.cc
|
||||
auth/default_authorizer.cc
|
||||
auth/password_authenticator.cc
|
||||
auth/passwords.cc
|
||||
auth/permission.cc
|
||||
auth/permissions_cache.cc
|
||||
auth/resource.cc
|
||||
auth/role_or_anonymous.cc
|
||||
auth/roles-metadata.cc
|
||||
auth/sasl_challenge.cc
|
||||
auth/service.cc
|
||||
auth/standard_role_manager.cc
|
||||
auth/transitional.cc
|
||||
bytes.cc
|
||||
client_data.cc
|
||||
caching_options.cc
|
||||
canonical_mutation.cc
|
||||
cdc/cdc_partitioner.cc
|
||||
cdc/generation.cc
|
||||
cdc/log.cc
|
||||
cdc/metadata.cc
|
||||
cdc/split.cc
|
||||
clocks-impl.cc
|
||||
collection_mutation.cc
|
||||
compaction/compaction.cc
|
||||
compaction/compaction_manager.cc
|
||||
compaction/compaction_strategy.cc
|
||||
compaction/leveled_compaction_strategy.cc
|
||||
compaction/size_tiered_compaction_strategy.cc
|
||||
compaction/time_window_compaction_strategy.cc
|
||||
compress.cc
|
||||
connection_notifier.cc
|
||||
converting_mutation_partition_applier.cc
|
||||
counters.cc
|
||||
direct_failure_detector/failure_detector.cc
|
||||
cql3/abstract_marker.cc
|
||||
cql3/attributes.cc
|
||||
cql3/cf_name.cc
|
||||
cql3/column_condition.cc
|
||||
cql3/column_identifier.cc
|
||||
cql3/column_specification.cc
|
||||
cql3/constants.cc
|
||||
cql3/cql3_type.cc
|
||||
cql3/expr/expression.cc
|
||||
cql3/expr/prepare_expr.cc
|
||||
cql3/functions/aggregate_fcts.cc
|
||||
cql3/functions/castas_fcts.cc
|
||||
cql3/functions/error_injection_fcts.cc
|
||||
cql3/functions/functions.cc
|
||||
cql3/functions/user_function.cc
|
||||
cql3/index_name.cc
|
||||
cql3/keyspace_element_name.cc
|
||||
cql3/lists.cc
|
||||
cql3/maps.cc
|
||||
cql3/operation.cc
|
||||
cql3/prepare_context.cc
|
||||
cql3/query_options.cc
|
||||
cql3/query_processor.cc
|
||||
cql3/relation.cc
|
||||
cql3/restrictions/statement_restrictions.cc
|
||||
cql3/result_set.cc
|
||||
cql3/role_name.cc
|
||||
cql3/selection/abstract_function_selector.cc
|
||||
cql3/selection/selectable.cc
|
||||
cql3/selection/selection.cc
|
||||
cql3/selection/selector.cc
|
||||
cql3/selection/selector_factories.cc
|
||||
cql3/selection/simple_selector.cc
|
||||
cql3/sets.cc
|
||||
cql3/single_column_relation.cc
|
||||
cql3/statements/alter_keyspace_statement.cc
|
||||
cql3/statements/alter_service_level_statement.cc
|
||||
cql3/statements/alter_table_statement.cc
|
||||
cql3/statements/alter_type_statement.cc
|
||||
cql3/statements/alter_view_statement.cc
|
||||
cql3/statements/attach_service_level_statement.cc
|
||||
cql3/statements/authentication_statement.cc
|
||||
cql3/statements/authorization_statement.cc
|
||||
cql3/statements/batch_statement.cc
|
||||
cql3/statements/cas_request.cc
|
||||
cql3/statements/cf_prop_defs.cc
|
||||
cql3/statements/cf_statement.cc
|
||||
cql3/statements/create_aggregate_statement.cc
|
||||
cql3/statements/create_function_statement.cc
|
||||
cql3/statements/create_index_statement.cc
|
||||
cql3/statements/create_keyspace_statement.cc
|
||||
cql3/statements/create_service_level_statement.cc
|
||||
cql3/statements/create_table_statement.cc
|
||||
cql3/statements/create_type_statement.cc
|
||||
cql3/statements/create_view_statement.cc
|
||||
cql3/statements/delete_statement.cc
|
||||
cql3/statements/detach_service_level_statement.cc
|
||||
cql3/statements/drop_aggregate_statement.cc
|
||||
cql3/statements/drop_function_statement.cc
|
||||
cql3/statements/drop_index_statement.cc
|
||||
cql3/statements/drop_keyspace_statement.cc
|
||||
cql3/statements/drop_service_level_statement.cc
|
||||
cql3/statements/drop_table_statement.cc
|
||||
cql3/statements/drop_type_statement.cc
|
||||
cql3/statements/drop_view_statement.cc
|
||||
cql3/statements/function_statement.cc
|
||||
cql3/statements/grant_statement.cc
|
||||
cql3/statements/index_prop_defs.cc
|
||||
cql3/statements/index_target.cc
|
||||
cql3/statements/ks_prop_defs.cc
|
||||
cql3/statements/list_permissions_statement.cc
|
||||
cql3/statements/list_service_level_attachments_statement.cc
|
||||
cql3/statements/list_service_level_statement.cc
|
||||
cql3/statements/list_users_statement.cc
|
||||
cql3/statements/modification_statement.cc
|
||||
cql3/statements/permission_altering_statement.cc
|
||||
cql3/statements/property_definitions.cc
|
||||
cql3/statements/raw/parsed_statement.cc
|
||||
cql3/statements/revoke_statement.cc
|
||||
cql3/statements/role-management-statements.cc
|
||||
cql3/statements/schema_altering_statement.cc
|
||||
cql3/statements/select_statement.cc
|
||||
cql3/statements/service_level_statement.cc
|
||||
cql3/statements/sl_prop_defs.cc
|
||||
cql3/statements/truncate_statement.cc
|
||||
cql3/statements/update_statement.cc
|
||||
cql3/statements/use_statement.cc
|
||||
cql3/token_relation.cc
|
||||
cql3/type_json.cc
|
||||
cql3/untyped_result_set.cc
|
||||
cql3/update_parameters.cc
|
||||
cql3/user_types.cc
|
||||
cql3/util.cc
|
||||
cql3/ut_name.cc
|
||||
cql3/values.cc
|
||||
data_dictionary/data_dictionary.cc
|
||||
db/batchlog_manager.cc
|
||||
db/commitlog/commitlog.cc
|
||||
db/commitlog/commitlog_entry.cc
|
||||
db/commitlog/commitlog_replayer.cc
|
||||
db/config.cc
|
||||
db/consistency_level.cc
|
||||
db/cql_type_parser.cc
|
||||
db/data_listeners.cc
|
||||
db/extensions.cc
|
||||
db/heat_load_balance.cc
|
||||
db/hints/host_filter.cc
|
||||
db/hints/manager.cc
|
||||
db/hints/resource_manager.cc
|
||||
db/hints/sync_point.cc
|
||||
db/large_data_handler.cc
|
||||
db/legacy_schema_migrator.cc
|
||||
db/marshal/type_parser.cc
|
||||
db/schema_tables.cc
|
||||
db/size_estimates_virtual_reader.cc
|
||||
db/snapshot-ctl.cc
|
||||
db/sstables-format-selector.cc
|
||||
db/system_distributed_keyspace.cc
|
||||
db/system_keyspace.cc
|
||||
db/view/row_locking.cc
|
||||
db/view/view.cc
|
||||
db/view/view_update_generator.cc
|
||||
db/virtual_table.cc
|
||||
dht/boot_strapper.cc
|
||||
dht/i_partitioner.cc
|
||||
dht/murmur3_partitioner.cc
|
||||
dht/range_streamer.cc
|
||||
dht/token.cc
|
||||
distributed_loader.cc
|
||||
duration.cc
|
||||
exceptions/exceptions.cc
|
||||
flat_mutation_reader.cc
|
||||
frozen_mutation.cc
|
||||
frozen_schema.cc
|
||||
generic_server.cc
|
||||
debug.cc
|
||||
gms/application_state.cc
|
||||
gms/endpoint_state.cc
|
||||
gms/failure_detector.cc
|
||||
gms/feature_service.cc
|
||||
gms/gossip_digest_ack2.cc
|
||||
gms/gossip_digest_ack.cc
|
||||
gms/gossip_digest_syn.cc
|
||||
gms/gossiper.cc
|
||||
gms/inet_address.cc
|
||||
gms/versioned_value.cc
|
||||
gms/version_generator.cc
|
||||
hashers.cc
|
||||
index/secondary_index.cc
|
||||
index/secondary_index_manager.cc
|
||||
init.cc
|
||||
keys.cc
|
||||
lister.cc
|
||||
locator/abstract_replication_strategy.cc
|
||||
locator/azure_snitch.cc
|
||||
locator/ec2_multi_region_snitch.cc
|
||||
locator/ec2_snitch.cc
|
||||
locator/everywhere_replication_strategy.cc
|
||||
locator/gce_snitch.cc
|
||||
locator/gossiping_property_file_snitch.cc
|
||||
locator/local_strategy.cc
|
||||
locator/network_topology_strategy.cc
|
||||
locator/production_snitch_base.cc
|
||||
locator/rack_inferring_snitch.cc
|
||||
locator/simple_snitch.cc
|
||||
locator/simple_strategy.cc
|
||||
locator/snitch_base.cc
|
||||
locator/token_metadata.cc
|
||||
lang/lua.cc
|
||||
main.cc
|
||||
memtable.cc
|
||||
message/messaging_service.cc
|
||||
multishard_mutation_query.cc
|
||||
mutation.cc
|
||||
mutation_fragment.cc
|
||||
mutation_partition.cc
|
||||
mutation_partition_serializer.cc
|
||||
mutation_partition_view.cc
|
||||
mutation_query.cc
|
||||
mutation_reader.cc
|
||||
mutation_writer/feed_writers.cc
|
||||
mutation_writer/multishard_writer.cc
|
||||
mutation_writer/partition_based_splitting_writer.cc
|
||||
mutation_writer/shard_based_splitting_writer.cc
|
||||
mutation_writer/timestamp_based_splitting_writer.cc
|
||||
partition_slice_builder.cc
|
||||
partition_version.cc
|
||||
querier.cc
|
||||
query.cc
|
||||
query_ranges_to_vnodes.cc
|
||||
query-result-set.cc
|
||||
raft/fsm.cc
|
||||
raft/log.cc
|
||||
raft/raft.cc
|
||||
raft/server.cc
|
||||
raft/tracker.cc
|
||||
range_tombstone.cc
|
||||
range_tombstone_list.cc
|
||||
tombstone_gc_options.cc
|
||||
tombstone_gc.cc
|
||||
reader_concurrency_semaphore.cc
|
||||
redis/abstract_command.cc
|
||||
redis/command_factory.cc
|
||||
redis/commands.cc
|
||||
redis/keyspace_utils.cc
|
||||
redis/lolwut.cc
|
||||
redis/mutation_utils.cc
|
||||
redis/options.cc
|
||||
redis/query_processor.cc
|
||||
redis/query_utils.cc
|
||||
redis/server.cc
|
||||
redis/service.cc
|
||||
redis/stats.cc
|
||||
release.cc
|
||||
repair/repair.cc
|
||||
repair/row_level.cc
|
||||
replica/database.cc
|
||||
replica/table.cc
|
||||
row_cache.cc
|
||||
schema.cc
|
||||
schema_mutations.cc
|
||||
schema_registry.cc
|
||||
serializer.cc
|
||||
sstables_loader.cc
|
||||
service/client_state.cc
|
||||
service/migration_manager.cc
|
||||
service/misc_services.cc
|
||||
service/pager/paging_state.cc
|
||||
service/pager/query_pagers.cc
|
||||
service/paxos/paxos_state.cc
|
||||
service/paxos/prepare_response.cc
|
||||
service/paxos/prepare_summary.cc
|
||||
service/paxos/proposal.cc
|
||||
service/priority_manager.cc
|
||||
service/qos/qos_common.cc
|
||||
service/qos/service_level_controller.cc
|
||||
service/qos/standard_service_level_distributed_data_accessor.cc
|
||||
service/raft/raft_gossip_failure_detector.cc
|
||||
service/raft/raft_group_registry.cc
|
||||
service/raft/raft_rpc.cc
|
||||
service/raft/raft_sys_table_storage.cc
|
||||
service/raft/group0_state_machine.cc
|
||||
service/storage_proxy.cc
|
||||
service/storage_service.cc
|
||||
sstables/compress.cc
|
||||
sstables/integrity_checked_file_impl.cc
|
||||
sstables/kl/reader.cc
|
||||
sstables/metadata_collector.cc
|
||||
sstables/m_format_read_helpers.cc
|
||||
sstables/mx/reader.cc
|
||||
sstables/mx/writer.cc
|
||||
sstables/prepended_input_stream.cc
|
||||
sstables/random_access_reader.cc
|
||||
sstables/sstable_directory.cc
|
||||
sstables/sstable_mutation_reader.cc
|
||||
sstables/sstables.cc
|
||||
sstables/sstable_set.cc
|
||||
sstables/sstables_manager.cc
|
||||
sstables/sstable_version.cc
|
||||
sstables/writer.cc
|
||||
streaming/consumer.cc
|
||||
streaming/progress_info.cc
|
||||
streaming/session_info.cc
|
||||
streaming/stream_coordinator.cc
|
||||
streaming/stream_manager.cc
|
||||
streaming/stream_plan.cc
|
||||
streaming/stream_reason.cc
|
||||
streaming/stream_receive_task.cc
|
||||
streaming/stream_request.cc
|
||||
streaming/stream_result_future.cc
|
||||
streaming/stream_session.cc
|
||||
streaming/stream_session_state.cc
|
||||
streaming/stream_summary.cc
|
||||
streaming/stream_task.cc
|
||||
streaming/stream_transfer_task.cc
|
||||
table_helper.cc
|
||||
tasks/task_manager.cc
|
||||
thrift/controller.cc
|
||||
thrift/handler.cc
|
||||
thrift/server.cc
|
||||
thrift/thrift_validation.cc
|
||||
timeout_config.cc
|
||||
tools/scylla-sstable-index.cc
|
||||
tools/scylla-types.cc
|
||||
tracing/traced_file.cc
|
||||
tracing/trace_keyspace_helper.cc
|
||||
tracing/trace_state.cc
|
||||
tracing/tracing_backend_registry.cc
|
||||
tracing/tracing.cc
|
||||
transport/controller.cc
|
||||
transport/cql_protocol_extension.cc
|
||||
transport/event.cc
|
||||
transport/event_notifier.cc
|
||||
transport/messages/result_message.cc
|
||||
transport/server.cc
|
||||
types.cc
|
||||
unimplemented.cc
|
||||
utils/arch/powerpc/crc32-vpmsum/crc32_wrapper.cc
|
||||
utils/array-search.cc
|
||||
utils/ascii.cc
|
||||
utils/base64.cc
|
||||
utils/big_decimal.cc
|
||||
utils/bloom_calculations.cc
|
||||
utils/bloom_filter.cc
|
||||
utils/buffer_input_stream.cc
|
||||
utils/build_id.cc
|
||||
utils/config_file.cc
|
||||
utils/directories.cc
|
||||
utils/disk-error-handler.cc
|
||||
utils/dynamic_bitset.cc
|
||||
utils/error_injection.cc
|
||||
utils/exceptions.cc
|
||||
utils/file_lock.cc
|
||||
utils/generation-number.cc
|
||||
utils/gz/crc_combine.cc
|
||||
utils/gz/gen_crc_combine_table.cc
|
||||
utils/human_readable.cc
|
||||
utils/i_filter.cc
|
||||
utils/large_bitset.cc
|
||||
utils/like_matcher.cc
|
||||
utils/limiting_data_source.cc
|
||||
utils/logalloc.cc
|
||||
utils/managed_bytes.cc
|
||||
utils/multiprecision_int.cc
|
||||
utils/murmur_hash.cc
|
||||
utils/rate_limiter.cc
|
||||
utils/rjson.cc
|
||||
utils/runtime.cc
|
||||
utils/updateable_value.cc
|
||||
utils/utf8.cc
|
||||
utils/uuid.cc
|
||||
utils/UUID_gen.cc
|
||||
validation.cc
|
||||
vint-serialization.cc
|
||||
zstd.cc)
|
||||
target_link_libraries(scylla-main
|
||||
PRIVATE
|
||||
db
|
||||
absl::hash
|
||||
absl::raw_hash_set
|
||||
Seastar::seastar
|
||||
Snappy::snappy
|
||||
systemd
|
||||
ZLIB::ZLIB)
|
||||
add_subdirectory(api)
|
||||
add_subdirectory(alternator)
|
||||
add_subdirectory(db)
|
||||
add_subdirectory(auth)
|
||||
add_subdirectory(cdc)
|
||||
add_subdirectory(compaction)
|
||||
add_subdirectory(cql3)
|
||||
add_subdirectory(data_dictionary)
|
||||
add_subdirectory(dht)
|
||||
add_subdirectory(gms)
|
||||
add_subdirectory(idl)
|
||||
add_subdirectory(index)
|
||||
add_subdirectory(interface)
|
||||
add_subdirectory(lang)
|
||||
add_subdirectory(locator)
|
||||
add_subdirectory(mutation)
|
||||
add_subdirectory(mutation_writer)
|
||||
add_subdirectory(node_ops)
|
||||
add_subdirectory(readers)
|
||||
add_subdirectory(redis)
|
||||
add_subdirectory(replica)
|
||||
add_subdirectory(raft)
|
||||
add_subdirectory(repair)
|
||||
add_subdirectory(rust)
|
||||
add_subdirectory(schema)
|
||||
add_subdirectory(service)
|
||||
add_subdirectory(sstables)
|
||||
add_subdirectory(streaming)
|
||||
add_subdirectory(test)
|
||||
add_subdirectory(thrift)
|
||||
add_subdirectory(tools)
|
||||
add_subdirectory(tracing)
|
||||
add_subdirectory(transport)
|
||||
add_subdirectory(types)
|
||||
add_subdirectory(utils)
|
||||
add_version_library(scylla_version
|
||||
release.cc)
|
||||
|
||||
set(scylla_gen_sources
|
||||
"${scylla_thrift_gen_cassandra_files}"
|
||||
"${scylla_ragel_gen_protocol_parser_file}"
|
||||
"${swagger_gen_files}"
|
||||
"${idl_gen_files}"
|
||||
"${antlr3_gen_files}")
|
||||
|
||||
add_executable(scylla
|
||||
main.cc)
|
||||
target_link_libraries(scylla PRIVATE
|
||||
scylla-main
|
||||
api
|
||||
auth
|
||||
alternator
|
||||
db
|
||||
cdc
|
||||
compaction
|
||||
cql3
|
||||
data_dictionary
|
||||
dht
|
||||
gms
|
||||
idl
|
||||
index
|
||||
lang
|
||||
locator
|
||||
mutation
|
||||
mutation_writer
|
||||
raft
|
||||
readers
|
||||
redis
|
||||
repair
|
||||
replica
|
||||
schema
|
||||
scylla_version
|
||||
service
|
||||
sstables
|
||||
streaming
|
||||
test-perf
|
||||
thrift
|
||||
tools
|
||||
tracing
|
||||
transport
|
||||
types
|
||||
utils)
|
||||
target_link_libraries(Boost::regex
|
||||
INTERFACE
|
||||
ICU::i18n
|
||||
ICU::uc)
|
||||
${scylla_sources}
|
||||
${scylla_gen_sources})
|
||||
|
||||
target_link_libraries(scylla PRIVATE
|
||||
seastar
|
||||
Boost::program_options)
|
||||
|
||||
# Force SHA1 build-id generation
|
||||
set(default_linker_flags "-Wl,--build-id=sha1")
|
||||
include(CheckLinkerFlag)
|
||||
set(Scylla_USE_LINKER
|
||||
""
|
||||
CACHE
|
||||
STRING
|
||||
"Use specified linker instead of the default one")
|
||||
if(Scylla_USE_LINKER)
|
||||
set(linkers "${Scylla_USE_LINKER}")
|
||||
else()
|
||||
set(linkers "lld" "gold")
|
||||
endif()
|
||||
|
||||
foreach(linker ${linkers})
|
||||
set(linker_flag "-fuse-ld=${linker}")
|
||||
check_linker_flag(CXX ${linker_flag} "CXX_LINKER_HAVE_${linker}")
|
||||
if(CXX_LINKER_HAVE_${linker})
|
||||
string(APPEND default_linker_flags " ${linker_flag}")
|
||||
break()
|
||||
elseif(Scylla_USE_LINKER)
|
||||
message(FATAL_ERROR "${Scylla_USE_LINKER} is not supported.")
|
||||
endif()
|
||||
endforeach()
|
||||
|
||||
set(CMAKE_EXE_LINKER_FLAGS "${default_linker_flags}" CACHE INTERNAL "")
|
||||
# Boost dependencies
|
||||
Boost::filesystem
|
||||
Boost::program_options
|
||||
Boost::system
|
||||
Boost::thread
|
||||
Boost::regex
|
||||
Boost::headers
|
||||
# Abseil libs
|
||||
absl::hashtablez_sampler
|
||||
absl::raw_hash_set
|
||||
absl::synchronization
|
||||
absl::graphcycles_internal
|
||||
absl::stacktrace
|
||||
absl::symbolize
|
||||
absl::debugging_internal
|
||||
absl::demangle_internal
|
||||
absl::time
|
||||
absl::time_zone
|
||||
absl::int128
|
||||
absl::city
|
||||
absl::hash
|
||||
absl::malloc_internal
|
||||
absl::spinlock_wait
|
||||
absl::base
|
||||
absl::dynamic_annotations
|
||||
absl::raw_logging_internal
|
||||
absl::exponential_biased
|
||||
absl::throw_delegate
|
||||
# System libs
|
||||
ZLIB::ZLIB
|
||||
ICU::uc
|
||||
systemd
|
||||
zstd
|
||||
snappy
|
||||
${LUA_LIBRARIES}
|
||||
thrift
|
||||
crypt)
|
||||
|
||||
target_link_libraries(scylla PRIVATE
|
||||
-Wl,--build-id=sha1 # Force SHA1 build-id generation
|
||||
# TODO: Use lld linker if it's available, otherwise gold, else bfd
|
||||
-fuse-ld=lld)
|
||||
# TODO: patch dynamic linker to match configure.py behavior
|
||||
|
||||
target_compile_options(scylla PRIVATE
|
||||
-std=gnu++20
|
||||
${cxx_coro_flag}
|
||||
${target_arch_flag})
|
||||
# Hacks needed to expose internal APIs for xxhash dependencies
|
||||
target_compile_definitions(scylla PRIVATE XXH_PRIVATE_API HAVE_LZ4_COMPRESS_DEFAULT)
|
||||
|
||||
target_include_directories(scylla PRIVATE
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}"
|
||||
libdeflate
|
||||
abseil
|
||||
"${scylla_gen_build_dir}")
|
||||
|
||||
add_subdirectory(dist)
|
||||
###
|
||||
### Create crc_combine_table helper executable.
|
||||
### Use it to generate crc_combine_table.cc to be used in scylla at build time.
|
||||
###
|
||||
add_executable(crc_combine_table utils/gz/gen_crc_combine_table.cc)
|
||||
target_link_libraries(crc_combine_table PRIVATE seastar)
|
||||
target_include_directories(crc_combine_table PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}")
|
||||
target_compile_options(crc_combine_table PRIVATE
|
||||
-std=gnu++20
|
||||
${cxx_coro_flag}
|
||||
${target_arch_flag})
|
||||
add_dependencies(scylla crc_combine_table)
|
||||
|
||||
# Generate an additional source file at build time that is needed for Scylla compilation
|
||||
add_custom_command(OUTPUT "${scylla_gen_build_dir}/utils/gz/crc_combine_table.cc"
|
||||
COMMAND $<TARGET_FILE:crc_combine_table> > "${scylla_gen_build_dir}/utils/gz/crc_combine_table.cc"
|
||||
DEPENDS crc_combine_table)
|
||||
target_sources(scylla PRIVATE "${scylla_gen_build_dir}/utils/gz/crc_combine_table.cc")
|
||||
|
||||
###
|
||||
### Generate version file and supply appropriate compile definitions for release.cc
|
||||
###
|
||||
execute_process(COMMAND ${CMAKE_SOURCE_DIR}/SCYLLA-VERSION-GEN --output-dir "${CMAKE_BINARY_DIR}/gen" RESULT_VARIABLE scylla_version_gen_res)
|
||||
if(scylla_version_gen_res)
|
||||
message(SEND_ERROR "Version file generation failed. Return code: ${scylla_version_gen_res}")
|
||||
endif()
|
||||
|
||||
file(READ "${CMAKE_BINARY_DIR}/gen/SCYLLA-VERSION-FILE" scylla_version)
|
||||
string(STRIP "${scylla_version}" scylla_version)
|
||||
|
||||
file(READ "${CMAKE_BINARY_DIR}/gen/SCYLLA-RELEASE-FILE" scylla_release)
|
||||
string(STRIP "${scylla_release}" scylla_release)
|
||||
|
||||
get_property(release_cdefs SOURCE "${CMAKE_SOURCE_DIR}/release.cc" PROPERTY COMPILE_DEFINITIONS)
|
||||
list(APPEND release_cdefs "SCYLLA_VERSION=\"${scylla_version}\"" "SCYLLA_RELEASE=\"${scylla_release}\"")
|
||||
set_source_files_properties("${CMAKE_SOURCE_DIR}/release.cc" PROPERTIES COMPILE_DEFINITIONS "${release_cdefs}")
|
||||
|
||||
###
|
||||
### Custom command for building libdeflate. Link the library to scylla.
|
||||
###
|
||||
set(libdeflate_lib "${scylla_build_dir}/libdeflate/libdeflate.a")
|
||||
add_custom_command(OUTPUT "${libdeflate_lib}"
|
||||
COMMAND make -C "${CMAKE_SOURCE_DIR}/libdeflate"
|
||||
BUILD_DIR=../build/${BUILD_TYPE}/libdeflate/
|
||||
CC=${CMAKE_C_COMPILER}
|
||||
"CFLAGS=${target_arch_flag}"
|
||||
../build/${BUILD_TYPE}/libdeflate//libdeflate.a) # Two backslashes are important!
|
||||
# Hack to force generating custom command to produce libdeflate.a
|
||||
add_custom_target(libdeflate DEPENDS "${libdeflate_lib}")
|
||||
target_link_libraries(scylla PRIVATE "${libdeflate_lib}")
|
||||
|
||||
# TODO: create cmake/ directory and move utilities (generate functions etc) there
|
||||
# TODO: Build tests if BUILD_TESTING=on (using CTest module)
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
## Asking questions or requesting help
|
||||
|
||||
Use the [ScyllaDB Community Forum](https://forum.scylladb.com) or the [Slack workspace](http://slack.scylladb.com) for general questions and help.
|
||||
Use the [Scylla Users mailing list](https://groups.google.com/g/scylladb-users) or the [Slack workspace](http://slack.scylladb.com) for general questions and help.
|
||||
|
||||
Join the [Scylla Developers mailing list](https://groups.google.com/g/scylladb-dev) for deeper technical discussions and to discuss your ideas for contributions.
|
||||
|
||||
@@ -18,5 +18,3 @@ If you need help formatting or sending patches, [check out these instructions](h
|
||||
The Scylla C++ source code uses the [Seastar coding style](https://github.com/scylladb/seastar/blob/master/coding-style.md) so please adhere to that in your patches. Note that Scylla code is written with `using namespace seastar`, so should not explicitly add the `seastar::` prefix to Seastar symbols. You will usually not need to add `using namespace seastar` to new source files, because most Scylla header files have `#include "seastarx.hh"`, which does this.
|
||||
|
||||
Header files in Scylla must be self-contained, i.e., each can be included without having to include specific other headers first. To verify that your change did not break this property, run `ninja dev-headers`. If you added or removed header files, you must `touch configure.py` first - this will cause `configure.py` to be automatically re-run to generate a fresh list of header files.
|
||||
|
||||
For more criteria on what reviewers consider good code, see the [review checklist](https://github.com/scylladb/scylla/blob/master/docs/dev/review-checklist.md).
|
||||
|
||||
38
HACKING.md
38
HACKING.md
@@ -195,7 +195,7 @@ $ # Edit configuration options as appropriate
|
||||
$ SCYLLA_HOME=$HOME/scylla build/release/scylla
|
||||
```
|
||||
|
||||
The `scylla.yaml` file in the repository by default writes all database data to `/var/lib/scylla`, which likely requires root access. Change the `data_file_directories`, `commitlog_directory` and `schema_commitlog_directory` fields as appropriate.
|
||||
The `scylla.yaml` file in the repository by default writes all database data to `/var/lib/scylla`, which likely requires root access. Change the `data_file_directories` and `commitlog_directory` fields as appropriate.
|
||||
|
||||
Scylla has a number of requirements for the file-system and operating system to operate ideally and at peak performance. However, during development, these requirements can be relaxed with the `--developer-mode` flag.
|
||||
|
||||
@@ -383,40 +383,6 @@ Open the link printed at the end. Be horrified. Go and write more tests.
|
||||
|
||||
For more details see `./scripts/coverage.py --help`.
|
||||
|
||||
### Resolving stack backtraces
|
||||
|
||||
Scylla may print stack backtraces to the log for several reasons.
|
||||
For example:
|
||||
- When aborting (e.g. due to assertion failure, internal error, or segfault)
|
||||
- When detecting seastar reactor stalls (where a seastar task runs for a long time without yielding the cpu to other tasks on that shard)
|
||||
|
||||
The backtraces contain code pointers so they are not very helpful without resolving into code locations.
|
||||
To resolve the backtraces, one needs the scylla relocatable package that contains the scylla binary (with debug information),
|
||||
as well as the dynamic libraries it is linked against.
|
||||
|
||||
Builds from our automated build system are uploaded to the cloud
|
||||
and can be searched on http://backtrace.scylladb.com/
|
||||
|
||||
Make sure you have the scylla server exact `build-id` to locate
|
||||
its respective relocatable package, required for decoding backtraces it prints.
|
||||
|
||||
The build-id is printed to the system log when scylla starts.
|
||||
It can also be found by executing `scylla --build-id`, or
|
||||
by using the `file` utility, for example:
|
||||
```
|
||||
$ scylla --build-id
|
||||
4cba12e6eb290a406bfa4930918db23941fd4be3
|
||||
|
||||
$ file scylla
|
||||
scylla: ELF 64-bit LSB executable, x86-64, version 1 (SYSV), dynamically linked, interpreter /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////lib64/ld-linux-x86-64.so.2, for GNU/Linux 3.2.0, BuildID[sha1]=4cba12e6eb290a406bfa4930918db23941fd4be3, with debug_info, not stripped, too many notes (256)
|
||||
```
|
||||
|
||||
To find the build-id of a coredump, use the `eu-unstrip` utility as follows:
|
||||
```
|
||||
$ eu-unstrip -n --core <coredump> | awk '/scylla$/ { s=$2; sub(/@.*$/, "", s); print s; exit(0); }'
|
||||
4cba12e6eb290a406bfa4930918db23941fd4be3
|
||||
```
|
||||
|
||||
### Core dump debugging
|
||||
|
||||
See [debugging.md](docs/dev/debugging.md).
|
||||
See [debugging.md](debugging.md).
|
||||
|
||||
18
README.md
18
README.md
@@ -30,9 +30,9 @@ requirements - you just need to meet the frozen toolchain's prerequisites
|
||||
Building Scylla with the frozen toolchain `dbuild` is as easy as:
|
||||
|
||||
```bash
|
||||
$ git submodule update --init --force --recursive
|
||||
$ ./tools/toolchain/dbuild ./configure.py
|
||||
$ ./tools/toolchain/dbuild ninja build/release/scylla
|
||||
$ git submodule update --init --force --recursive
|
||||
$ ./tools/toolchain/dbuild ./configure.py
|
||||
$ ./tools/toolchain/dbuild ninja build/release/scylla
|
||||
```
|
||||
|
||||
For further information, please see:
|
||||
@@ -42,7 +42,7 @@ For further information, please see:
|
||||
* [Docker image build documentation] for information on how to build Docker images.
|
||||
|
||||
[developer documentation]: HACKING.md
|
||||
[build documentation]: docs/dev/building.md
|
||||
[build documentation]: docs/guides/building.md
|
||||
[docker image build documentation]: dist/docker/debian/README.md
|
||||
|
||||
## Running Scylla
|
||||
@@ -60,12 +60,12 @@ Please note that you need to run Scylla with `dbuild` if you built it with the f
|
||||
For more run options, run:
|
||||
|
||||
```bash
|
||||
$ ./tools/toolchain/dbuild ./build/release/scylla --help
|
||||
$ ./tools/toolchain/dbuild ./build/release/scylla --help
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
See [test.py manual](docs/dev/testing.md).
|
||||
See [test.py manual](docs/guides/testing.md).
|
||||
|
||||
## Scylla APIs and compatibility
|
||||
By default, Scylla is compatible with Apache Cassandra and its APIs - CQL and
|
||||
@@ -78,7 +78,7 @@ and the current compatibility of this feature as well as Scylla-specific extensi
|
||||
|
||||
## Documentation
|
||||
|
||||
Documentation can be found [here](docs/dev/README.md).
|
||||
Documentation can be found [here](https://scylla.docs.scylladb.com).
|
||||
Seastar documentation can be found [here](http://docs.seastar.io/master/index.html).
|
||||
User documentation can be found [here](https://docs.scylladb.com/).
|
||||
|
||||
@@ -100,10 +100,10 @@ If you are a developer working on Scylla, please read the [developer guidelines]
|
||||
|
||||
## Contact
|
||||
|
||||
* The [community forum] and [Slack channel] are for users to discuss configuration, management, and operations of the ScyllaDB open source.
|
||||
* The [users mailing list] and [Slack channel] are for users to discuss configuration, management, and operations of the ScyllaDB open source.
|
||||
* The [developers mailing list] is for developers and people interested in following the development of ScyllaDB to discuss technical topics.
|
||||
|
||||
[Community forum]: https://forum.scylladb.com/
|
||||
[Users mailing list]: https://groups.google.com/forum/#!forum/scylladb-users
|
||||
|
||||
[Slack channel]: http://slack.scylladb.com/
|
||||
|
||||
|
||||
@@ -1,13 +1,11 @@
|
||||
#!/bin/sh
|
||||
|
||||
USAGE=$(cat <<-END
|
||||
Usage: $(basename "$0") [-h|--help] [-o|--output-dir PATH] [--date-stamp DATE] -- generate Scylla version and build information files.
|
||||
Usage: $(basename "$0") [-h|--help] [-o|--output-dir PATH] -- generate Scylla version and build information files.
|
||||
|
||||
Options:
|
||||
-h|--help show this help message.
|
||||
-o|--output-dir PATH specify destination path at which the version files are to be created.
|
||||
-d|--date-stamp DATE manually set date for release parameter
|
||||
-v|--verbose also print out the version number
|
||||
|
||||
By default, the script will attempt to parse 'version' file
|
||||
in the current directory, which should contain a string of
|
||||
@@ -33,10 +31,7 @@ using '-o PATH' option.
|
||||
END
|
||||
)
|
||||
|
||||
DATE=""
|
||||
PRINT_VERSION=false
|
||||
|
||||
while [ $# -gt 0 ]; do
|
||||
while [[ $# -gt 0 ]]; do
|
||||
opt="$1"
|
||||
case $opt in
|
||||
-h|--help)
|
||||
@@ -48,15 +43,6 @@ while [ $# -gt 0 ]; do
|
||||
shift
|
||||
shift
|
||||
;;
|
||||
--date-stamp)
|
||||
DATE="$2"
|
||||
shift
|
||||
shift
|
||||
;;
|
||||
-v|--verbose)
|
||||
PRINT_VERSION=true
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
echo "Unexpected argument found: $1"
|
||||
echo
|
||||
@@ -72,33 +58,24 @@ if [ -z "$OUTPUT_DIR" ]; then
|
||||
OUTPUT_DIR="$SCRIPT_DIR/build"
|
||||
fi
|
||||
|
||||
if [ -z "$DATE" ]; then
|
||||
DATE=$(date --utc +%Y%m%d)
|
||||
fi
|
||||
|
||||
# Default scylla product/version tags
|
||||
PRODUCT=scylla
|
||||
VERSION=5.4.10
|
||||
VERSION=5.0.13
|
||||
|
||||
if test -f version
|
||||
then
|
||||
SCYLLA_VERSION=$(cat version | awk -F'-' '{print $1}')
|
||||
SCYLLA_RELEASE=$(cat version | awk -F'-' '{print $2}')
|
||||
else
|
||||
DATE=$(date --utc +%Y%m%d)
|
||||
GIT_COMMIT=$(git -C "$SCRIPT_DIR" log --pretty=format:'%h' -n 1)
|
||||
SCYLLA_VERSION=$VERSION
|
||||
if [ -z "$SCYLLA_RELEASE" ]; then
|
||||
DATE=$(date --utc +%Y%m%d)
|
||||
GIT_COMMIT=$(git -C "$SCRIPT_DIR" log --pretty=format:'%h' -n 1 --abbrev=12)
|
||||
# For custom package builds, replace "0" with "counter.your_name",
|
||||
# where counter starts at 1 and increments for successive versions.
|
||||
# This ensures that the package manager will select your custom
|
||||
# package over the standard release.
|
||||
SCYLLA_BUILD=0
|
||||
SCYLLA_RELEASE=$SCYLLA_BUILD.$DATE.$GIT_COMMIT
|
||||
elif [ -f "$OUTPUT_DIR/SCYLLA-RELEASE-FILE" ]; then
|
||||
echo "setting SCYLLA_RELEASE only makes sense in clean builds" 1>&2
|
||||
exit 1
|
||||
fi
|
||||
# For custom package builds, replace "0" with "counter.your_name",
|
||||
# where counter starts at 1 and increments for successive versions.
|
||||
# This ensures that the package manager will select your custom
|
||||
# package over the standard release.
|
||||
SCYLLA_BUILD=0
|
||||
SCYLLA_RELEASE=$SCYLLA_BUILD.$DATE.$GIT_COMMIT
|
||||
fi
|
||||
|
||||
if [ -f "$OUTPUT_DIR/SCYLLA-RELEASE-FILE" ]; then
|
||||
@@ -108,9 +85,7 @@ if [ -f "$OUTPUT_DIR/SCYLLA-RELEASE-FILE" ]; then
|
||||
fi
|
||||
fi
|
||||
|
||||
if $PRINT_VERSION; then
|
||||
echo "$SCYLLA_VERSION-$SCYLLA_RELEASE"
|
||||
fi
|
||||
echo "$SCYLLA_VERSION-$SCYLLA_RELEASE"
|
||||
mkdir -p "$OUTPUT_DIR"
|
||||
echo "$SCYLLA_VERSION" > "$OUTPUT_DIR/SCYLLA-VERSION-FILE"
|
||||
echo "$SCYLLA_RELEASE" > "$OUTPUT_DIR/SCYLLA-RELEASE-FILE"
|
||||
|
||||
1
abseil
Submodule
1
abseil
Submodule
Submodule abseil added at f70eadadd7
@@ -1,30 +0,0 @@
|
||||
include(generate_cql_grammar)
|
||||
generate_cql_grammar(
|
||||
GRAMMAR expressions.g
|
||||
SOURCES cql_grammar_srcs)
|
||||
|
||||
add_library(alternator STATIC)
|
||||
target_sources(alternator
|
||||
PRIVATE
|
||||
controller.cc
|
||||
server.cc
|
||||
executor.cc
|
||||
stats.cc
|
||||
serialization.cc
|
||||
expressions.cc
|
||||
conditions.cc
|
||||
auth.cc
|
||||
streams.cc
|
||||
ttl.cc
|
||||
${cql_grammar_srcs})
|
||||
target_include_directories(alternator
|
||||
PUBLIC
|
||||
${CMAKE_SOURCE_DIR}
|
||||
${CMAKE_BINARY_DIR}
|
||||
PRIVATE
|
||||
${RAPIDJSON_INCLUDE_DIRS})
|
||||
target_link_libraries(alternator
|
||||
cql3
|
||||
idl
|
||||
Seastar::seastar
|
||||
xxHash::xxhash)
|
||||
@@ -10,6 +10,8 @@
|
||||
#include "log.hh"
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
#include <gnutls/crypto.h>
|
||||
#include "hashers.hh"
|
||||
#include "bytes.hh"
|
||||
#include "alternator/auth.hh"
|
||||
#include <fmt/format.h>
|
||||
@@ -27,6 +29,99 @@ namespace alternator {
|
||||
|
||||
static logging::logger alogger("alternator-auth");
|
||||
|
||||
static hmac_sha256_digest hmac_sha256(std::string_view key, std::string_view msg) {
|
||||
hmac_sha256_digest digest;
|
||||
int ret = gnutls_hmac_fast(GNUTLS_MAC_SHA256, key.data(), key.size(), msg.data(), msg.size(), digest.data());
|
||||
if (ret) {
|
||||
throw std::runtime_error(fmt::format("Computing HMAC failed ({}): {}", ret, gnutls_strerror(ret)));
|
||||
}
|
||||
return digest;
|
||||
}
|
||||
|
||||
static hmac_sha256_digest get_signature_key(std::string_view key, std::string_view date_stamp, std::string_view region_name, std::string_view service_name) {
|
||||
auto date = hmac_sha256("AWS4" + std::string(key), date_stamp);
|
||||
auto region = hmac_sha256(std::string_view(date.data(), date.size()), region_name);
|
||||
auto service = hmac_sha256(std::string_view(region.data(), region.size()), service_name);
|
||||
auto signing = hmac_sha256(std::string_view(service.data(), service.size()), "aws4_request");
|
||||
return signing;
|
||||
}
|
||||
|
||||
static std::string apply_sha256(std::string_view msg) {
|
||||
sha256_hasher hasher;
|
||||
hasher.update(msg.data(), msg.size());
|
||||
return to_hex(hasher.finalize());
|
||||
}
|
||||
|
||||
static std::string apply_sha256(const std::vector<temporary_buffer<char>>& msg) {
|
||||
sha256_hasher hasher;
|
||||
for (const temporary_buffer<char>& buf : msg) {
|
||||
hasher.update(buf.get(), buf.size());
|
||||
}
|
||||
return to_hex(hasher.finalize());
|
||||
}
|
||||
|
||||
static std::string format_time_point(db_clock::time_point tp) {
|
||||
time_t time_point_repr = db_clock::to_time_t(tp);
|
||||
std::string time_point_str;
|
||||
time_point_str.resize(17);
|
||||
::tm time_buf;
|
||||
// strftime prints the terminating null character as well
|
||||
std::strftime(time_point_str.data(), time_point_str.size(), "%Y%m%dT%H%M%SZ", ::gmtime_r(&time_point_repr, &time_buf));
|
||||
time_point_str.resize(16);
|
||||
return time_point_str;
|
||||
}
|
||||
|
||||
void check_expiry(std::string_view signature_date) {
|
||||
//FIXME: The default 15min can be changed with X-Amz-Expires header - we should honor it
|
||||
std::string expiration_str = format_time_point(db_clock::now() - 15min);
|
||||
std::string validity_str = format_time_point(db_clock::now() + 15min);
|
||||
if (signature_date < expiration_str) {
|
||||
throw api_error::invalid_signature(
|
||||
fmt::format("Signature expired: {} is now earlier than {} (current time - 15 min.)",
|
||||
signature_date, expiration_str));
|
||||
}
|
||||
if (signature_date > validity_str) {
|
||||
throw api_error::invalid_signature(
|
||||
fmt::format("Signature not yet current: {} is still later than {} (current time + 15 min.)",
|
||||
signature_date, validity_str));
|
||||
}
|
||||
}
|
||||
|
||||
std::string get_signature(std::string_view access_key_id, std::string_view secret_access_key, std::string_view host, std::string_view method,
|
||||
std::string_view orig_datestamp, std::string_view signed_headers_str, const std::map<std::string_view, std::string_view>& signed_headers_map,
|
||||
const std::vector<temporary_buffer<char>>& body_content, std::string_view region, std::string_view service, std::string_view query_string) {
|
||||
auto amz_date_it = signed_headers_map.find("x-amz-date");
|
||||
if (amz_date_it == signed_headers_map.end()) {
|
||||
throw api_error::invalid_signature("X-Amz-Date header is mandatory for signature verification");
|
||||
}
|
||||
std::string_view amz_date = amz_date_it->second;
|
||||
check_expiry(amz_date);
|
||||
std::string_view datestamp = amz_date.substr(0, 8);
|
||||
if (datestamp != orig_datestamp) {
|
||||
throw api_error::invalid_signature(
|
||||
format("X-Amz-Date date does not match the provided datestamp. Expected {}, got {}",
|
||||
orig_datestamp, datestamp));
|
||||
}
|
||||
std::string_view canonical_uri = "/";
|
||||
|
||||
std::stringstream canonical_headers;
|
||||
for (const auto& header : signed_headers_map) {
|
||||
canonical_headers << fmt::format("{}:{}", header.first, header.second) << '\n';
|
||||
}
|
||||
|
||||
std::string payload_hash = apply_sha256(body_content);
|
||||
std::string canonical_request = fmt::format("{}\n{}\n{}\n{}\n{}\n{}", method, canonical_uri, query_string, canonical_headers.str(), signed_headers_str, payload_hash);
|
||||
|
||||
std::string_view algorithm = "AWS4-HMAC-SHA256";
|
||||
std::string credential_scope = fmt::format("{}/{}/{}/aws4_request", datestamp, region, service);
|
||||
std::string string_to_sign = fmt::format("{}\n{}\n{}\n{}", algorithm, amz_date, credential_scope, apply_sha256(canonical_request));
|
||||
|
||||
hmac_sha256_digest signing_key = get_signature_key(secret_access_key, datestamp, region, service);
|
||||
hmac_sha256_digest signature = hmac_sha256(std::string_view(signing_key.data(), signing_key.size()), string_to_sign);
|
||||
|
||||
return to_hex(bytes_view(reinterpret_cast<const int8_t*>(signature.data()), signature.size()));
|
||||
}
|
||||
|
||||
future<std::string> get_key_from_roles(service::storage_proxy& proxy, std::string username) {
|
||||
schema_ptr schema = proxy.data_dictionary().find_schema("system_auth", "roles");
|
||||
partition_key pk = partition_key::from_single_value(*schema, utf8_type->decompose(username));
|
||||
@@ -34,28 +129,27 @@ future<std::string> get_key_from_roles(service::storage_proxy& proxy, std::strin
|
||||
std::vector<query::clustering_range> bounds{query::clustering_range::make_open_ended_both_sides()};
|
||||
const column_definition* salted_hash_col = schema->get_column_definition(bytes("salted_hash"));
|
||||
if (!salted_hash_col) {
|
||||
co_await coroutine::return_exception(api_error::unrecognized_client(format("Credentials cannot be fetched for: {}", username)));
|
||||
co_return coroutine::make_exception(api_error::unrecognized_client(format("Credentials cannot be fetched for: {}", username)));
|
||||
}
|
||||
auto selection = cql3::selection::selection::for_columns(schema, {salted_hash_col});
|
||||
auto partition_slice = query::partition_slice(std::move(bounds), {}, query::column_id_vector{salted_hash_col->id}, selection->get_query_options());
|
||||
auto command = ::make_lw_shared<query::read_command>(schema->id(), schema->version(), partition_slice,
|
||||
proxy.get_max_result_size(partition_slice), query::tombstone_limit(proxy.get_tombstone_limit()));
|
||||
auto command = ::make_lw_shared<query::read_command>(schema->id(), schema->version(), partition_slice, proxy.get_max_result_size(partition_slice));
|
||||
auto cl = auth::password_authenticator::consistency_for_user(username);
|
||||
|
||||
service::client_state client_state{service::client_state::internal_tag()};
|
||||
service::storage_proxy::coordinator_query_result qr = co_await proxy.query(schema, std::move(command), std::move(partition_ranges), cl,
|
||||
service::storage_proxy::coordinator_query_options(executor::default_timeout(), empty_service_permit(), client_state));
|
||||
|
||||
cql3::selection::result_set_builder builder(*selection, gc_clock::now());
|
||||
cql3::selection::result_set_builder builder(*selection, gc_clock::now(), cql_serialization_format::latest());
|
||||
query::result_view::consume(*qr.query_result, partition_slice, cql3::selection::result_set_builder::visitor(builder, *schema, *selection));
|
||||
|
||||
auto result_set = builder.build();
|
||||
if (result_set->empty()) {
|
||||
co_await coroutine::return_exception(api_error::unrecognized_client(format("User not found: {}", username)));
|
||||
co_return coroutine::make_exception(api_error::unrecognized_client(format("User not found: {}", username)));
|
||||
}
|
||||
const managed_bytes_opt& salted_hash = result_set->rows().front().front(); // We only asked for 1 row and 1 column
|
||||
const bytes_opt& salted_hash = result_set->rows().front().front(); // We only asked for 1 row and 1 column
|
||||
if (!salted_hash) {
|
||||
co_await coroutine::return_exception(api_error::unrecognized_client(format("No password found for user: {}", username)));
|
||||
co_return coroutine::make_exception(api_error::unrecognized_client(format("No password found for user: {}", username)));
|
||||
}
|
||||
co_return value_cast<sstring>(utf8_type->deserialize(*salted_hash));
|
||||
}
|
||||
|
||||
@@ -20,8 +20,14 @@ class storage_proxy;
|
||||
|
||||
namespace alternator {
|
||||
|
||||
using hmac_sha256_digest = std::array<char, 32>;
|
||||
|
||||
using key_cache = utils::loading_cache<std::string, std::string, 1>;
|
||||
|
||||
std::string get_signature(std::string_view access_key_id, std::string_view secret_access_key, std::string_view host, std::string_view method,
|
||||
std::string_view orig_datestamp, std::string_view signed_headers_str, const std::map<std::string_view, std::string_view>& signed_headers_map,
|
||||
const std::vector<temporary_buffer<char>>& body_content, std::string_view region, std::string_view service, std::string_view query_string);
|
||||
|
||||
future<std::string> get_key_from_roles(service::storage_proxy& proxy, std::string username);
|
||||
|
||||
}
|
||||
|
||||
@@ -232,14 +232,7 @@ bool check_BEGINS_WITH(const rjson::value* v1, const rjson::value& v2,
|
||||
if (it2->name == "S") {
|
||||
return rjson::to_string_view(it1->value).starts_with(rjson::to_string_view(it2->value));
|
||||
} else /* it2->name == "B" */ {
|
||||
try {
|
||||
return base64_begins_with(rjson::to_string_view(it1->value), rjson::to_string_view(it2->value));
|
||||
} catch(std::invalid_argument&) {
|
||||
// determine if any of the malformed values is from query and raise an exception if so
|
||||
unwrap_bytes(it1->value, v1_from_query);
|
||||
unwrap_bytes(it2->value, v2_from_query);
|
||||
return false;
|
||||
}
|
||||
return base64_begins_with(rjson::to_string_view(it1->value), rjson::to_string_view(it2->value));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -248,7 +241,7 @@ static bool is_set_of(const rjson::value& type1, const rjson::value& type2) {
|
||||
}
|
||||
|
||||
// Check if two JSON-encoded values match with the CONTAINS relation
|
||||
bool check_CONTAINS(const rjson::value* v1, const rjson::value& v2, bool v1_from_query, bool v2_from_query) {
|
||||
bool check_CONTAINS(const rjson::value* v1, const rjson::value& v2) {
|
||||
if (!v1) {
|
||||
return false;
|
||||
}
|
||||
@@ -257,12 +250,7 @@ bool check_CONTAINS(const rjson::value* v1, const rjson::value& v2, bool v1_from
|
||||
if (kv1.name == "S" && kv2.name == "S") {
|
||||
return rjson::to_string_view(kv1.value).find(rjson::to_string_view(kv2.value)) != std::string_view::npos;
|
||||
} else if (kv1.name == "B" && kv2.name == "B") {
|
||||
auto d_kv1 = unwrap_bytes(kv1.value, v1_from_query);
|
||||
auto d_kv2 = unwrap_bytes(kv2.value, v2_from_query);
|
||||
if (!d_kv1 || !d_kv2) {
|
||||
return false;
|
||||
}
|
||||
return d_kv1->find(*d_kv2) != bytes::npos;
|
||||
return rjson::base64_decode(kv1.value).find(rjson::base64_decode(kv2.value)) != bytes::npos;
|
||||
} else if (is_set_of(kv1.name, kv2.name)) {
|
||||
for (auto i = kv1.value.Begin(); i != kv1.value.End(); ++i) {
|
||||
if (*i == kv2.value) {
|
||||
@@ -285,11 +273,11 @@ bool check_CONTAINS(const rjson::value* v1, const rjson::value& v2, bool v1_from
|
||||
}
|
||||
|
||||
// Check if two JSON-encoded values match with the NOT_CONTAINS relation
|
||||
static bool check_NOT_CONTAINS(const rjson::value* v1, const rjson::value& v2, bool v1_from_query, bool v2_from_query) {
|
||||
static bool check_NOT_CONTAINS(const rjson::value* v1, const rjson::value& v2) {
|
||||
if (!v1) {
|
||||
return false;
|
||||
}
|
||||
return !check_CONTAINS(v1, v2, v1_from_query, v2_from_query);
|
||||
return !check_CONTAINS(v1, v2);
|
||||
}
|
||||
|
||||
// Check if a JSON-encoded value equals any element of an array, which must have at least one element.
|
||||
@@ -386,12 +374,7 @@ bool check_compare(const rjson::value* v1, const rjson::value& v2, const Compara
|
||||
std::string_view(kv2.value.GetString(), kv2.value.GetStringLength()));
|
||||
}
|
||||
if (kv1.name == "B") {
|
||||
auto d_kv1 = unwrap_bytes(kv1.value, v1_from_query);
|
||||
auto d_kv2 = unwrap_bytes(kv2.value, v2_from_query);
|
||||
if(!d_kv1 || !d_kv2) {
|
||||
return false;
|
||||
}
|
||||
return cmp(*d_kv1, *d_kv2);
|
||||
return cmp(rjson::base64_decode(kv1.value), rjson::base64_decode(kv2.value));
|
||||
}
|
||||
// cannot reach here, as check_comparable_type() verifies the type is one
|
||||
// of the above options.
|
||||
@@ -481,13 +464,7 @@ static bool check_BETWEEN(const rjson::value* v, const rjson::value& lb, const r
|
||||
bounds_from_query);
|
||||
}
|
||||
if (kv_v.name == "B") {
|
||||
auto d_kv_v = unwrap_bytes(kv_v.value, v_from_query);
|
||||
auto d_kv_lb = unwrap_bytes(kv_lb.value, lb_from_query);
|
||||
auto d_kv_ub = unwrap_bytes(kv_ub.value, ub_from_query);
|
||||
if(!d_kv_v || !d_kv_lb || !d_kv_ub) {
|
||||
return false;
|
||||
}
|
||||
return check_BETWEEN(*d_kv_v, *d_kv_lb, *d_kv_ub, bounds_from_query);
|
||||
return check_BETWEEN(rjson::base64_decode(kv_v.value), rjson::base64_decode(kv_lb.value), rjson::base64_decode(kv_ub.value), bounds_from_query);
|
||||
}
|
||||
if (v_from_query) {
|
||||
throw api_error::validation(
|
||||
@@ -580,7 +557,7 @@ static bool verify_expected_one(const rjson::value& condition, const rjson::valu
|
||||
format("CONTAINS operator requires a single AttributeValue of type String, Number, or Binary, "
|
||||
"got {} instead", argtype));
|
||||
}
|
||||
return check_CONTAINS(got, arg, false, true);
|
||||
return check_CONTAINS(got, arg);
|
||||
}
|
||||
case comparison_operator_type::NOT_CONTAINS:
|
||||
{
|
||||
@@ -594,7 +571,7 @@ static bool verify_expected_one(const rjson::value& condition, const rjson::valu
|
||||
format("CONTAINS operator requires a single AttributeValue of type String, Number, or Binary, "
|
||||
"got {} instead", argtype));
|
||||
}
|
||||
return check_NOT_CONTAINS(got, arg, false, true);
|
||||
return check_NOT_CONTAINS(got, arg);
|
||||
}
|
||||
}
|
||||
throw std::logic_error(format("Internal error: corrupted operator enum: {}", int(op)));
|
||||
|
||||
@@ -38,7 +38,7 @@ conditional_operator_type get_conditional_operator(const rjson::value& req);
|
||||
bool verify_expected(const rjson::value& req, const rjson::value* previous_item);
|
||||
bool verify_condition(const rjson::value& condition, bool require_all, const rjson::value* previous_item);
|
||||
|
||||
bool check_CONTAINS(const rjson::value* v1, const rjson::value& v2, bool v1_from_query, bool v2_from_query);
|
||||
bool check_CONTAINS(const rjson::value* v1, const rjson::value& v2);
|
||||
bool check_BEGINS_WITH(const rjson::value* v1, const rjson::value& v2, bool v1_from_query, bool v2_from_query);
|
||||
|
||||
bool verify_condition_expression(
|
||||
|
||||
@@ -14,8 +14,6 @@
|
||||
#include "db/config.hh"
|
||||
#include "cdc/generation_service.hh"
|
||||
#include "service/memory_limiter.hh"
|
||||
#include "auth/service.hh"
|
||||
#include "service/qos/service_level_controller.hh"
|
||||
|
||||
using namespace seastar;
|
||||
|
||||
@@ -30,8 +28,6 @@ controller::controller(
|
||||
sharded<db::system_distributed_keyspace>& sys_dist_ks,
|
||||
sharded<cdc::generation_service>& cdc_gen_svc,
|
||||
sharded<service::memory_limiter>& memory_limiter,
|
||||
sharded<auth::service>& auth_service,
|
||||
sharded<qos::service_level_controller>& sl_controller,
|
||||
const db::config& config)
|
||||
: _gossiper(gossiper)
|
||||
, _proxy(proxy)
|
||||
@@ -39,8 +35,6 @@ controller::controller(
|
||||
, _sys_dist_ks(sys_dist_ks)
|
||||
, _cdc_gen_svc(cdc_gen_svc)
|
||||
, _memory_limiter(memory_limiter)
|
||||
, _auth_service(auth_service)
|
||||
, _sl_controller(sl_controller)
|
||||
, _config(config)
|
||||
{
|
||||
}
|
||||
@@ -76,17 +70,14 @@ future<> controller::start_server() {
|
||||
_ssg = create_smp_service_group(c).get0();
|
||||
|
||||
rmw_operation::set_default_write_isolation(_config.alternator_write_isolation());
|
||||
executor::set_default_timeout(std::chrono::milliseconds(_config.alternator_timeout_in_ms()));
|
||||
|
||||
net::inet_address addr = utils::resolve(_config.alternator_address, family).get0();
|
||||
|
||||
auto get_cdc_metadata = [] (cdc::generation_service& svc) { return std::ref(svc.get_cdc_metadata()); };
|
||||
auto get_timeout_in_ms = [] (const db::config& cfg) -> utils::updateable_value<uint32_t> {
|
||||
return cfg.alternator_timeout_in_ms;
|
||||
};
|
||||
_executor.start(std::ref(_gossiper), std::ref(_proxy), std::ref(_mm), std::ref(_sys_dist_ks),
|
||||
sharded_parameter(get_cdc_metadata, std::ref(_cdc_gen_svc)), _ssg.value(),
|
||||
sharded_parameter(get_timeout_in_ms, std::ref(_config))).get();
|
||||
_server.start(std::ref(_executor), std::ref(_proxy), std::ref(_gossiper), std::ref(_auth_service), std::ref(_sl_controller)).get();
|
||||
|
||||
_executor.start(std::ref(_gossiper), std::ref(_proxy), std::ref(_mm), std::ref(_sys_dist_ks), sharded_parameter(get_cdc_metadata, std::ref(_cdc_gen_svc)), _ssg.value()).get();
|
||||
_server.start(std::ref(_executor), std::ref(_proxy), std::ref(_gossiper)).get();
|
||||
// Note: from this point on, if start_server() throws for any reason,
|
||||
// it must first call stop_server() to stop the executor and server
|
||||
// services we just started - or Scylla will cause an assertion
|
||||
|
||||
@@ -34,14 +34,6 @@ class gossiper;
|
||||
|
||||
}
|
||||
|
||||
namespace auth {
|
||||
class service;
|
||||
}
|
||||
|
||||
namespace qos {
|
||||
class service_level_controller;
|
||||
}
|
||||
|
||||
namespace alternator {
|
||||
|
||||
// This is the official DynamoDB API version.
|
||||
@@ -61,8 +53,6 @@ class controller : public protocol_server {
|
||||
sharded<db::system_distributed_keyspace>& _sys_dist_ks;
|
||||
sharded<cdc::generation_service>& _cdc_gen_svc;
|
||||
sharded<service::memory_limiter>& _memory_limiter;
|
||||
sharded<auth::service>& _auth_service;
|
||||
sharded<qos::service_level_controller>& _sl_controller;
|
||||
const db::config& _config;
|
||||
|
||||
std::vector<socket_address> _listen_addresses;
|
||||
@@ -78,8 +68,6 @@ public:
|
||||
sharded<db::system_distributed_keyspace>& sys_dist_ks,
|
||||
sharded<cdc::generation_service>& cdc_gen_svc,
|
||||
sharded<service::memory_limiter>& memory_limiter,
|
||||
sharded<auth::service>& auth_service,
|
||||
sharded<qos::service_level_controller>& sl_controller,
|
||||
const db::config& config);
|
||||
|
||||
virtual sstring name() const override;
|
||||
|
||||
@@ -23,7 +23,7 @@ namespace alternator {
|
||||
// api_error into a JSON object, and that is returned to the user.
|
||||
class api_error final : public std::exception {
|
||||
public:
|
||||
using status_type = http::reply::status_type;
|
||||
using status_type = httpd::reply::status_type;
|
||||
status_type _http_code;
|
||||
std::string _type;
|
||||
std::string _msg;
|
||||
@@ -73,11 +73,8 @@ public:
|
||||
static api_error serialization(std::string msg) {
|
||||
return api_error("SerializationException", std::move(msg));
|
||||
}
|
||||
static api_error table_not_found(std::string msg) {
|
||||
return api_error("TableNotFoundException", std::move(msg));
|
||||
}
|
||||
static api_error internal(std::string msg) {
|
||||
return api_error("InternalServerError", std::move(msg), http::reply::status_type::internal_server_error);
|
||||
return api_error("InternalServerError", std::move(msg), reply::status_type::internal_server_error);
|
||||
}
|
||||
|
||||
// Provide the "std::exception" interface, to make it easier to print this
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -22,7 +22,6 @@
|
||||
#include "alternator/error.hh"
|
||||
#include "stats.hh"
|
||||
#include "utils/rjson.hh"
|
||||
#include "utils/updateable_value.hh"
|
||||
|
||||
namespace db {
|
||||
class system_distributed_keyspace;
|
||||
@@ -82,10 +81,10 @@ namespace parsed {
|
||||
class path;
|
||||
};
|
||||
|
||||
const std::map<sstring, sstring>& get_tags_of_table(schema_ptr schema);
|
||||
std::optional<std::string> find_tag(const schema& s, const sstring& tag);
|
||||
future<> update_tags(service::migration_manager& mm, schema_ptr schema, std::map<sstring, sstring>&& tags_map);
|
||||
schema_ptr get_table(service::storage_proxy& proxy, const rjson::value& request);
|
||||
bool is_alternator_keyspace(const sstring& ks_name);
|
||||
// Wraps the db::get_tags_of_table and throws if the table is missing the tags extension.
|
||||
const std::map<sstring, sstring>& get_tags_of_table_or_throw(schema_ptr schema);
|
||||
|
||||
// An attribute_path_map object is used to hold data for various attributes
|
||||
// paths (parsed::path) in a hierarchy of attribute paths. Each attribute path
|
||||
@@ -145,11 +144,6 @@ template<typename T>
|
||||
using attribute_path_map = std::unordered_map<std::string, attribute_path_map_node<T>>;
|
||||
|
||||
using attrs_to_get_node = attribute_path_map_node<std::monostate>;
|
||||
// attrs_to_get lists which top-level attribute are needed, and possibly also
|
||||
// which part of the top-level attribute is really needed (when nested
|
||||
// attribute paths appeared in the query).
|
||||
// Most code actually uses optional<attrs_to_get>. There, a disengaged
|
||||
// optional means we should get all attributes, not specific ones.
|
||||
using attrs_to_get = attribute_path_map<std::monostate>;
|
||||
|
||||
|
||||
@@ -171,16 +165,8 @@ public:
|
||||
static constexpr auto KEYSPACE_NAME_PREFIX = "alternator_";
|
||||
static constexpr std::string_view INTERNAL_TABLE_PREFIX = ".scylla.alternator.";
|
||||
|
||||
executor(gms::gossiper& gossiper,
|
||||
service::storage_proxy& proxy,
|
||||
service::migration_manager& mm,
|
||||
db::system_distributed_keyspace& sdks,
|
||||
cdc::metadata& cdc_metadata,
|
||||
smp_service_group ssg,
|
||||
utils::updateable_value<uint32_t> default_timeout_in_ms)
|
||||
: _gossiper(gossiper), _proxy(proxy), _mm(mm), _sdks(sdks), _cdc_metadata(cdc_metadata), _ssg(ssg) {
|
||||
s_default_timeout_in_ms = std::move(default_timeout_in_ms);
|
||||
}
|
||||
executor(gms::gossiper& gossiper, service::storage_proxy& proxy, service::migration_manager& mm, db::system_distributed_keyspace& sdks, cdc::metadata& cdc_metadata, smp_service_group ssg)
|
||||
: _gossiper(gossiper), _proxy(proxy), _mm(mm), _sdks(sdks), _cdc_metadata(cdc_metadata), _ssg(ssg) {}
|
||||
|
||||
future<request_return_type> create_table(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> describe_table(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
@@ -205,62 +191,42 @@ public:
|
||||
future<request_return_type> describe_stream(client_state& client_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> get_shard_iterator(client_state& client_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> get_records(client_state& client_state, tracing::trace_state_ptr, service_permit permit, rjson::value request);
|
||||
future<request_return_type> describe_continuous_backups(client_state& client_state, service_permit permit, rjson::value request);
|
||||
|
||||
future<> start();
|
||||
future<> stop() {
|
||||
// disconnect from the value source, but keep the value unchanged.
|
||||
s_default_timeout_in_ms = utils::updateable_value<uint32_t>{s_default_timeout_in_ms()};
|
||||
return make_ready_future<>();
|
||||
}
|
||||
future<> stop() { return make_ready_future<>(); }
|
||||
|
||||
static sstring table_name(const schema&);
|
||||
static db::timeout_clock::time_point default_timeout();
|
||||
static void set_default_timeout(db::timeout_clock::duration timeout);
|
||||
private:
|
||||
static thread_local utils::updateable_value<uint32_t> s_default_timeout_in_ms;
|
||||
static db::timeout_clock::duration s_default_timeout;
|
||||
public:
|
||||
static schema_ptr find_table(service::storage_proxy&, const rjson::value& request);
|
||||
|
||||
private:
|
||||
friend class rmw_operation;
|
||||
|
||||
static bool is_alternator_keyspace(const sstring& ks_name);
|
||||
static sstring make_keyspace_name(const sstring& table_name);
|
||||
static void describe_key_schema(rjson::value& parent, const schema&, std::unordered_map<std::string,std::string> * = nullptr);
|
||||
|
||||
public:
|
||||
static void describe_key_schema(rjson::value& parent, const schema& schema, std::unordered_map<std::string,std::string>&);
|
||||
|
||||
|
||||
public:
|
||||
static std::optional<rjson::value> describe_single_item(schema_ptr,
|
||||
const query::partition_slice&,
|
||||
const cql3::selection::selection&,
|
||||
const query::result&,
|
||||
const std::optional<attrs_to_get>&);
|
||||
|
||||
static future<std::vector<rjson::value>> describe_multi_item(schema_ptr schema,
|
||||
const query::partition_slice&& slice,
|
||||
shared_ptr<cql3::selection::selection> selection,
|
||||
foreign_ptr<lw_shared_ptr<query::result>> query_result,
|
||||
shared_ptr<const std::optional<attrs_to_get>> attrs_to_get);
|
||||
const attrs_to_get&);
|
||||
|
||||
static void describe_single_item(const cql3::selection::selection&,
|
||||
const std::vector<managed_bytes_opt>&,
|
||||
const std::optional<attrs_to_get>&,
|
||||
const std::vector<bytes_opt>&,
|
||||
const attrs_to_get&,
|
||||
rjson::value&,
|
||||
bool = false);
|
||||
|
||||
static void add_stream_options(const rjson::value& stream_spec, schema_builder&, service::storage_proxy& sp);
|
||||
static void supplement_table_info(rjson::value& descr, const schema& schema, service::storage_proxy& sp);
|
||||
static void supplement_table_stream_info(rjson::value& descr, const schema& schema, const service::storage_proxy& sp);
|
||||
static void supplement_table_stream_info(rjson::value& descr, const schema& schema, service::storage_proxy& sp);
|
||||
};
|
||||
|
||||
// is_big() checks approximately if the given JSON value is "bigger" than
|
||||
// the given big_size number of bytes. The goal is to *quickly* detect
|
||||
// oversized JSON that, for example, is too large to be serialized to a
|
||||
// contiguous string - we don't need an accurate size for that. Moreover,
|
||||
// as soon as we detect that the JSON is indeed "big", we can return true
|
||||
// and don't need to continue calculating its exact size.
|
||||
// For simplicity, we use a recursive implementation. This is fine because
|
||||
// Alternator limits the depth of JSONs it reads from inputs, and doesn't
|
||||
// add more than a couple of levels in its own output construction.
|
||||
bool is_big(const rjson::value& val, int big_size = 100'000);
|
||||
|
||||
}
|
||||
|
||||
@@ -29,7 +29,7 @@
|
||||
namespace alternator {
|
||||
|
||||
template <typename Func, typename Result = std::result_of_t<Func(expressionsParser&)>>
|
||||
static Result do_with_parser(std::string_view input, Func&& f) {
|
||||
Result do_with_parser(std::string input, Func&& f) {
|
||||
expressionsLexer::InputStreamType input_stream{
|
||||
reinterpret_cast<const ANTLR_UINT8*>(input.data()),
|
||||
ANTLR_ENC_UTF8,
|
||||
@@ -43,41 +43,31 @@ static Result do_with_parser(std::string_view input, Func&& f) {
|
||||
return result;
|
||||
}
|
||||
|
||||
template <typename Func, typename Result = std::result_of_t<Func(expressionsParser&)>>
|
||||
static Result parse(const char* input_name, std::string_view input, Func&& f) {
|
||||
if (input.length() > 4096) {
|
||||
throw expressions_syntax_error(format("{} expression size {} exceeds allowed maximum 4096.",
|
||||
input_name, input.length()));
|
||||
}
|
||||
try {
|
||||
return do_with_parser(input, f);
|
||||
} catch (expressions_syntax_error& e) {
|
||||
// If already an expressions_syntax_error, don't print the type's
|
||||
// name (it's just ugly), just the message.
|
||||
// TODO: displayRecognitionError could set a position inside the
|
||||
// expressions_syntax_error in throws, and we could use it here to
|
||||
// mark the broken position in 'input'.
|
||||
throw expressions_syntax_error(format("Failed parsing {} '{}': {}",
|
||||
input_name, input, e.what()));
|
||||
} catch (...) {
|
||||
throw expressions_syntax_error(format("Failed parsing {} '{}': {}",
|
||||
input_name, input, std::current_exception()));
|
||||
}
|
||||
}
|
||||
|
||||
parsed::update_expression
|
||||
parse_update_expression(std::string_view query) {
|
||||
return parse("UpdateExpression", query, std::mem_fn(&expressionsParser::update_expression));
|
||||
parse_update_expression(std::string query) {
|
||||
try {
|
||||
return do_with_parser(query, std::mem_fn(&expressionsParser::update_expression));
|
||||
} catch (...) {
|
||||
throw expressions_syntax_error(format("Failed parsing UpdateExpression '{}': {}", query, std::current_exception()));
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<parsed::path>
|
||||
parse_projection_expression(std::string_view query) {
|
||||
return parse ("ProjectionExpression", query, std::mem_fn(&expressionsParser::projection_expression));
|
||||
parse_projection_expression(std::string query) {
|
||||
try {
|
||||
return do_with_parser(query, std::mem_fn(&expressionsParser::projection_expression));
|
||||
} catch (...) {
|
||||
throw expressions_syntax_error(format("Failed parsing ProjectionExpression '{}': {}", query, std::current_exception()));
|
||||
}
|
||||
}
|
||||
|
||||
parsed::condition_expression
|
||||
parse_condition_expression(std::string_view query, const char* caller) {
|
||||
return parse(caller, query, std::mem_fn(&expressionsParser::condition_expression));
|
||||
parse_condition_expression(std::string query) {
|
||||
try {
|
||||
return do_with_parser(query, std::mem_fn(&expressionsParser::condition_expression));
|
||||
} catch (...) {
|
||||
throw expressions_syntax_error(format("Failed parsing ConditionExpression '{}': {}", query, std::current_exception()));
|
||||
}
|
||||
}
|
||||
|
||||
namespace parsed {
|
||||
@@ -428,14 +418,9 @@ void for_condition_expression_on(const parsed::condition_expression& ce, const n
|
||||
// calculate_size() is ConditionExpression's size() function, i.e., it takes
|
||||
// a JSON-encoded value and returns its "size" as defined differently for the
|
||||
// different types - also as a JSON-encoded number.
|
||||
// If the value's type (e.g. number) has no size defined, there are two cases:
|
||||
// 1. If from_data (the value came directly from an attribute of the data),
|
||||
// It returns a JSON-encoded "null" value. Comparisons against this
|
||||
// non-numeric value will later fail, so eventually the application will
|
||||
// get a ConditionalCheckFailedException.
|
||||
// 2. Otherwise (the value came from a constant in the query or some other
|
||||
// calculation), throw a ValidationException.
|
||||
static rjson::value calculate_size(const rjson::value& v, bool from_data) {
|
||||
// It return a JSON-encoded "null" value if this value's type has no size
|
||||
// defined. Comparisons against this non-numeric value will later fail.
|
||||
static rjson::value calculate_size(const rjson::value& v) {
|
||||
// NOTE: If v is improperly formatted for our JSON value encoding, it
|
||||
// must come from the request itself, not from the database, so it makes
|
||||
// sense to throw a ValidationException if we see such a problem.
|
||||
@@ -464,12 +449,10 @@ static rjson::value calculate_size(const rjson::value& v, bool from_data) {
|
||||
throw api_error::validation(format("invalid byte string: {}", v));
|
||||
}
|
||||
ret = base64_decoded_len(rjson::to_string_view(it->value));
|
||||
} else if (from_data) {
|
||||
} else {
|
||||
rjson::value json_ret = rjson::empty_object();
|
||||
rjson::add(json_ret, "null", rjson::value(true));
|
||||
return json_ret;
|
||||
} else {
|
||||
throw api_error::validation(format("Unsupported operand type {} for function size()", it->name));
|
||||
}
|
||||
rjson::value json_ret = rjson::empty_object();
|
||||
rjson::add(json_ret, "N", rjson::from_string(std::to_string(ret)));
|
||||
@@ -551,7 +534,7 @@ std::unordered_map<std::string_view, function_handler_type*> function_handlers {
|
||||
format("{}: size() accepts 1 parameter, got {}", caller, f._parameters.size()));
|
||||
}
|
||||
rjson::value v = calculate_value(f._parameters[0], caller, previous_item);
|
||||
return calculate_size(v, f._parameters[0].is_path());
|
||||
return calculate_size(v);
|
||||
}
|
||||
},
|
||||
{"attribute_exists", [] (calculate_value_caller caller, const rjson::value* previous_item, const parsed::value::function_call& f) {
|
||||
@@ -651,8 +634,7 @@ std::unordered_map<std::string_view, function_handler_type*> function_handlers {
|
||||
}
|
||||
rjson::value v1 = calculate_value(f._parameters[0], caller, previous_item);
|
||||
rjson::value v2 = calculate_value(f._parameters[1], caller, previous_item);
|
||||
return to_bool_json(check_CONTAINS(v1.IsNull() ? nullptr : &v1, v2,
|
||||
f._parameters[0].is_constant(), f._parameters[1].is_constant()));
|
||||
return to_bool_json(check_CONTAINS(v1.IsNull() ? nullptr : &v1, v2));
|
||||
}
|
||||
},
|
||||
};
|
||||
@@ -679,7 +661,7 @@ static rjson::value extract_path(const rjson::value* item,
|
||||
// objects. But today Alternator does not validate the structure
|
||||
// of nested documents before storing them, so this can happen on
|
||||
// read.
|
||||
throw api_error::validation(format("{}: malformed item read: {}", caller, *item));
|
||||
throw api_error::validation(format("{}: malformed item read: {}", *item));
|
||||
}
|
||||
const char* type = v->MemberBegin()->name.GetString();
|
||||
v = &(v->MemberBegin()->value);
|
||||
|
||||
@@ -74,22 +74,7 @@ options {
|
||||
*/
|
||||
@parser::context {
|
||||
void displayRecognitionError(ANTLR_UINT8** token_names, ExceptionBaseType* ex) {
|
||||
const char* err;
|
||||
switch (ex->getType()) {
|
||||
case antlr3::ExceptionType::FAILED_PREDICATE_EXCEPTION:
|
||||
err = "expression nested too deeply";
|
||||
break;
|
||||
default:
|
||||
err = "syntax error";
|
||||
break;
|
||||
}
|
||||
// Alternator expressions are always single line so ex->get_line()
|
||||
// is always 1, no sense to print it.
|
||||
// TODO: return the position as part of the exception, so the
|
||||
// caller in expressions.cc that knows the expression string can
|
||||
// mark the error position in the final error message.
|
||||
throw expressions_syntax_error(format("{} at char {}", err,
|
||||
ex->get_charPositionInLine()));
|
||||
throw expressions_syntax_error("syntax error");
|
||||
}
|
||||
}
|
||||
@lexer::context {
|
||||
@@ -98,23 +83,6 @@ options {
|
||||
}
|
||||
}
|
||||
|
||||
/* Unfortunately, ANTLR uses recursion - not the heap - to parse recursive
|
||||
* expressions. To make things even worse, ANTLR has no way to limit the
|
||||
* depth of this recursion (unlike Yacc which has YYMAXDEPTH). So deeply-
|
||||
* nested expression like "(((((((((((((..." can easily crash Scylla on a
|
||||
* stack overflow (see issue #14477).
|
||||
*
|
||||
* We are lucky that in the grammar for DynamoDB expressions (below),
|
||||
* only a few specific rules can recurse, so it was fairly easy to add a
|
||||
* "depth" counter to a few specific rules, and then use a predicate
|
||||
* "{depth<MAX_DEPTH}?" to avoid parsing if the depth exceeds this limit,
|
||||
* and throw a FAILED_PREDICATE_EXCEPTION in that case, which we will
|
||||
* report to the user as a "expression nested too deeply" error.
|
||||
*/
|
||||
@parser::members {
|
||||
static constexpr int MAX_DEPTH = 400;
|
||||
}
|
||||
|
||||
/*
|
||||
* Lexical analysis phase, i.e., splitting the input up to tokens.
|
||||
* Lexical analyzer rules have names starting in capital letters.
|
||||
@@ -187,20 +155,19 @@ path returns [parsed::path p]:
|
||||
| '[' INTEGER ']' { $p.add_index(std::stoi($INTEGER.text)); }
|
||||
)*;
|
||||
|
||||
/* See comment above why the "depth" counter was needed here */
|
||||
value[int depth] returns [parsed::value v]:
|
||||
value returns [parsed::value v]:
|
||||
VALREF { $v.set_valref($VALREF.text); }
|
||||
| path { $v.set_path($path.p); }
|
||||
| {depth<MAX_DEPTH}? NAME { $v.set_func_name($NAME.text); }
|
||||
'(' x=value[depth+1] { $v.add_func_parameter($x.v); }
|
||||
(',' x=value[depth+1] { $v.add_func_parameter($x.v); })*
|
||||
| NAME { $v.set_func_name($NAME.text); }
|
||||
'(' x=value { $v.add_func_parameter($x.v); }
|
||||
(',' x=value { $v.add_func_parameter($x.v); })*
|
||||
')'
|
||||
;
|
||||
|
||||
update_expression_set_rhs returns [parsed::set_rhs rhs]:
|
||||
v=value[0] { $rhs.set_value(std::move($v.v)); }
|
||||
( '+' v=value[0] { $rhs.set_plus(std::move($v.v)); }
|
||||
| '-' v=value[0] { $rhs.set_minus(std::move($v.v)); }
|
||||
v=value { $rhs.set_value(std::move($v.v)); }
|
||||
( '+' v=value { $rhs.set_plus(std::move($v.v)); }
|
||||
| '-' v=value { $rhs.set_minus(std::move($v.v)); }
|
||||
)?
|
||||
;
|
||||
|
||||
@@ -238,7 +205,7 @@ projection_expression returns [std::vector<parsed::path> v]:
|
||||
|
||||
|
||||
primitive_condition returns [parsed::primitive_condition c]:
|
||||
v=value[0] { $c.add_value(std::move($v.v));
|
||||
v=value { $c.add_value(std::move($v.v));
|
||||
$c.set_operator(parsed::primitive_condition::type::VALUE); }
|
||||
( ( '=' { $c.set_operator(parsed::primitive_condition::type::EQ); }
|
||||
| '<' '>' { $c.set_operator(parsed::primitive_condition::type::NE); }
|
||||
@@ -247,14 +214,14 @@ primitive_condition returns [parsed::primitive_condition c]:
|
||||
| '>' { $c.set_operator(parsed::primitive_condition::type::GT); }
|
||||
| '>' '=' { $c.set_operator(parsed::primitive_condition::type::GE); }
|
||||
)
|
||||
v=value[0] { $c.add_value(std::move($v.v)); }
|
||||
v=value { $c.add_value(std::move($v.v)); }
|
||||
| BETWEEN { $c.set_operator(parsed::primitive_condition::type::BETWEEN); }
|
||||
v=value[0] { $c.add_value(std::move($v.v)); }
|
||||
v=value { $c.add_value(std::move($v.v)); }
|
||||
AND
|
||||
v=value[0] { $c.add_value(std::move($v.v)); }
|
||||
v=value { $c.add_value(std::move($v.v)); }
|
||||
| IN '(' { $c.set_operator(parsed::primitive_condition::type::IN); }
|
||||
v=value[0] { $c.add_value(std::move($v.v)); }
|
||||
(',' v=value[0] { $c.add_value(std::move($v.v)); })*
|
||||
v=value { $c.add_value(std::move($v.v)); }
|
||||
(',' v=value { $c.add_value(std::move($v.v)); })*
|
||||
')'
|
||||
)?
|
||||
;
|
||||
@@ -264,20 +231,19 @@ primitive_condition returns [parsed::primitive_condition c]:
|
||||
// common rule prefixes, and (lack of) support for operator precedence.
|
||||
// These rules could have been written more clearly using a more powerful
|
||||
// parser generator - such as Yacc.
|
||||
// See comment above why the "depth" counter was needed here.
|
||||
boolean_expression[int depth] returns [parsed::condition_expression e]:
|
||||
b=boolean_expression_1[depth] { $e.append(std::move($b.e), '|'); }
|
||||
(OR b=boolean_expression_1[depth] { $e.append(std::move($b.e), '|'); } )*
|
||||
boolean_expression returns [parsed::condition_expression e]:
|
||||
b=boolean_expression_1 { $e.append(std::move($b.e), '|'); }
|
||||
(OR b=boolean_expression_1 { $e.append(std::move($b.e), '|'); } )*
|
||||
;
|
||||
boolean_expression_1[int depth] returns [parsed::condition_expression e]:
|
||||
b=boolean_expression_2[depth] { $e.append(std::move($b.e), '&'); }
|
||||
(AND b=boolean_expression_2[depth] { $e.append(std::move($b.e), '&'); } )*
|
||||
boolean_expression_1 returns [parsed::condition_expression e]:
|
||||
b=boolean_expression_2 { $e.append(std::move($b.e), '&'); }
|
||||
(AND b=boolean_expression_2 { $e.append(std::move($b.e), '&'); } )*
|
||||
;
|
||||
boolean_expression_2[int depth] returns [parsed::condition_expression e]:
|
||||
boolean_expression_2 returns [parsed::condition_expression e]:
|
||||
p=primitive_condition { $e.set_primitive(std::move($p.c)); }
|
||||
| {depth<MAX_DEPTH}? NOT b=boolean_expression_2[depth+1] { $e = std::move($b.e); $e.apply_not(); }
|
||||
| {depth<MAX_DEPTH}? '(' b=boolean_expression[depth+1] ')' { $e = std::move($b.e); }
|
||||
| NOT b=boolean_expression_2 { $e = std::move($b.e); $e.apply_not(); }
|
||||
| '(' b=boolean_expression ')' { $e = std::move($b.e); }
|
||||
;
|
||||
|
||||
condition_expression returns [parsed::condition_expression e]:
|
||||
boolean_expression[0] { e=std::move($boolean_expression.e); } EOF;
|
||||
boolean_expression { e=std::move($boolean_expression.e); } EOF;
|
||||
|
||||
@@ -26,9 +26,9 @@ public:
|
||||
using runtime_error::runtime_error;
|
||||
};
|
||||
|
||||
parsed::update_expression parse_update_expression(std::string_view query);
|
||||
std::vector<parsed::path> parse_projection_expression(std::string_view query);
|
||||
parsed::condition_expression parse_condition_expression(std::string_view query, const char* caller);
|
||||
parsed::update_expression parse_update_expression(std::string query);
|
||||
std::vector<parsed::path> parse_projection_expression(std::string query);
|
||||
parsed::condition_expression parse_condition_expression(std::string query);
|
||||
|
||||
void resolve_update_expression(parsed::update_expression& ue,
|
||||
const rjson::value* expression_attribute_names,
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
/*
|
||||
* Parsed representation of expressions and their components.
|
||||
*
|
||||
* Types in alternator::parsed namespace are used for holding the parse
|
||||
* Types in alternator::parse namespace are used for holding the parse
|
||||
* tree - objects generated by the Antlr rules after parsing an expression.
|
||||
* Because of the way Antlr works, all these objects are default-constructed
|
||||
* first, and then assigned when the rule is completed, so all these types
|
||||
|
||||
@@ -14,14 +14,11 @@
|
||||
#include "rapidjson/writer.h"
|
||||
#include "concrete_types.hh"
|
||||
#include "cql3/type_json.hh"
|
||||
#include "mutation/position_in_partition.hh"
|
||||
|
||||
static logging::logger slogger("alternator-serialization");
|
||||
|
||||
namespace alternator {
|
||||
|
||||
bool is_alternator_keyspace(const sstring& ks_name);
|
||||
|
||||
type_info type_info_from_string(std::string_view type) {
|
||||
static thread_local const std::unordered_map<std::string_view, type_info> type_infos = {
|
||||
{"S", {alternator_type::S, utf8_type}},
|
||||
@@ -50,115 +47,6 @@ type_representation represent_type(alternator_type atype) {
|
||||
return it->second;
|
||||
}
|
||||
|
||||
// Get the magnitude and precision of a big_decimal - as these concepts are
|
||||
// defined by DynamoDB - to allow us to enforce limits on those as explained
|
||||
// in ssue #6794. The "magnitude" of 9e123 is 123 and of -9e-123 is -123,
|
||||
// the "precision" of 12.34e56 is the number of significant digits - 4.
|
||||
//
|
||||
// Unfortunately it turned out to be quite difficult to take a big_decimal and
|
||||
// calculate its magnitude and precision from its scale() and unscaled_value().
|
||||
// So in the following ugly implementation we calculate them from the string
|
||||
// representation instead. We assume the number was already parsed
|
||||
// sucessfully to a big_decimal to it follows its syntax rules.
|
||||
//
|
||||
// FIXME: rewrite this function to take a big_decimal, not a string.
|
||||
// Maybe a snippet like this can help:
|
||||
// boost::multiprecision::cpp_int digits = boost::multiprecision::log10(num.unscaled_value().convert_to<boost::multiprecision::mpf_float_50>()).convert_to<boost::multiprecision::cpp_int>() + 1;
|
||||
|
||||
|
||||
internal::magnitude_and_precision internal::get_magnitude_and_precision(std::string_view s) {
|
||||
size_t e_or_end = s.find_first_of("eE");
|
||||
std::string_view base = s.substr(0, e_or_end);
|
||||
if (s[0]=='-' || s[0]=='+') {
|
||||
base = base.substr(1);
|
||||
}
|
||||
int magnitude = 0;
|
||||
int precision = 0;
|
||||
size_t dot_or_end = base.find_first_of(".");
|
||||
size_t nonzero = base.find_first_not_of("0");
|
||||
if (dot_or_end != std::string_view::npos) {
|
||||
if (nonzero == dot_or_end) {
|
||||
// 0.000031 => magnitude = -5 (like 3.1e-5), precision = 2.
|
||||
std::string_view fraction = base.substr(dot_or_end + 1);
|
||||
size_t nonzero2 = fraction.find_first_not_of("0");
|
||||
if (nonzero2 != std::string_view::npos) {
|
||||
magnitude = -nonzero2 - 1;
|
||||
precision = fraction.size() - nonzero2;
|
||||
}
|
||||
} else {
|
||||
// 000123.45678 => magnitude = 2, precision = 8.
|
||||
magnitude = dot_or_end - nonzero - 1;
|
||||
precision = base.size() - nonzero - 1;
|
||||
}
|
||||
// trailing zeros don't count to precision, e.g., precision
|
||||
// of 1000.0, 1.0 or 1.0000 are just 1.
|
||||
size_t last_significant = base.find_last_not_of(".0");
|
||||
if (last_significant == std::string_view::npos) {
|
||||
precision = 0;
|
||||
} else if (last_significant < dot_or_end) {
|
||||
// e.g., 1000.00 reduce 5 = 7 - (0+1) - 1 from precision
|
||||
precision -= base.size() - last_significant - 2;
|
||||
} else {
|
||||
// e.g., 1235.60 reduce 5 = 7 - (5+1) from precision
|
||||
precision -= base.size() - last_significant - 1;
|
||||
}
|
||||
} else if (nonzero == std::string_view::npos) {
|
||||
// all-zero integer 000000
|
||||
magnitude = 0;
|
||||
precision = 0;
|
||||
} else {
|
||||
magnitude = base.size() - 1 - nonzero;
|
||||
precision = base.size() - nonzero;
|
||||
// trailing zeros don't count to precision, e.g., precision
|
||||
// of 1000 is just 1.
|
||||
size_t last_significant = base.find_last_not_of("0");
|
||||
if (last_significant == std::string_view::npos) {
|
||||
precision = 0;
|
||||
} else {
|
||||
// e.g., 1000 reduce 3 = 4 - (0+1)
|
||||
precision -= base.size() - last_significant - 1;
|
||||
}
|
||||
}
|
||||
if (precision && e_or_end != std::string_view::npos) {
|
||||
std::string_view exponent = s.substr(e_or_end + 1);
|
||||
if (exponent.size() > 4) {
|
||||
// don't even bother atoi(), exponent is too large
|
||||
magnitude = exponent[0]=='-' ? -9999 : 9999;
|
||||
} else {
|
||||
try {
|
||||
magnitude += boost::lexical_cast<int32_t>(exponent);
|
||||
} catch (...) {
|
||||
magnitude = 9999;
|
||||
}
|
||||
}
|
||||
}
|
||||
return magnitude_and_precision {magnitude, precision};
|
||||
}
|
||||
|
||||
// Parse a number read from user input, validating that it has a valid
|
||||
// numeric format and also in the allowed magnitude and precision ranges
|
||||
// (see issue #6794). Throws an api_error::validation if the validation
|
||||
// failed.
|
||||
static big_decimal parse_and_validate_number(std::string_view s) {
|
||||
try {
|
||||
big_decimal ret(s);
|
||||
auto [magnitude, precision] = internal::get_magnitude_and_precision(s);
|
||||
if (magnitude > 125) {
|
||||
throw api_error::validation(format("Number overflow: {}. Attempting to store a number with magnitude larger than supported range.", s));
|
||||
}
|
||||
if (magnitude < -130) {
|
||||
throw api_error::validation(format("Number underflow: {}. Attempting to store a number with magnitude lower than supported range.", s));
|
||||
}
|
||||
if (precision > 38) {
|
||||
throw api_error::validation(format("Number too precise: {}. Attempting to store a number with more significant digits than supported.", s));
|
||||
}
|
||||
return ret;
|
||||
} catch (const marshal_exception& e) {
|
||||
throw api_error::validation(format("The parameter cannot be converted to a numeric value: {}", s));
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
struct from_json_visitor {
|
||||
const rjson::value& v;
|
||||
bytes_ostream& bo;
|
||||
@@ -168,19 +56,21 @@ struct from_json_visitor {
|
||||
bo.write(t.from_string(rjson::to_string_view(v)));
|
||||
}
|
||||
void operator()(const bytes_type_impl& t) const {
|
||||
// FIXME: it's difficult at this point to get information if value was provided
|
||||
// in request or comes from the storage, for now we assume it's user's fault.
|
||||
bo.write(*unwrap_bytes(v, true));
|
||||
bo.write(rjson::base64_decode(v));
|
||||
}
|
||||
void operator()(const boolean_type_impl& t) const {
|
||||
bo.write(boolean_type->decompose(v.GetBool()));
|
||||
}
|
||||
void operator()(const decimal_type_impl& t) const {
|
||||
bo.write(decimal_type->decompose(parse_and_validate_number(rjson::to_string_view(v))));
|
||||
try {
|
||||
bo.write(t.from_string(rjson::to_string_view(v)));
|
||||
} catch (const marshal_exception& e) {
|
||||
throw api_error::validation(format("The parameter cannot be converted to a numeric value: {}", v));
|
||||
}
|
||||
}
|
||||
// default
|
||||
void operator()(const abstract_type& t) const {
|
||||
bo.write(from_json_object(t, v));
|
||||
bo.write(from_json_object(t, v, cql_serialization_format::internal()));
|
||||
}
|
||||
};
|
||||
|
||||
@@ -271,47 +161,32 @@ bytes get_key_column_value(const rjson::value& item, const column_definition& co
|
||||
return get_key_from_typed_value(*key_typed_value, column);
|
||||
}
|
||||
|
||||
// Parses the JSON encoding for a key value, which is a map with a single
|
||||
// entry whose key is the type and the value is the encoded value.
|
||||
// If this type does not match the desired "type_str", an api_error::validation
|
||||
// error is thrown (the "name" parameter is the name of the column which will
|
||||
// mentioned in the exception message).
|
||||
// If the type does match, a reference to the encoded value is returned.
|
||||
static const rjson::value& get_typed_value(const rjson::value& key_typed_value, std::string_view type_str, std::string_view name, std::string_view value_name) {
|
||||
if (!key_typed_value.IsObject() || key_typed_value.MemberCount() != 1 ||
|
||||
!key_typed_value.MemberBegin()->value.IsString()) {
|
||||
throw api_error::validation(
|
||||
format("Malformed value object for {} {}: {}",
|
||||
value_name, name, key_typed_value));
|
||||
}
|
||||
|
||||
auto it = key_typed_value.MemberBegin();
|
||||
if (rjson::to_string_view(it->name) != type_str) {
|
||||
throw api_error::validation(
|
||||
format("Type mismatch: expected type {} for {} {}, got type {}",
|
||||
type_str, value_name, name, it->name));
|
||||
}
|
||||
return it->value;
|
||||
}
|
||||
|
||||
// Parses the JSON encoding for a key value, which is a map with a single
|
||||
// entry, whose key is the type (expected to match the key column's type)
|
||||
// and the value is the encoded value.
|
||||
bytes get_key_from_typed_value(const rjson::value& key_typed_value, const column_definition& column) {
|
||||
auto& value = get_typed_value(key_typed_value, type_to_string(column.type), column.name_as_text(), "key column");
|
||||
std::string_view value_view = rjson::to_string_view(value);
|
||||
if (!key_typed_value.IsObject() || key_typed_value.MemberCount() != 1 ||
|
||||
!key_typed_value.MemberBegin()->value.IsString()) {
|
||||
throw api_error::validation(
|
||||
format("Malformed value object for key column {}: {}",
|
||||
column.name_as_text(), key_typed_value));
|
||||
}
|
||||
|
||||
auto it = key_typed_value.MemberBegin();
|
||||
if (it->name != type_to_string(column.type)) {
|
||||
throw api_error::validation(
|
||||
format("Type mismatch: expected type {} for key column {}, got type {}",
|
||||
type_to_string(column.type), column.name_as_text(), it->name));
|
||||
}
|
||||
std::string_view value_view = rjson::to_string_view(it->value);
|
||||
if (value_view.empty()) {
|
||||
throw api_error::validation(
|
||||
format("The AttributeValue for a key attribute cannot contain an empty string value. Key: {}", column.name_as_text()));
|
||||
}
|
||||
if (column.type == bytes_type) {
|
||||
// FIXME: it's difficult at this point to get information if value was provided
|
||||
// in request or comes from the storage, for now we assume it's user's fault.
|
||||
return *unwrap_bytes(value, true);
|
||||
} else if (column.type == decimal_type) {
|
||||
return decimal_type->decompose(parse_and_validate_number(rjson::to_string_view(value)));
|
||||
return rjson::base64_decode(it->value);
|
||||
} else {
|
||||
return column.type->from_string(value_view);
|
||||
return column.type->from_string(rjson::to_string_view(it->value));
|
||||
}
|
||||
|
||||
}
|
||||
@@ -321,7 +196,7 @@ rjson::value json_key_column_value(bytes_view cell, const column_definition& col
|
||||
std::string b64 = base64_encode(cell);
|
||||
return rjson::from_string(b64);
|
||||
} if (column.type == utf8_type) {
|
||||
return rjson::from_string(reinterpret_cast<const char*>(cell.data()), cell.size());
|
||||
return rjson::from_string(std::string(reinterpret_cast<const char*>(cell.data()), cell.size()));
|
||||
} else if (column.type == decimal_type) {
|
||||
// FIXME: use specialized Alternator number type, not the more
|
||||
// general "decimal_type". A dedicated type can be more efficient
|
||||
@@ -362,38 +237,6 @@ clustering_key ck_from_json(const rjson::value& item, schema_ptr schema) {
|
||||
return clustering_key::from_exploded(raw_ck);
|
||||
}
|
||||
|
||||
position_in_partition pos_from_json(const rjson::value& item, schema_ptr schema) {
|
||||
auto ck = ck_from_json(item, schema);
|
||||
if (is_alternator_keyspace(schema->ks_name())) {
|
||||
return position_in_partition::for_key(std::move(ck));
|
||||
}
|
||||
const auto region_item = rjson::find(item, scylla_paging_region);
|
||||
const auto weight_item = rjson::find(item, scylla_paging_weight);
|
||||
if (bool(region_item) != bool(weight_item)) {
|
||||
throw api_error::validation("Malformed value object: region and weight has to be either both missing or both present");
|
||||
}
|
||||
bound_weight weight;
|
||||
if (region_item) {
|
||||
auto region_view = rjson::to_string_view(get_typed_value(*region_item, "S", scylla_paging_region, "key region"));
|
||||
auto weight_view = rjson::to_string_view(get_typed_value(*weight_item, "N", scylla_paging_weight, "key weight"));
|
||||
auto region = parse_partition_region(region_view);
|
||||
if (weight_view == "-1") {
|
||||
weight = bound_weight::before_all_prefixed;
|
||||
} else if (weight_view == "0") {
|
||||
weight = bound_weight::equal;
|
||||
} else if (weight_view == "1") {
|
||||
weight = bound_weight::after_all_prefixed;
|
||||
} else {
|
||||
throw std::runtime_error(fmt::format("Invalid value for weight: {}", weight_view));
|
||||
}
|
||||
return position_in_partition(region, weight, region == partition_region::clustered ? std::optional(std::move(ck)) : std::nullopt);
|
||||
}
|
||||
if (ck.is_empty()) {
|
||||
return position_in_partition::for_partition_start();
|
||||
}
|
||||
return position_in_partition::for_key(std::move(ck));
|
||||
}
|
||||
|
||||
big_decimal unwrap_number(const rjson::value& v, std::string_view diagnostic) {
|
||||
if (!v.IsObject() || v.MemberCount() != 1) {
|
||||
throw api_error::validation(format("{}: invalid number object", diagnostic));
|
||||
@@ -402,13 +245,16 @@ big_decimal unwrap_number(const rjson::value& v, std::string_view diagnostic) {
|
||||
if (it->name != "N") {
|
||||
throw api_error::validation(format("{}: expected number, found type '{}'", diagnostic, it->name));
|
||||
}
|
||||
if (!it->value.IsString()) {
|
||||
// We shouldn't reach here. Callers normally validate their input
|
||||
// earlier with validate_value().
|
||||
throw api_error::validation(format("{}: improperly formatted number constant", diagnostic));
|
||||
try {
|
||||
if (!it->value.IsString()) {
|
||||
// We shouldn't reach here. Callers normally validate their input
|
||||
// earlier with validate_value().
|
||||
throw api_error::validation(format("{}: improperly formatted number constant", diagnostic));
|
||||
}
|
||||
return big_decimal(rjson::to_string_view(it->value));
|
||||
} catch (const marshal_exception& e) {
|
||||
throw api_error::validation(format("The parameter cannot be converted to a numeric value: {}", it->value));
|
||||
}
|
||||
big_decimal ret = parse_and_validate_number(rjson::to_string_view(it->value));
|
||||
return ret;
|
||||
}
|
||||
|
||||
std::optional<big_decimal> try_unwrap_number(const rjson::value& v) {
|
||||
@@ -420,19 +266,8 @@ std::optional<big_decimal> try_unwrap_number(const rjson::value& v) {
|
||||
return std::nullopt;
|
||||
}
|
||||
try {
|
||||
return parse_and_validate_number(rjson::to_string_view(it->value));
|
||||
} catch (api_error&) {
|
||||
return std::nullopt;
|
||||
}
|
||||
}
|
||||
|
||||
std::optional<bytes> unwrap_bytes(const rjson::value& value, bool from_query) {
|
||||
try {
|
||||
return rjson::base64_decode(value);
|
||||
} catch (...) {
|
||||
if (from_query) {
|
||||
throw api_error::serialization(format("Invalid base64 data"));
|
||||
}
|
||||
return big_decimal(rjson::to_string_view(it->value));
|
||||
} catch (const marshal_exception& e) {
|
||||
return std::nullopt;
|
||||
}
|
||||
}
|
||||
@@ -466,7 +301,7 @@ rjson::value number_add(const rjson::value& v1, const rjson::value& v2) {
|
||||
auto n1 = unwrap_number(v1, "UpdateExpression");
|
||||
auto n2 = unwrap_number(v2, "UpdateExpression");
|
||||
rjson::value ret = rjson::empty_object();
|
||||
sstring str_ret = (n1 + n2).to_string();
|
||||
std::string str_ret = std::string((n1 + n2).to_string());
|
||||
rjson::add(ret, "N", rjson::from_string(str_ret));
|
||||
return ret;
|
||||
}
|
||||
@@ -475,7 +310,7 @@ rjson::value number_subtract(const rjson::value& v1, const rjson::value& v2) {
|
||||
auto n1 = unwrap_number(v1, "UpdateExpression");
|
||||
auto n2 = unwrap_number(v2, "UpdateExpression");
|
||||
rjson::value ret = rjson::empty_object();
|
||||
sstring str_ret = (n1 - n2).to_string();
|
||||
std::string str_ret = std::string((n1 - n2).to_string());
|
||||
rjson::add(ret, "N", rjson::from_string(str_ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -11,14 +11,12 @@
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
#include <optional>
|
||||
#include "types/types.hh"
|
||||
#include "schema/schema_fwd.hh"
|
||||
#include "types.hh"
|
||||
#include "schema_fwd.hh"
|
||||
#include "keys.hh"
|
||||
#include "utils/rjson.hh"
|
||||
#include "utils/big_decimal.hh"
|
||||
|
||||
class position_in_partition;
|
||||
|
||||
namespace alternator {
|
||||
|
||||
enum class alternator_type : int8_t {
|
||||
@@ -35,9 +33,6 @@ struct type_representation {
|
||||
data_type dtype;
|
||||
};
|
||||
|
||||
inline constexpr std::string_view scylla_paging_region(":scylla:paging:region");
|
||||
inline constexpr std::string_view scylla_paging_weight(":scylla:paging:weight");
|
||||
|
||||
type_info type_info_from_string(std::string_view type);
|
||||
type_representation represent_type(alternator_type atype);
|
||||
|
||||
@@ -52,7 +47,6 @@ rjson::value json_key_column_value(bytes_view cell, const column_definition& col
|
||||
|
||||
partition_key pk_from_json(const rjson::value& item, schema_ptr schema);
|
||||
clustering_key ck_from_json(const rjson::value& item, schema_ptr schema);
|
||||
position_in_partition pos_from_json(const rjson::value& item, schema_ptr schema);
|
||||
|
||||
// If v encodes a number (i.e., it is a {"N": [...]}, returns an object representing it. Otherwise,
|
||||
// raises ValidationException with diagnostic.
|
||||
@@ -62,11 +56,6 @@ big_decimal unwrap_number(const rjson::value& v, std::string_view diagnostic);
|
||||
// when the given v does not encode a number.
|
||||
std::optional<big_decimal> try_unwrap_number(const rjson::value& v);
|
||||
|
||||
// unwrap_bytes decodes byte value, on decoding failure it either raises api_error::serialization
|
||||
// iff from_query is true or returns unset optional iff from_query is false.
|
||||
// Therefore it's safe to dereference returned optional when called with from_query equal true.
|
||||
std::optional<bytes> unwrap_bytes(const rjson::value& value, bool from_query);
|
||||
|
||||
// Check if a given JSON object encodes a set (i.e., it is a {"SS": [...]}, or "NS", "BS"
|
||||
// and returns set's type and a pointer to that set. If the object does not encode a set,
|
||||
// returned value is {"", nullptr}
|
||||
@@ -94,12 +83,5 @@ std::optional<rjson::value> set_diff(const rjson::value& v1, const rjson::value&
|
||||
// Returns a null value if one of the arguments is not actually a list.
|
||||
rjson::value list_concatenate(const rjson::value& v1, const rjson::value& v2);
|
||||
|
||||
namespace internal {
|
||||
struct magnitude_and_precision {
|
||||
int magnitude;
|
||||
int precision;
|
||||
};
|
||||
magnitude_and_precision get_magnitude_and_precision(std::string_view);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -16,21 +16,18 @@
|
||||
#include <seastar/util/short_streams.hh>
|
||||
#include "seastarx.hh"
|
||||
#include "error.hh"
|
||||
#include "service/qos/service_level_controller.hh"
|
||||
#include "utils/rjson.hh"
|
||||
#include "auth.hh"
|
||||
#include <cctype>
|
||||
#include "service/storage_proxy.hh"
|
||||
#include "locator/snitch_base.hh"
|
||||
#include "gms/gossiper.hh"
|
||||
#include "utils/overloaded_functor.hh"
|
||||
#include "utils/fb_utilities.hh"
|
||||
#include "utils/aws_sigv4.hh"
|
||||
|
||||
static logging::logger slogger("alternator-server");
|
||||
|
||||
using namespace httpd;
|
||||
using request = http::request;
|
||||
using reply = http::reply;
|
||||
|
||||
namespace alternator {
|
||||
|
||||
@@ -146,7 +143,7 @@ public:
|
||||
std::unique_ptr<request> req, std::unique_ptr<reply> rep) override {
|
||||
handle_CORS(*req, *rep, false);
|
||||
return _f_handle(std::move(req), std::move(rep)).then(
|
||||
[](std::unique_ptr<reply> rep) {
|
||||
[this](std::unique_ptr<reply> rep) {
|
||||
rep->set_mime_type("application/x-amz-json-1.0");
|
||||
rep->done();
|
||||
return make_ready_future<std::unique_ptr<reply>>(std::move(rep));
|
||||
@@ -155,10 +152,8 @@ public:
|
||||
|
||||
protected:
|
||||
void generate_error_reply(reply& rep, const api_error& err) {
|
||||
rjson::value results = rjson::empty_object();
|
||||
rjson::add(results, "__type", rjson::from_string("com.amazonaws.dynamodb.v20120810#" + err._type));
|
||||
rjson::add(results, "message", err._msg);
|
||||
rep._content = rjson::print(std::move(results));
|
||||
rep._content += "{\"__type\":\"com.amazonaws.dynamodb.v20120810#" + err._type + "\"," +
|
||||
"\"message\":\"" + err._msg + "\"}";
|
||||
rep._status = err._http_code;
|
||||
slogger.trace("api_handler error case: {}", rep._content);
|
||||
}
|
||||
@@ -204,14 +199,12 @@ protected:
|
||||
// It's very easy to get a list of all live nodes on the cluster,
|
||||
// using _gossiper().get_live_members(). But getting
|
||||
// just the list of live nodes in this DC needs more elaborate code:
|
||||
auto& topology = _proxy.get_token_metadata_ptr()->get_topology();
|
||||
sstring local_dc = topology.get_datacenter();
|
||||
std::unordered_set<gms::inet_address> local_dc_nodes = topology.get_datacenter_endpoints().at(local_dc);
|
||||
sstring local_dc = locator::i_endpoint_snitch::get_local_snitch_ptr()->get_datacenter(
|
||||
utils::fb_utilities::get_broadcast_address());
|
||||
std::unordered_set<gms::inet_address> local_dc_nodes =
|
||||
_proxy.get_token_metadata_ptr()->get_topology().get_datacenter_endpoints().at(local_dc);
|
||||
for (auto& ip : local_dc_nodes) {
|
||||
// Note that it's not enough for the node to be is_alive() - a
|
||||
// node joining the cluster is also "alive" but not responsive to
|
||||
// requests. We need the node to be in normal state. See #19694.
|
||||
if (_gossiper.is_normal(ip)) {
|
||||
if (_gossiper.is_alive(ip)) {
|
||||
rjson::push_back(results, rjson::from_string(ip.to_sstring()));
|
||||
}
|
||||
}
|
||||
@@ -241,7 +234,7 @@ protected:
|
||||
future<std::string> server::verify_signature(const request& req, const chunked_content& content) {
|
||||
if (!_enforce_authorization) {
|
||||
slogger.debug("Skipping authorization");
|
||||
return make_ready_future<std::string>();
|
||||
return make_ready_future<std::string>("<unauthenticated request>");
|
||||
}
|
||||
auto host_it = req._headers.find("Host");
|
||||
if (host_it == req._headers.end()) {
|
||||
@@ -323,13 +316,8 @@ future<std::string> server::verify_signature(const request& req, const chunked_c
|
||||
region = std::move(region),
|
||||
service = std::move(service),
|
||||
user_signature = std::move(user_signature)] (key_cache::value_ptr key_ptr) {
|
||||
std::string signature;
|
||||
try {
|
||||
signature = utils::aws::get_signature(user, *key_ptr, std::string_view(host), "/", req._method,
|
||||
datestamp, signed_headers_str, signed_headers_map, &content, region, service, "");
|
||||
} catch (const std::exception& e) {
|
||||
throw api_error::invalid_signature(e.what());
|
||||
}
|
||||
std::string signature = get_signature(user, *key_ptr, std::string_view(host), req._method,
|
||||
datestamp, signed_headers_str, signed_headers_map, content, region, service, "");
|
||||
|
||||
if (signature != std::string_view(user_signature)) {
|
||||
_key_cache.remove(user);
|
||||
@@ -376,9 +364,7 @@ static tracing::trace_state_ptr maybe_trace_query(service::client_state& client_
|
||||
tracing::add_session_param(trace_state, "alternator_op", op);
|
||||
tracing::add_query(trace_state, truncated_content_view(query, buf));
|
||||
tracing::begin(trace_state, format("Alternator {}", op), client_state.get_client_address());
|
||||
if (!username.empty()) {
|
||||
tracing::set_username(trace_state, auth::authenticated_user(username));
|
||||
}
|
||||
tracing::set_username(trace_state, auth::authenticated_user(username));
|
||||
}
|
||||
return trace_state;
|
||||
}
|
||||
@@ -421,13 +407,9 @@ future<executor::request_return_type> server::handle_api_request(std::unique_ptr
|
||||
auto leave = defer([this] () noexcept { _pending_requests.leave(); });
|
||||
//FIXME: Client state can provide more context, e.g. client's endpoint address
|
||||
// We use unique_ptr because client_state cannot be moved or copied
|
||||
executor::client_state client_state = username.empty()
|
||||
? service::client_state{service::client_state::internal_tag()}
|
||||
: service::client_state{service::client_state::internal_tag(), _auth_service, _sl_controller, username};
|
||||
co_await client_state.maybe_update_per_service_level_params();
|
||||
|
||||
executor::client_state client_state{executor::client_state::internal_tag()};
|
||||
tracing::trace_state_ptr trace_state = maybe_trace_query(client_state, username, op, content);
|
||||
tracing::trace(trace_state, "{}", op);
|
||||
tracing::trace(trace_state, op);
|
||||
rjson::value json_request = co_await _json_parser.parse(std::move(content));
|
||||
co_return co_await callback_it->second(_executor, client_state, trace_state,
|
||||
make_service_permit(std::move(units)), std::move(json_request), std::move(req));
|
||||
@@ -458,14 +440,12 @@ void server::set_routes(routes& r) {
|
||||
//FIXME: A way to immediately invalidate the cache should be considered,
|
||||
// e.g. when the system table which stores the keys is changed.
|
||||
// For now, this propagation may take up to 1 minute.
|
||||
server::server(executor& exec, service::storage_proxy& proxy, gms::gossiper& gossiper, auth::service& auth_service, qos::service_level_controller& sl_controller)
|
||||
server::server(executor& exec, service::storage_proxy& proxy, gms::gossiper& gossiper)
|
||||
: _http_server("http-alternator")
|
||||
, _https_server("https-alternator")
|
||||
, _executor(exec)
|
||||
, _proxy(proxy)
|
||||
, _gossiper(gossiper)
|
||||
, _auth_service(auth_service)
|
||||
, _sl_controller(sl_controller)
|
||||
, _key_cache(1024, 1min, slogger)
|
||||
, _enforce_authorization(false)
|
||||
, _enabled_servers{}
|
||||
@@ -540,9 +520,6 @@ server::server(executor& exec, service::storage_proxy& proxy, gms::gossiper& gos
|
||||
{"GetRecords", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.get_records(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"DescribeContinuousBackups", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.describe_continuous_backups(client_state, std::move(permit), std::move(json_request));
|
||||
}},
|
||||
} {
|
||||
}
|
||||
|
||||
@@ -634,7 +611,7 @@ future<> server::json_parser::stop() {
|
||||
|
||||
const char* api_error::what() const noexcept {
|
||||
if (_what_string.empty()) {
|
||||
_what_string = format("{} {}: {}", static_cast<int>(_http_code), _type, _msg);
|
||||
_what_string = format("{} {}: {}", _http_code, _type, _msg);
|
||||
}
|
||||
return _what_string.c_str();
|
||||
}
|
||||
|
||||
@@ -15,7 +15,6 @@
|
||||
#include <seastar/net/tls.hh>
|
||||
#include <optional>
|
||||
#include "alternator/auth.hh"
|
||||
#include "service/qos/service_level_controller.hh"
|
||||
#include "utils/small_vector.hh"
|
||||
#include "utils/updateable_value.hh"
|
||||
#include <seastar/core/units.hh>
|
||||
@@ -27,16 +26,14 @@ using chunked_content = rjson::chunked_content;
|
||||
class server {
|
||||
static constexpr size_t content_length_limit = 16*MB;
|
||||
using alternator_callback = std::function<future<executor::request_return_type>(executor&, executor::client_state&,
|
||||
tracing::trace_state_ptr, service_permit, rjson::value, std::unique_ptr<http::request>)>;
|
||||
tracing::trace_state_ptr, service_permit, rjson::value, std::unique_ptr<request>)>;
|
||||
using alternator_callbacks_map = std::unordered_map<std::string_view, alternator_callback>;
|
||||
|
||||
httpd::http_server _http_server;
|
||||
httpd::http_server _https_server;
|
||||
http_server _http_server;
|
||||
http_server _https_server;
|
||||
executor& _executor;
|
||||
service::storage_proxy& _proxy;
|
||||
gms::gossiper& _gossiper;
|
||||
auth::service& _auth_service;
|
||||
qos::service_level_controller& _sl_controller;
|
||||
|
||||
key_cache _key_cache;
|
||||
bool _enforce_authorization;
|
||||
@@ -68,7 +65,7 @@ class server {
|
||||
json_parser _json_parser;
|
||||
|
||||
public:
|
||||
server(executor& executor, service::storage_proxy& proxy, gms::gossiper& gossiper, auth::service& service, qos::service_level_controller& sl_controller);
|
||||
server(executor& executor, service::storage_proxy& proxy, gms::gossiper& gossiper);
|
||||
|
||||
future<> init(net::inet_address addr, std::optional<uint16_t> port, std::optional<uint16_t> https_port, std::optional<tls::credentials_builder> creds,
|
||||
bool enforce_authorization, semaphore* memory_limiter, utils::updateable_value<uint32_t> max_concurrent_requests);
|
||||
@@ -76,8 +73,8 @@ public:
|
||||
private:
|
||||
void set_routes(seastar::httpd::routes& r);
|
||||
// If verification succeeds, returns the authenticated user's username
|
||||
future<std::string> verify_signature(const seastar::http::request&, const chunked_content&);
|
||||
future<executor::request_return_type> handle_api_request(std::unique_ptr<http::request> req);
|
||||
future<std::string> verify_signature(const seastar::httpd::request&, const chunked_content&);
|
||||
future<executor::request_return_type> handle_api_request(std::unique_ptr<request> req);
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
@@ -27,14 +27,14 @@
|
||||
#include "cql3/result_set.hh"
|
||||
#include "cql3/type_json.hh"
|
||||
#include "cql3/column_identifier.hh"
|
||||
#include "schema/schema_builder.hh"
|
||||
#include "schema_builder.hh"
|
||||
#include "service/storage_proxy.hh"
|
||||
#include "gms/feature.hh"
|
||||
#include "gms/feature_service.hh"
|
||||
|
||||
#include "executor.hh"
|
||||
#include "tags_extension.hh"
|
||||
#include "rmw_operation.hh"
|
||||
#include "data_dictionary/data_dictionary.hh"
|
||||
|
||||
/**
|
||||
* Base template type to implement rapidjson::internal::TypeHelper<...>:s
|
||||
@@ -75,8 +75,8 @@ struct rapidjson::internal::TypeHelper<ValueType, utils::UUID>
|
||||
: public from_string_helper<ValueType, utils::UUID>
|
||||
{};
|
||||
|
||||
static db_clock::time_point as_timepoint(const table_id& tid) {
|
||||
return db_clock::time_point{utils::UUID_gen::unix_timestamp(tid.uuid())};
|
||||
static db_clock::time_point as_timepoint(const utils::UUID& uuid) {
|
||||
return db_clock::time_point{utils::UUID_gen::unix_timestamp(uuid)};
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -107,9 +107,6 @@ public:
|
||||
stream_arn(const UUID& uuid)
|
||||
: UUID(uuid)
|
||||
{}
|
||||
stream_arn(const table_id& tid)
|
||||
: UUID(tid.uuid())
|
||||
{}
|
||||
stream_arn(std::string_view v)
|
||||
: UUID(v.substr(1))
|
||||
{
|
||||
@@ -141,44 +138,30 @@ namespace alternator {
|
||||
future<alternator::executor::request_return_type> alternator::executor::list_streams(client_state& client_state, service_permit permit, rjson::value request) {
|
||||
_stats.api_operations.list_streams++;
|
||||
|
||||
auto limit = rjson::get_opt<int>(request, "Limit").value_or(100);
|
||||
auto limit = rjson::get_opt<int>(request, "Limit").value_or(std::numeric_limits<int>::max());
|
||||
auto streams_start = rjson::get_opt<stream_arn>(request, "ExclusiveStartStreamArn");
|
||||
auto table = find_table(_proxy, request);
|
||||
auto db = _proxy.data_dictionary();
|
||||
auto cfs = db.get_tables();
|
||||
|
||||
if (limit < 1) {
|
||||
throw api_error::validation("Limit must be 1 or more");
|
||||
}
|
||||
|
||||
std::vector<data_dictionary::table> cfs;
|
||||
|
||||
if (table) {
|
||||
auto log_name = cdc::log_name(table->cf_name());
|
||||
try {
|
||||
cfs.emplace_back(db.find_table(table->ks_name(), log_name));
|
||||
} catch (data_dictionary::no_such_column_family&) {
|
||||
cfs.clear();
|
||||
}
|
||||
} else {
|
||||
cfs = db.get_tables();
|
||||
}
|
||||
|
||||
// # 12601 (maybe?) - sort the set of tables on ID. This should ensure we never
|
||||
// generate duplicates in a paged listing here. Can obviously miss things if they
|
||||
// are added between paged calls and end up with a "smaller" UUID/ARN, but that
|
||||
// is to be expected.
|
||||
if (std::cmp_less(limit, cfs.size()) || streams_start) {
|
||||
std::sort(cfs.begin(), cfs.end(), [](const data_dictionary::table& t1, const data_dictionary::table& t2) {
|
||||
return t1.schema()->id().uuid() < t2.schema()->id().uuid();
|
||||
});
|
||||
}
|
||||
std::sort(cfs.begin(), cfs.end(), [](const data_dictionary::table& t1, const data_dictionary::table& t2) {
|
||||
return t1.schema()->id() < t2.schema()->id();
|
||||
});
|
||||
|
||||
auto i = cfs.begin();
|
||||
auto e = cfs.end();
|
||||
|
||||
if (streams_start) {
|
||||
i = std::find_if(i, e, [&](const data_dictionary::table& t) {
|
||||
return t.schema()->id().uuid() == streams_start
|
||||
return t.schema()->id() == streams_start
|
||||
&& cdc::get_base_table(db.real_database(), *t.schema())
|
||||
&& is_alternator_keyspace(t.schema()->ks_name())
|
||||
;
|
||||
@@ -201,7 +184,14 @@ future<alternator::executor::request_return_type> alternator::executor::list_str
|
||||
if (!is_alternator_keyspace(ks_name)) {
|
||||
continue;
|
||||
}
|
||||
if (table && ks_name != table->ks_name()) {
|
||||
continue;
|
||||
}
|
||||
if (cdc::is_log_for_some_table(db.real_database(), ks_name, cf_name)) {
|
||||
if (table && table != cdc::get_base_table(db.real_database(), *s)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
rjson::value new_entry = rjson::empty_object();
|
||||
|
||||
last = i->schema()->id();
|
||||
@@ -429,8 +419,6 @@ static std::chrono::seconds confidence_interval(data_dictionary::database db) {
|
||||
return std::chrono::seconds(db.get_config().alternator_streams_time_window_s());
|
||||
}
|
||||
|
||||
using namespace std::chrono_literals;
|
||||
|
||||
// Dynamo docs says no data shall live longer than 24h.
|
||||
static constexpr auto dynamodb_streams_max_window = 24h;
|
||||
|
||||
@@ -448,7 +436,7 @@ future<executor::request_return_type> executor::describe_stream(client_state& cl
|
||||
auto db = _proxy.data_dictionary();
|
||||
|
||||
try {
|
||||
auto cf = db.find_column_family(table_id(stream_arn));
|
||||
auto cf = db.find_column_family(stream_arn);
|
||||
schema = cf.schema();
|
||||
bs = cdc::get_base_table(db.real_database(), *schema);
|
||||
} catch (...) {
|
||||
@@ -508,7 +496,7 @@ future<executor::request_return_type> executor::describe_stream(client_state& cl
|
||||
// filter out cdc generations older than the table or now() - cdc::ttl (typically dynamodb_streams_max_window - 24h)
|
||||
auto low_ts = std::max(as_timepoint(schema->id()), db_clock::now() - ttl);
|
||||
|
||||
return _sdks.cdc_get_versioned_streams(low_ts, { normal_token_owners }).then([db, shard_start, limit, ret = std::move(ret), stream_desc = std::move(stream_desc)] (std::map<db_clock::time_point, cdc::streams_version> topologies) mutable {
|
||||
return _sdks.cdc_get_versioned_streams(low_ts, { normal_token_owners }).then([this, db, shard_start, limit, ret = std::move(ret), stream_desc = std::move(stream_desc)] (std::map<db_clock::time_point, cdc::streams_version> topologies) mutable {
|
||||
|
||||
auto e = topologies.end();
|
||||
auto prev = e;
|
||||
@@ -735,7 +723,7 @@ future<executor::request_return_type> executor::get_shard_iterator(client_state&
|
||||
std::optional<shard_id> sid;
|
||||
|
||||
try {
|
||||
auto cf = db.find_column_family(table_id(stream_arn));
|
||||
auto cf = db.find_column_family(stream_arn);
|
||||
schema = cf.schema();
|
||||
sid = rjson::get<shard_id>(request, "ShardId");
|
||||
} catch (...) {
|
||||
@@ -820,14 +808,14 @@ future<executor::request_return_type> executor::get_records(client_state& client
|
||||
auto db = _proxy.data_dictionary();
|
||||
schema_ptr schema, base;
|
||||
try {
|
||||
auto log_table = db.find_column_family(table_id(iter.table));
|
||||
auto log_table = db.find_column_family(iter.table);
|
||||
schema = log_table.schema();
|
||||
base = cdc::get_base_table(db.real_database(), *schema);
|
||||
} catch (...) {
|
||||
}
|
||||
|
||||
if (!schema || !base || !is_alternator_keyspace(schema->ks_name())) {
|
||||
throw api_error::resource_not_found(fmt::to_string(iter.table));
|
||||
throw api_error::resource_not_found(boost::lexical_cast<std::string>(iter.table));
|
||||
}
|
||||
|
||||
tracing::add_table_name(trace_state, schema->ks_name(), schema->cf_name());
|
||||
@@ -850,14 +838,14 @@ future<executor::request_return_type> executor::get_records(client_state& client
|
||||
static const bytes op_column_name = cdc::log_meta_column_name_bytes("operation");
|
||||
static const bytes eor_column_name = cdc::log_meta_column_name_bytes("end_of_batch");
|
||||
|
||||
std::optional<attrs_to_get> key_names = boost::copy_range<attrs_to_get>(
|
||||
auto key_names = boost::copy_range<attrs_to_get>(
|
||||
boost::range::join(std::move(base->partition_key_columns()), std::move(base->clustering_key_columns()))
|
||||
| boost::adaptors::transformed([&] (const column_definition& cdef) {
|
||||
return std::make_pair<std::string, attrs_to_get_node>(cdef.name_as_text(), {}); })
|
||||
);
|
||||
// Include all base table columns as values (in case pre or post is enabled).
|
||||
// This will include attributes not stored in the frozen map column
|
||||
std::optional<attrs_to_get> attr_names = boost::copy_range<attrs_to_get>(base->regular_columns()
|
||||
auto attr_names = boost::copy_range<attrs_to_get>(base->regular_columns()
|
||||
// this will include the :attrs column, which we will also force evaluating.
|
||||
// But not having this set empty forces out any cdc columns from actual result
|
||||
| boost::adaptors::transformed([] (const column_definition& cdef) {
|
||||
@@ -894,11 +882,11 @@ future<executor::request_return_type> executor::get_records(client_state& client
|
||||
++mul;
|
||||
}
|
||||
auto command = ::make_lw_shared<query::read_command>(schema->id(), schema->version(), partition_slice, _proxy.get_max_result_size(partition_slice),
|
||||
query::tombstone_limit(_proxy.get_tombstone_limit()), query::row_limit(limit * mul));
|
||||
query::row_limit(limit * mul));
|
||||
|
||||
return _proxy.query(schema, std::move(command), std::move(partition_ranges), cl, service::storage_proxy::coordinator_query_options(default_timeout(), std::move(permit), client_state)).then(
|
||||
[this, schema, partition_slice = std::move(partition_slice), selection = std::move(selection), start_time = std::move(start_time), limit, key_names = std::move(key_names), attr_names = std::move(attr_names), type, iter, high_ts] (service::storage_proxy::coordinator_query_result qr) mutable {
|
||||
cql3::selection::result_set_builder builder(*selection, gc_clock::now());
|
||||
cql3::selection::result_set_builder builder(*selection, gc_clock::now(), cql_serialization_format::latest());
|
||||
query::result_view::consume(*qr.query_result, partition_slice, cql3::selection::result_set_builder::visitor(builder, *schema, *selection));
|
||||
|
||||
auto result_set = builder.build();
|
||||
@@ -1027,7 +1015,7 @@ future<executor::request_return_type> executor::get_records(client_state& client
|
||||
// ugh. figure out if we are and end-of-shard
|
||||
auto normal_token_owners = _proxy.get_token_metadata_ptr()->count_normal_token_owners();
|
||||
|
||||
return _sdks.cdc_current_generation_timestamp({ normal_token_owners }).then([this, iter, high_ts, start_time, ret = std::move(ret)](db_clock::time_point ts) mutable {
|
||||
return _sdks.cdc_current_generation_timestamp({ normal_token_owners }).then([this, iter, high_ts, start_time, ret = std::move(ret), nrecords](db_clock::time_point ts) mutable {
|
||||
auto& shard = iter.shard;
|
||||
|
||||
if (shard.time < ts && ts < high_ts) {
|
||||
@@ -1044,7 +1032,8 @@ future<executor::request_return_type> executor::get_records(client_state& client
|
||||
rjson::add(ret, "NextShardIterator", iter);
|
||||
}
|
||||
_stats.api_operations.get_records_latency.add(std::chrono::steady_clock::now() - start_time);
|
||||
if (is_big(ret)) {
|
||||
// TODO: determine a better threshold...
|
||||
if (nrecords > 10) {
|
||||
return make_ready_future<executor::request_return_type>(make_streamed(std::move(ret)));
|
||||
}
|
||||
return make_ready_future<executor::request_return_type>(make_jsonable(std::move(ret)));
|
||||
@@ -1061,10 +1050,10 @@ void executor::add_stream_options(const rjson::value& stream_specification, sche
|
||||
if (stream_enabled->GetBool()) {
|
||||
auto db = sp.data_dictionary();
|
||||
|
||||
if (!db.features().cdc) {
|
||||
if (!db.features().cluster_supports_cdc()) {
|
||||
throw api_error::validation("StreamSpecification: streams (CDC) feature not enabled in cluster.");
|
||||
}
|
||||
if (!db.features().alternator_streams) {
|
||||
if (!db.features().cluster_supports_alternator_streams()) {
|
||||
throw api_error::validation("StreamSpecification: alternator streams feature not enabled in cluster.");
|
||||
}
|
||||
|
||||
@@ -1096,7 +1085,7 @@ void executor::add_stream_options(const rjson::value& stream_specification, sche
|
||||
}
|
||||
}
|
||||
|
||||
void executor::supplement_table_stream_info(rjson::value& descr, const schema& schema, const service::storage_proxy& sp) {
|
||||
void executor::supplement_table_stream_info(rjson::value& descr, const schema& schema, service::storage_proxy& sp) {
|
||||
auto& opts = schema.cdc_options();
|
||||
if (opts.enabled()) {
|
||||
auto db = sp.data_dictionary();
|
||||
|
||||
@@ -9,10 +9,10 @@
|
||||
#pragma once
|
||||
|
||||
#include "serializer.hh"
|
||||
#include "schema/schema.hh"
|
||||
#include "schema.hh"
|
||||
#include "db/extensions.hh"
|
||||
|
||||
namespace db {
|
||||
namespace alternator {
|
||||
|
||||
class tags_extension : public schema_extension {
|
||||
public:
|
||||
@@ -37,9 +37,4 @@ private:
|
||||
std::map<sstring, sstring> _tags;
|
||||
};
|
||||
|
||||
// Information whether the view updates are synchronous is stored using the
|
||||
// SYNCHRONOUS_VIEW_UPDATES_TAG_KEY tag. Value of this tag is a stored as a
|
||||
// serialized boolean value ("true" or "false")
|
||||
static const sstring SYNCHRONOUS_VIEW_UPDATES_TAG_KEY("system:synchronous_view_updates");
|
||||
|
||||
}
|
||||
@@ -8,17 +8,14 @@
|
||||
|
||||
#include <chrono>
|
||||
#include <cstdint>
|
||||
#include <exception>
|
||||
#include <optional>
|
||||
#include <seastar/core/sstring.hh>
|
||||
#include <seastar/core/coroutine.hh>
|
||||
#include <seastar/core/sleep.hh>
|
||||
#include <seastar/core/future.hh>
|
||||
#include <seastar/core/lowres_clock.hh>
|
||||
#include <seastar/coroutine/maybe_yield.hh>
|
||||
#include <boost/multiprecision/cpp_int.hpp>
|
||||
|
||||
#include "exceptions/exceptions.hh"
|
||||
#include "gms/gossiper.hh"
|
||||
#include "gms/inet_address.hh"
|
||||
#include "inet_address_vectors.hh"
|
||||
@@ -33,8 +30,8 @@
|
||||
#include "service/pager/query_pagers.hh"
|
||||
#include "gms/feature_service.hh"
|
||||
#include "sstables/types.hh"
|
||||
#include "mutation/mutation.hh"
|
||||
#include "types/types.hh"
|
||||
#include "mutation.hh"
|
||||
#include "types.hh"
|
||||
#include "types/map.hh"
|
||||
#include "utils/rjson.hh"
|
||||
#include "utils/big_decimal.hh"
|
||||
@@ -47,8 +44,6 @@
|
||||
#include "alternator/controller.hh"
|
||||
#include "alternator/serialization.hh"
|
||||
#include "dht/sharder.hh"
|
||||
#include "db/config.hh"
|
||||
#include "db/tags/utils.hh"
|
||||
|
||||
#include "ttl.hh"
|
||||
|
||||
@@ -67,7 +62,7 @@ static const sstring TTL_TAG_KEY("system:ttl_attribute");
|
||||
|
||||
future<executor::request_return_type> executor::update_time_to_live(client_state& client_state, service_permit permit, rjson::value request) {
|
||||
_stats.api_operations.update_time_to_live++;
|
||||
if (!_proxy.data_dictionary().features().alternator_ttl) {
|
||||
if (!_proxy.data_dictionary().features().cluster_supports_alternator_ttl()) {
|
||||
co_return api_error::unknown_operation("UpdateTimeToLive not yet supported. Experimental support is available if the 'alternator-ttl' experimental feature is enabled on all nodes.");
|
||||
}
|
||||
|
||||
@@ -94,25 +89,24 @@ future<executor::request_return_type> executor::update_time_to_live(client_state
|
||||
}
|
||||
sstring attribute_name(v->GetString(), v->GetStringLength());
|
||||
|
||||
co_await db::modify_tags(_mm, schema->ks_name(), schema->cf_name(), [&](std::map<sstring, sstring>& tags_map) {
|
||||
if (enabled) {
|
||||
if (tags_map.contains(TTL_TAG_KEY)) {
|
||||
throw api_error::validation("TTL is already enabled");
|
||||
}
|
||||
tags_map[TTL_TAG_KEY] = attribute_name;
|
||||
} else {
|
||||
auto i = tags_map.find(TTL_TAG_KEY);
|
||||
if (i == tags_map.end()) {
|
||||
throw api_error::validation("TTL is already disabled");
|
||||
} else if (i->second != attribute_name) {
|
||||
throw api_error::validation(format(
|
||||
"Requested to disable TTL on attribute {}, but a different attribute {} is enabled.",
|
||||
attribute_name, i->second));
|
||||
}
|
||||
tags_map.erase(TTL_TAG_KEY);
|
||||
std::map<sstring, sstring> tags_map = get_tags_of_table(schema);
|
||||
if (enabled) {
|
||||
if (tags_map.contains(TTL_TAG_KEY)) {
|
||||
co_return api_error::validation("TTL is already enabled");
|
||||
}
|
||||
});
|
||||
|
||||
tags_map[TTL_TAG_KEY] = attribute_name;
|
||||
} else {
|
||||
auto i = tags_map.find(TTL_TAG_KEY);
|
||||
if (i == tags_map.end()) {
|
||||
co_return api_error::validation("TTL is already disabled");
|
||||
} else if (i->second != attribute_name) {
|
||||
co_return api_error::validation(format(
|
||||
"Requested to disable TTL on attribute {}, but a different attribute {} is enabled.",
|
||||
attribute_name, i->second));
|
||||
}
|
||||
tags_map.erase(TTL_TAG_KEY);
|
||||
}
|
||||
co_await update_tags(_mm, schema, std::move(tags_map));
|
||||
// Prepare the response, which contains a TimeToLiveSpecification
|
||||
// basically identical to the request's
|
||||
rjson::value response = rjson::empty_object();
|
||||
@@ -123,7 +117,7 @@ future<executor::request_return_type> executor::update_time_to_live(client_state
|
||||
future<executor::request_return_type> executor::describe_time_to_live(client_state& client_state, service_permit permit, rjson::value request) {
|
||||
_stats.api_operations.describe_time_to_live++;
|
||||
schema_ptr schema = get_table(_proxy, request);
|
||||
std::map<sstring, sstring> tags_map = get_tags_of_table_or_throw(schema);
|
||||
std::map<sstring, sstring> tags_map = get_tags_of_table(schema);
|
||||
rjson::value desc = rjson::empty_object();
|
||||
auto i = tags_map.find(TTL_TAG_KEY);
|
||||
if (i == tags_map.end()) {
|
||||
@@ -139,7 +133,7 @@ future<executor::request_return_type> executor::describe_time_to_live(client_sta
|
||||
|
||||
// expiration_service is a sharded service responsible for cleaning up expired
|
||||
// items in all tables with per-item expiration enabled. Currently, this means
|
||||
// Alternator tables with TTL configured via a UpdateTimeToLive request.
|
||||
// Alternator tables with TTL configured via a UpdateTimeToLeave request.
|
||||
//
|
||||
// Here is a brief overview of how the expiration service works:
|
||||
//
|
||||
@@ -153,26 +147,28 @@ future<executor::request_return_type> executor::describe_time_to_live(client_sta
|
||||
// To avoid scanning the same items RF times in RF replicas, only one node is
|
||||
// responsible for scanning a token range at a time. Normally, this is the
|
||||
// node owning this range as a "primary range" (the first node in the ring
|
||||
// with this range), but when this node is down, the secondary owner (the
|
||||
// second in the ring) may take over.
|
||||
// with this range), but when this node is down, other nodes may take over
|
||||
// (FIXME: this is not implemented yet).
|
||||
// An expiration thread is reponsible for all tables which need expiration
|
||||
// scans. Currently, the different tables are scanned sequentially (not in
|
||||
// parallel).
|
||||
// scans. FIXME: explain how this is done with multiple tables - parallel,
|
||||
// staggered, or what?
|
||||
// The expiration thread scans item using CL=QUORUM to ensures that it reads
|
||||
// a consistent expiration-time attribute. This means that the items are read
|
||||
// locally and in addition QUORUM-1 additional nodes (one additional node
|
||||
// when RF=3) need to read the data and send digests.
|
||||
// FIXME: explain if we can read the exact attribute or the entire map.
|
||||
// When the expiration thread decides that an item has expired and wants
|
||||
// to delete it, it does it using a CL=QUORUM write. This allows this
|
||||
// deletion to be visible for consistent (quorum) reads. The deletion,
|
||||
// like user deletions, will also appear on the CDC log and therefore
|
||||
// Alternator Streams if enabled - currently as ordinary deletes (the
|
||||
// userIdentity flag is currently missing this is issue #11523).
|
||||
expiration_service::expiration_service(data_dictionary::database db, service::storage_proxy& proxy, gms::gossiper& g)
|
||||
// Alternator Streams if enabled (FIXME: explain how we mark the
|
||||
// deletion different from user deletes. We don't do it yet.).
|
||||
expiration_service::expiration_service(data_dictionary::database db, service::storage_proxy& proxy)
|
||||
: _db(db)
|
||||
, _proxy(proxy)
|
||||
, _gossiper(g)
|
||||
{
|
||||
//FIXME: add metrics for the service
|
||||
//setup_metrics();
|
||||
}
|
||||
|
||||
// Convert the big_decimal used to represent expiration time to an integer.
|
||||
@@ -241,7 +237,7 @@ static bool is_expired(const rjson::value& expiration_time, gc_clock::time_point
|
||||
// understands it is an expiration event - not a user-initiated deletion.
|
||||
static future<> expire_item(service::storage_proxy& proxy,
|
||||
const service::query_state& qs,
|
||||
const std::vector<managed_bytes_opt>& row,
|
||||
const std::vector<bytes_opt>& row,
|
||||
schema_ptr schema,
|
||||
api::timestamp_type ts) {
|
||||
// Prepare the row key to delete
|
||||
@@ -260,7 +256,7 @@ static future<> expire_item(service::storage_proxy& proxy,
|
||||
// FIXME: log or increment a metric if this happens.
|
||||
return make_ready_future<>();
|
||||
}
|
||||
exploded_pk.push_back(to_bytes(*row_c));
|
||||
exploded_pk.push_back(*row_c);
|
||||
}
|
||||
auto pk = partition_key::from_exploded(exploded_pk);
|
||||
mutation m(schema, pk);
|
||||
@@ -280,18 +276,15 @@ static future<> expire_item(service::storage_proxy& proxy,
|
||||
// FIXME: log or increment a metric if this happens.
|
||||
return make_ready_future<>();
|
||||
}
|
||||
exploded_ck.push_back(to_bytes(*row_c));
|
||||
exploded_ck.push_back(*row_c);
|
||||
}
|
||||
auto ck = clustering_key::from_exploded(exploded_ck);
|
||||
m.partition().clustered_row(*schema, ck).apply(tombstone(ts, gc_clock::now()));
|
||||
}
|
||||
std::vector<mutation> mutations;
|
||||
mutations.push_back(std::move(m));
|
||||
return proxy.mutate(std::move(mutations),
|
||||
return proxy.mutate(std::vector<mutation>{std::move(m)},
|
||||
db::consistency_level::LOCAL_QUORUM,
|
||||
executor::default_timeout(), // FIXME - which timeout?
|
||||
qs.get_trace_state(), qs.get_permit(),
|
||||
db::allow_per_partition_rate_limit::no);
|
||||
qs.get_trace_state(), qs.get_permit());
|
||||
}
|
||||
|
||||
static size_t random_offset(size_t min, size_t max) {
|
||||
@@ -370,7 +363,7 @@ static std::vector<std::pair<dht::token_range, gms::inet_address>> get_secondary
|
||||
// 2. The primary replica for this token is currently marked down.
|
||||
// 3. In this node, this shard is responsible for this token.
|
||||
// We use the <secondary> case to handle the possibility that some of the
|
||||
// nodes in the system are down. A dead node will not be expiring
|
||||
// nodes in the system are down. A dead node will not be expiring expiring
|
||||
// the tokens owned by it, so we want the secondary owner to take over its
|
||||
// primary ranges.
|
||||
//
|
||||
@@ -383,11 +376,12 @@ static std::vector<std::pair<dht::token_range, gms::inet_address>> get_secondary
|
||||
enum primary_or_secondary_t {primary, secondary};
|
||||
template<primary_or_secondary_t primary_or_secondary>
|
||||
class token_ranges_owned_by_this_shard {
|
||||
// ranges_holder_primary holds just the primary ranges themselves
|
||||
class ranges_holder_primary {
|
||||
template<primary_or_secondary_t> class ranges_holder;
|
||||
// ranges_holder<primary> holds just the primary ranges themselves
|
||||
template<> class ranges_holder<primary> {
|
||||
const dht::token_range_vector _token_ranges;
|
||||
public:
|
||||
ranges_holder_primary(const locator::vnode_effective_replication_map_ptr& erm, gms::gossiper& g, gms::inet_address ep)
|
||||
ranges_holder(const locator::effective_replication_map_ptr& erm, gms::inet_address ep)
|
||||
: _token_ranges(erm->get_primary_ranges(ep)) {}
|
||||
std::size_t size() const { return _token_ranges.size(); }
|
||||
const dht::token_range& operator[](std::size_t i) const {
|
||||
@@ -399,13 +393,13 @@ class token_ranges_owned_by_this_shard {
|
||||
};
|
||||
// ranges_holder<secondary> holds the secondary token ranges plus each
|
||||
// range's primary owner, needed to implement should_skip().
|
||||
class ranges_holder_secondary {
|
||||
template<> class ranges_holder<secondary> {
|
||||
std::vector<std::pair<dht::token_range, gms::inet_address>> _token_ranges;
|
||||
gms::gossiper& _gossiper;
|
||||
public:
|
||||
ranges_holder_secondary(const locator::effective_replication_map_ptr& erm, gms::gossiper& g, gms::inet_address ep)
|
||||
ranges_holder(const locator::effective_replication_map_ptr& erm, gms::inet_address ep)
|
||||
: _token_ranges(get_secondary_ranges(erm, ep))
|
||||
, _gossiper(g) {}
|
||||
, _gossiper(gms::get_local_gossiper()) {}
|
||||
std::size_t size() const { return _token_ranges.size(); }
|
||||
const dht::token_range& operator[](std::size_t i) const {
|
||||
return _token_ranges[i].first;
|
||||
@@ -420,25 +414,19 @@ class token_ranges_owned_by_this_shard {
|
||||
// _token_ranges will contain a list of token ranges owned by this node.
|
||||
// We'll further need to split each such range to the pieces owned by
|
||||
// the current shard, using _intersecter.
|
||||
using ranges_holder = std::conditional_t<
|
||||
primary_or_secondary == primary_or_secondary_t::primary,
|
||||
ranges_holder_primary,
|
||||
ranges_holder_secondary>;
|
||||
const ranges_holder _token_ranges;
|
||||
const ranges_holder<primary_or_secondary> _token_ranges;
|
||||
// NOTICE: _range_idx is used modulo _token_ranges size when accessing
|
||||
// the data to ensure that it doesn't go out of bounds
|
||||
size_t _range_idx;
|
||||
size_t _end_idx;
|
||||
std::optional<dht::selective_token_range_sharder> _intersecter;
|
||||
locator::effective_replication_map_ptr _erm;
|
||||
public:
|
||||
token_ranges_owned_by_this_shard(replica::database& db, gms::gossiper& g, schema_ptr s)
|
||||
token_ranges_owned_by_this_shard(replica::database& db, schema_ptr s)
|
||||
: _s(s)
|
||||
, _token_ranges(db.find_keyspace(s->ks_name()).get_effective_replication_map(),
|
||||
g, utils::fb_utilities::get_broadcast_address())
|
||||
utils::fb_utilities::get_broadcast_address())
|
||||
, _range_idx(random_offset(0, _token_ranges.size() - 1))
|
||||
, _end_idx(_range_idx + _token_ranges.size())
|
||||
, _erm(s->table().get_effective_replication_map())
|
||||
{
|
||||
tlogger.debug("Generating token ranges starting from base range {} of {}", _range_idx, _token_ranges.size());
|
||||
}
|
||||
@@ -471,7 +459,7 @@ public:
|
||||
return std::nullopt;
|
||||
}
|
||||
}
|
||||
_intersecter.emplace(_erm->get_sharder(*_s), _token_ranges[_range_idx % _token_ranges.size()], this_shard_id());
|
||||
_intersecter.emplace(_s->get_sharder(), _token_ranges[_range_idx % _token_ranges.size()], this_shard_id());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -514,11 +502,9 @@ struct scan_ranges_context {
|
||||
selection = cql3::selection::selection::wildcard(s);
|
||||
query::partition_slice::option_set opts = selection->get_query_options();
|
||||
opts.set<query::partition_slice::option::allow_short_read>();
|
||||
// It is important that the scan bypass cache to avoid polluting it:
|
||||
opts.set<query::partition_slice::option::bypass_cache>();
|
||||
std::vector<query::clustering_range> ck_bounds{query::clustering_range::make_open_ended_both_sides()};
|
||||
auto partition_slice = query::partition_slice(std::move(ck_bounds), {}, std::move(regular_columns), opts);
|
||||
command = ::make_lw_shared<query::read_command>(s->id(), s->version(), partition_slice, proxy.get_max_result_size(partition_slice), query::tombstone_limit(proxy.get_tombstone_limit()));
|
||||
command = ::make_lw_shared<query::read_command>(s->id(), s->version(), partition_slice, proxy.get_max_result_size(partition_slice));
|
||||
executor::client_state client_state{executor::client_state::internal_tag()};
|
||||
tracing::trace_state_ptr trace_state;
|
||||
// NOTICE: empty_service_permit is used because the TTL service has fixed parallelism
|
||||
@@ -535,14 +521,13 @@ struct scan_ranges_context {
|
||||
// Scan data in a list of token ranges in one table, looking for expired
|
||||
// items and deleting them.
|
||||
// Because of issue #9167, partition_ranges must have a single partition
|
||||
// range for this code to work correctly.
|
||||
// for this code to work correctly.
|
||||
static future<> scan_table_ranges(
|
||||
service::storage_proxy& proxy,
|
||||
const scan_ranges_context& scan_ctx,
|
||||
dht::partition_range_vector&& partition_ranges,
|
||||
abort_source& abort_source,
|
||||
named_semaphore& page_sem,
|
||||
expiration_service::stats& expiration_stats)
|
||||
named_semaphore& page_sem)
|
||||
{
|
||||
const schema_ptr& s = scan_ctx.s;
|
||||
assert (partition_ranges.size() == 1); // otherwise issue #9167 will cause incorrect results.
|
||||
@@ -553,34 +538,13 @@ static future<> scan_table_ranges(
|
||||
co_return;
|
||||
}
|
||||
auto units = co_await get_units(page_sem, 1);
|
||||
// We don't need to limit page size in number of rows because there is
|
||||
// a builtin limit of the page's size in bytes. Setting this limit to
|
||||
// 1 is useful for debugging the paging code with moderate-size data.
|
||||
// We don't to limit page size in number of rows because there is a
|
||||
// builtin limit of the page's size in bytes. Setting this limit to 1
|
||||
// is useful for debugging the paging code with moderate-size data.
|
||||
uint32_t limit = std::numeric_limits<uint32_t>::max();
|
||||
// Read a page, and if that times out, try again after a small sleep.
|
||||
// If we didn't catch the timeout exception, it would cause the scan
|
||||
// be aborted and only be restarted at the next scanning period.
|
||||
// If we retry too many times, give up and restart the scan later.
|
||||
std::unique_ptr<cql3::result_set> rs;
|
||||
for (int retries=0; ; retries++) {
|
||||
try {
|
||||
// FIXME: which timeout?
|
||||
rs = co_await p->fetch_page(limit, gc_clock::now(), executor::default_timeout());
|
||||
break;
|
||||
} catch(exceptions::read_timeout_exception&) {
|
||||
tlogger.warn("expiration scanner read timed out, will retry: {}",
|
||||
std::current_exception());
|
||||
}
|
||||
// If we didn't break out of this loop, add a minimal sleep
|
||||
if (retries >= 10) {
|
||||
// Don't get stuck forever asking the same page, maybe there's
|
||||
// a bug or a real problem in several replicas. Give up on
|
||||
// this scan an retry the scan from a random position later,
|
||||
// in the next scan period.
|
||||
throw runtime_exception("scanner thread failed after too many timeouts for the same page");
|
||||
}
|
||||
co_await sleep_abortable(std::chrono::seconds(1), abort_source);
|
||||
}
|
||||
// FIXME: which timeout?
|
||||
// FIXME: if read times out, need to retry it.
|
||||
std::unique_ptr<cql3::result_set> rs = co_await p->fetch_page(limit, gc_clock::now(), executor::default_timeout());
|
||||
auto rows = rs->rows();
|
||||
auto meta = rs->get_metadata().get_names();
|
||||
std::optional<unsigned> expiration_column;
|
||||
@@ -595,7 +559,7 @@ static future<> scan_table_ranges(
|
||||
continue;
|
||||
}
|
||||
for (const auto& row : rows) {
|
||||
const managed_bytes_opt& cell = row[*expiration_column];
|
||||
const bytes_opt& cell = row[*expiration_column];
|
||||
if (!cell) {
|
||||
continue;
|
||||
}
|
||||
@@ -631,7 +595,6 @@ static future<> scan_table_ranges(
|
||||
expired = is_expired(n, now);
|
||||
}
|
||||
if (expired) {
|
||||
expiration_stats.items_deleted++;
|
||||
// FIXME: maybe don't recalculate new_timestamp() all the time
|
||||
// FIXME: if expire_item() throws on timeout, we need to retry it.
|
||||
auto ts = api::new_timestamp();
|
||||
@@ -643,7 +606,7 @@ static future<> scan_table_ranges(
|
||||
}
|
||||
}
|
||||
|
||||
// scan_table() scans, in one table, data "owned" by this shard, looking for
|
||||
// scan_table() scans data in one table "owned" by this shard, looking for
|
||||
// expired items and deleting them.
|
||||
// We consider each node to "own" its primary token ranges, i.e., the tokens
|
||||
// that this node is their first replica in the ring. Inside the node, each
|
||||
@@ -665,16 +628,13 @@ static future<> scan_table_ranges(
|
||||
static future<bool> scan_table(
|
||||
service::storage_proxy& proxy,
|
||||
data_dictionary::database db,
|
||||
gms::gossiper& gossiper,
|
||||
schema_ptr s,
|
||||
abort_source& abort_source,
|
||||
named_semaphore& page_sem,
|
||||
expiration_service::stats& expiration_stats)
|
||||
named_semaphore& page_sem)
|
||||
{
|
||||
// Check if an expiration-time attribute is enabled for this table.
|
||||
// If not, just return false immediately.
|
||||
// FIXME: the setting of the TTL may change in the middle of a long scan!
|
||||
std::optional<std::string> attribute_name = db::find_tag(*s, TTL_TAG_KEY);
|
||||
std::optional<std::string> attribute_name = find_tag(*s, TTL_TAG_KEY);
|
||||
if (!attribute_name) {
|
||||
co_return false;
|
||||
}
|
||||
@@ -715,10 +675,11 @@ static future<bool> scan_table(
|
||||
tlogger.info("table {} TTL column has unsupported type, not scanning", s->cf_name());
|
||||
co_return false;
|
||||
}
|
||||
expiration_stats.scan_table++;
|
||||
// FIXME: need to pace the scan, not do it all at once.
|
||||
// FIXME: consider if we should ask the scan without caching?
|
||||
// can we use cache but not fill it?
|
||||
scan_ranges_context scan_ctx{s, proxy, std::move(column_name), std::move(member)};
|
||||
token_ranges_owned_by_this_shard<primary> my_ranges(db.real_database(), gossiper, s);
|
||||
token_ranges_owned_by_this_shard<primary> my_ranges(db.real_database(), s);
|
||||
while (std::optional<dht::partition_range> range = my_ranges.next_partition_range()) {
|
||||
// Note that because of issue #9167 we need to run a separate
|
||||
// query on each partition range, and can't pass several of
|
||||
@@ -729,7 +690,7 @@ static future<bool> scan_table(
|
||||
// we fail the entire scan (and rescan from the beginning). Need to
|
||||
// reconsider this. Saving the scan position might be a good enough
|
||||
// solution for this problem.
|
||||
co_await scan_table_ranges(proxy, scan_ctx, std::move(partition_ranges), abort_source, page_sem, expiration_stats);
|
||||
co_await scan_table_ranges(proxy, scan_ctx, std::move(partition_ranges), abort_source, page_sem);
|
||||
}
|
||||
// If each node only scans its own primary ranges, then when any node is
|
||||
// down part of the token range will not get scanned. This can be viewed
|
||||
@@ -738,12 +699,11 @@ static future<bool> scan_table(
|
||||
// by tasking another node to take over scanning of the dead node's primary
|
||||
// ranges. What we do here is that this node will also check expiration
|
||||
// on its *secondary* ranges - but only those whose primary owner is down.
|
||||
token_ranges_owned_by_this_shard<secondary> my_secondary_ranges(db.real_database(), gossiper, s);
|
||||
token_ranges_owned_by_this_shard<secondary> my_secondary_ranges(db.real_database(), s);
|
||||
while (std::optional<dht::partition_range> range = my_secondary_ranges.next_partition_range()) {
|
||||
expiration_stats.secondary_ranges_scanned++;
|
||||
dht::partition_range_vector partition_ranges;
|
||||
partition_ranges.push_back(std::move(*range));
|
||||
co_await scan_table_ranges(proxy, scan_ctx, std::move(partition_ranges), abort_source, page_sem, expiration_stats);
|
||||
co_await scan_table_ranges(proxy, scan_ctx, std::move(partition_ranges), abort_source, page_sem);
|
||||
}
|
||||
co_return true;
|
||||
}
|
||||
@@ -756,7 +716,6 @@ future<> expiration_service::run() {
|
||||
// also need to notice when a new table is added, a table is
|
||||
// deleted or when ttl is enabled or disabled for a table!
|
||||
for (;;) {
|
||||
auto start = lowres_clock::now();
|
||||
// _db.tables() may change under our feet during a
|
||||
// long-living loop, so we must keep our own copy of the list of
|
||||
// schemas.
|
||||
@@ -770,7 +729,7 @@ future<> expiration_service::run() {
|
||||
co_return;
|
||||
}
|
||||
try {
|
||||
co_await scan_table(_proxy, _db, _gossiper, s, _abort_source, _page_sem, _expiration_stats);
|
||||
co_await scan_table(_proxy, _db, s, _abort_source, _page_sem);
|
||||
} catch (...) {
|
||||
// The scan of a table may fail in the middle for many
|
||||
// reasons, including network failure and even the table
|
||||
@@ -789,30 +748,17 @@ future<> expiration_service::run() {
|
||||
}
|
||||
}
|
||||
}
|
||||
_expiration_stats.scan_passes++;
|
||||
// The TTL scanner runs above once over all tables, at full steam.
|
||||
// After completing such a scan, we sleep until it's time start
|
||||
// another scan. TODO: If the scan went too fast, we can slow it down
|
||||
// in the next iteration by reducing the scanner's scheduling-group
|
||||
// share (if using a separate scheduling group), or introduce
|
||||
// finer-grain sleeps into the scanning code.
|
||||
std::chrono::milliseconds scan_duration(std::chrono::duration_cast<std::chrono::milliseconds>(lowres_clock::now() - start));
|
||||
std::chrono::milliseconds period(long(_db.get_config().alternator_ttl_period_in_seconds() * 1000));
|
||||
if (scan_duration < period) {
|
||||
try {
|
||||
tlogger.info("sleeping {} seconds until next period", (period - scan_duration).count()/1000.0);
|
||||
co_await seastar::sleep_abortable(period - scan_duration, _abort_source);
|
||||
} catch(seastar::sleep_aborted&) {}
|
||||
} else {
|
||||
tlogger.warn("scan took {} seconds, longer than period - not sleeping", scan_duration.count()/1000.0);
|
||||
}
|
||||
// FIXME: replace this silly 1-second sleep by something smarter.
|
||||
try {
|
||||
co_await seastar::sleep_abortable(std::chrono::seconds(1), _abort_source);
|
||||
} catch(seastar::sleep_aborted&) {}
|
||||
}
|
||||
}
|
||||
|
||||
future<> expiration_service::start() {
|
||||
// Called by main() on each shard to start the expiration-service
|
||||
// thread. Just runs run() in the background and allows stop().
|
||||
if (_db.features().alternator_ttl) {
|
||||
if (_db.features().cluster_supports_alternator_ttl()) {
|
||||
if (!shutting_down()) {
|
||||
_end = run().handle_exception([] (std::exception_ptr ep) {
|
||||
tlogger.error("expiration_service failed: {}", ep);
|
||||
@@ -834,18 +780,4 @@ future<> expiration_service::stop() {
|
||||
return std::move(*_end);
|
||||
}
|
||||
|
||||
expiration_service::stats::stats() {
|
||||
_metrics.add_group("expiration", {
|
||||
seastar::metrics::make_total_operations("scan_passes", scan_passes,
|
||||
seastar::metrics::description("number of passes over the database")),
|
||||
seastar::metrics::make_total_operations("scan_table", scan_table,
|
||||
seastar::metrics::description("number of table scans (counting each scan of each table that enabled expiration)")),
|
||||
seastar::metrics::make_total_operations("items_deleted", items_deleted,
|
||||
seastar::metrics::description("number of items deleted after expiration")),
|
||||
seastar::metrics::make_total_operations("secondary_ranges_scanned", secondary_ranges_scanned,
|
||||
seastar::metrics::description("number of token ranges scanned by this node while their primary owner was down")),
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
} // namespace alternator
|
||||
|
||||
@@ -14,10 +14,6 @@
|
||||
#include <seastar/core/semaphore.hh>
|
||||
#include "data_dictionary/data_dictionary.hh"
|
||||
|
||||
namespace gms {
|
||||
class gossiper;
|
||||
}
|
||||
|
||||
namespace replica {
|
||||
class database;
|
||||
}
|
||||
@@ -32,26 +28,8 @@ namespace alternator {
|
||||
// items in all tables with per-item expiration enabled. Currently, this means
|
||||
// Alternator tables with TTL configured via a UpdateTimeToLeave request.
|
||||
class expiration_service final : public seastar::peering_sharded_service<expiration_service> {
|
||||
public:
|
||||
// Object holding per-shard statistics related to the expiration service.
|
||||
// While this object is alive, these metrics are also registered to be
|
||||
// visible by the metrics REST API, with the "expiration_" prefix.
|
||||
class stats {
|
||||
public:
|
||||
stats();
|
||||
uint64_t scan_passes = 0;
|
||||
uint64_t scan_table = 0;
|
||||
uint64_t items_deleted = 0;
|
||||
uint64_t secondary_ranges_scanned = 0;
|
||||
private:
|
||||
// The metric_groups object holds this stat object's metrics registered
|
||||
// as long as the stats object is alive.
|
||||
seastar::metrics::metric_groups _metrics;
|
||||
};
|
||||
private:
|
||||
data_dictionary::database _db;
|
||||
service::storage_proxy& _proxy;
|
||||
gms::gossiper& _gossiper;
|
||||
// _end is set by start(), and resolves when the the background service
|
||||
// started by it ends. To ask the background service to end, _abort_source
|
||||
// should be triggered. stop() below uses both _abort_source and _end.
|
||||
@@ -60,12 +38,11 @@ private:
|
||||
// Ensures that at most 1 page of scan results at a time is processed by the TTL service
|
||||
named_semaphore _page_sem{1, named_semaphore_exception_factory{"alternator_ttl"}};
|
||||
bool shutting_down() { return _abort_source.abort_requested(); }
|
||||
stats _expiration_stats;
|
||||
public:
|
||||
// sharded_service<expiration_service>::start() creates this object on
|
||||
// all shards, so calls this constructor on each shard. Later, the
|
||||
// additional start() function should be invoked on all shards.
|
||||
expiration_service(data_dictionary::database, service::storage_proxy&, gms::gossiper&);
|
||||
expiration_service(data_dictionary::database, service::storage_proxy&);
|
||||
future<> start();
|
||||
future<> run();
|
||||
// sharded_service<expiration_service>::stop() calls the following stop()
|
||||
|
||||
15
amplify.yml
15
amplify.yml
@@ -1,15 +0,0 @@
|
||||
version: 1
|
||||
applications:
|
||||
- frontend:
|
||||
phases:
|
||||
build:
|
||||
commands:
|
||||
- make setupenv
|
||||
- make dirhtml
|
||||
artifacts:
|
||||
baseDirectory: _build/dirhtml
|
||||
files:
|
||||
- '**/*'
|
||||
cache:
|
||||
paths: []
|
||||
appRoot: docs
|
||||
@@ -1,71 +0,0 @@
|
||||
# Generate C++ sources from Swagger definitions
|
||||
set(swagger_files
|
||||
api-doc/authorization_cache.json
|
||||
api-doc/cache_service.json
|
||||
api-doc/collectd.json
|
||||
api-doc/column_family.json
|
||||
api-doc/commitlog.json
|
||||
api-doc/compaction_manager.json
|
||||
api-doc/config.json
|
||||
api-doc/endpoint_snitch_info.json
|
||||
api-doc/error_injection.json
|
||||
api-doc/failure_detector.json
|
||||
api-doc/gossiper.json
|
||||
api-doc/hinted_handoff.json
|
||||
api-doc/lsa.json
|
||||
api-doc/messaging_service.json
|
||||
api-doc/metrics.json
|
||||
api-doc/storage_proxy.json
|
||||
api-doc/storage_service.json
|
||||
api-doc/stream_manager.json
|
||||
api-doc/system.json
|
||||
api-doc/task_manager.json
|
||||
api-doc/task_manager_test.json
|
||||
api-doc/utils.json)
|
||||
|
||||
foreach(f ${swagger_files})
|
||||
get_filename_component(fname "${f}" NAME_WE)
|
||||
get_filename_component(dir "${f}" DIRECTORY)
|
||||
seastar_generate_swagger(
|
||||
TARGET scylla_swagger_gen_${fname}
|
||||
VAR scylla_swagger_gen_${fname}_files
|
||||
IN_FILE "${CMAKE_CURRENT_SOURCE_DIR}/${f}"
|
||||
OUT_DIR "${scylla_gen_build_dir}/api/${dir}")
|
||||
list(APPEND swagger_gen_files "${scylla_swagger_gen_${fname}_files}")
|
||||
endforeach()
|
||||
|
||||
add_library(api)
|
||||
target_sources(api
|
||||
PRIVATE
|
||||
api.cc
|
||||
cache_service.cc
|
||||
collectd.cc
|
||||
column_family.cc
|
||||
commitlog.cc
|
||||
compaction_manager.cc
|
||||
config.cc
|
||||
endpoint_snitch.cc
|
||||
error_injection.cc
|
||||
authorization_cache.cc
|
||||
failure_detector.cc
|
||||
gossiper.cc
|
||||
hinted_handoff.cc
|
||||
lsa.cc
|
||||
messaging_service.cc
|
||||
storage_proxy.cc
|
||||
storage_service.cc
|
||||
stream_manager.cc
|
||||
system.cc
|
||||
task_manager.cc
|
||||
task_manager_test.cc
|
||||
${swagger_gen_files})
|
||||
target_include_directories(api
|
||||
PUBLIC
|
||||
${CMAKE_SOURCE_DIR}
|
||||
${scylla_gen_build_dir})
|
||||
target_link_libraries(api
|
||||
idl
|
||||
wasmtime_bindings
|
||||
|
||||
Seastar::seastar
|
||||
xxHash::xxhash)
|
||||
@@ -1,29 +0,0 @@
|
||||
{
|
||||
"apiVersion":"0.0.1",
|
||||
"swaggerVersion":"1.2",
|
||||
"basePath":"{{Protocol}}://{{Host}}",
|
||||
"resourcePath":"/authorization_cache",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"apis":[
|
||||
{
|
||||
"path":"/authorization_cache/reset",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Reset cache",
|
||||
"type":"void",
|
||||
"nickname":"authorization_cache_reset",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"models":{
|
||||
}
|
||||
}
|
||||
@@ -84,14 +84,6 @@
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
},
|
||||
{
|
||||
"name":"flush_memtables",
|
||||
"description":"Controls flushing of memtables before compaction (true by default). Set to \"false\" to skip automatic flushing of memtables before compaction, e.g. when the table is flushed explicitly before invoking the compaction api.",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"boolean",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"split_output",
|
||||
"description":"true if the output of the major compaction should be split in several sstables",
|
||||
@@ -445,68 +437,6 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/column_family/tombstone_gc/{name}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Check if tombstone GC is enabled for a given table",
|
||||
"type":"boolean",
|
||||
"nickname":"get_tombstone_gc",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"name",
|
||||
"description":"The table name in keyspace:name format",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Enable tombstone GC for a given table",
|
||||
"type":"void",
|
||||
"nickname":"enable_tombstone_gc",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"name",
|
||||
"description":"The table name in keyspace:name format",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"method":"DELETE",
|
||||
"summary":"Disable tombstone GC for a given table",
|
||||
"type":"void",
|
||||
"nickname":"disable_tombstone_gc",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"name",
|
||||
"description":"The table name in keyspace:name format",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/column_family/estimate_keys/{name}",
|
||||
"operations":[
|
||||
|
||||
@@ -134,7 +134,7 @@
|
||||
},
|
||||
{
|
||||
"name":"tables",
|
||||
"description":"Comma-separated tables to stop compaction in",
|
||||
"description":"Comma-seperated tables to stop compaction in",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
|
||||
@@ -34,14 +34,6 @@
|
||||
"allowMultiple":false,
|
||||
"type":"boolean",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"parameters",
|
||||
"description":"dict of parameters to pass to the injection (json format)",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"dict",
|
||||
"paramType":"body"
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -66,30 +58,6 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/v2/error_injection/injection/{injection}/message",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Send message to trigger an event in injection's code",
|
||||
"type":"void",
|
||||
"nickname":"message_injection",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"injection",
|
||||
"description":"injection name, should correspond to an injection added in code",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/v2/error_injection/injection",
|
||||
"operations":[
|
||||
@@ -118,15 +86,5 @@
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"components":{
|
||||
"schemas": {
|
||||
"dict": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -245,7 +245,7 @@
|
||||
"GOSSIP_SHUTDOWN",
|
||||
"DEFINITIONS_UPDATE",
|
||||
"TRUNCATE",
|
||||
"UNUSED__REPLICATION_FINISHED",
|
||||
"REPLICATION_FINISHED",
|
||||
"MIGRATION_REQUEST",
|
||||
"PREPARE_MESSAGE",
|
||||
"PREPARE_DONE_MESSAGE",
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
"metrics_config": {
|
||||
"id": "metrics_config",
|
||||
"summary": "An entry in the metrics configuration",
|
||||
"properties": {
|
||||
"source_labels": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"description": "The source labels, a match is based on concatination of the labels"
|
||||
},
|
||||
"action": {
|
||||
"type": "string",
|
||||
"description": "The action to perfrom on match",
|
||||
"enum": ["skip_when_empty", "report_when_empty", "replace", "keep", "drop", "drop_label"]
|
||||
},
|
||||
"target_label": {
|
||||
"type": "string",
|
||||
"description": "The application state version"
|
||||
},
|
||||
"replacement": {
|
||||
"type": "string",
|
||||
"description": "The replacement string to use when replacing a value"
|
||||
},
|
||||
"regex": {
|
||||
"type": "string",
|
||||
"description": "The regex string to use when replacing a value"
|
||||
},
|
||||
"separator": {
|
||||
"type": "string",
|
||||
"description": "The separator string to use when concatinating the labels"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,66 +0,0 @@
|
||||
"/v2/metrics-config/":{
|
||||
"get":{
|
||||
"description":"Return the metrics layer configuration",
|
||||
"operationId":"get_metrics_config",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"tags":[
|
||||
"metrics"
|
||||
],
|
||||
"parameters":[
|
||||
],
|
||||
"responses":{
|
||||
"200":{
|
||||
"schema": {
|
||||
"type":"array",
|
||||
"items":{
|
||||
"$ref":"#/definitions/metrics_config",
|
||||
"description":"metrics Config value"
|
||||
}
|
||||
}
|
||||
},
|
||||
"default":{
|
||||
"description":"unexpected error",
|
||||
"schema":{
|
||||
"$ref":"#/definitions/ErrorModel"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"post": {
|
||||
"description":"Set the metrics layer relabel configuration",
|
||||
"operationId":"set_metrics_config",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"tags":[
|
||||
"metrics"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"in":"body",
|
||||
"name":"conf",
|
||||
"description":"An array of relabel_config objects",
|
||||
"schema": {
|
||||
"type":"array",
|
||||
"items":{
|
||||
"$ref":"#/definitions/metrics_config",
|
||||
"description":"metrics Config value"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses":{
|
||||
"200":{
|
||||
"description": "OK"
|
||||
},
|
||||
"default":{
|
||||
"description":"unexpected error",
|
||||
"schema":{
|
||||
"$ref":"#/definitions/ErrorModel"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,43 +0,0 @@
|
||||
{
|
||||
"apiVersion":"0.0.1",
|
||||
"swaggerVersion":"1.2",
|
||||
"basePath":"{{Protocol}}://{{Host}}",
|
||||
"resourcePath":"/raft",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"apis":[
|
||||
{
|
||||
"path":"/raft/trigger_snapshot/{group_id}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Triggers snapshot creation and log truncation for the given Raft group",
|
||||
"type":"string",
|
||||
"nickname":"trigger_snapshot",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"group_id",
|
||||
"description":"The ID of the group which should get snapshotted",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
},
|
||||
{
|
||||
"name":"timeout",
|
||||
"description":"Timeout in seconds after which the endpoint returns a failure. If not provided, 60s is used.",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"long",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -465,7 +465,7 @@
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Retrieve the mapping of endpoint to host ID of all nodes that own tokens",
|
||||
"summary":"Retrieve the mapping of endpoint to host ID",
|
||||
"type":"array",
|
||||
"items":{
|
||||
"type":"mapper"
|
||||
@@ -667,7 +667,7 @@
|
||||
},
|
||||
{
|
||||
"name":"kn",
|
||||
"description":"Comma-separated keyspaces name that their snapshot will be deleted",
|
||||
"description":"Comma seperated keyspaces name that their snapshot will be deleted",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -701,30 +701,6 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/storage_service/compact",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Forces major compaction in all keyspaces",
|
||||
"type":"void",
|
||||
"nickname":"force_compaction",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"flush_memtables",
|
||||
"description":"Controls flushing of memtables before compaction (true by default). Set to \"false\" to skip automatic flushing of memtables before compaction, e.g. when tables were flushed explicitly before invoking the compaction api.",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"boolean",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/storage_service/keyspace_compaction/{keyspace}",
|
||||
"operations":[
|
||||
@@ -747,19 +723,11 @@
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated column family names",
|
||||
"description":"Comma seperated column family names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"flush_memtables",
|
||||
"description":"Controls flushing of memtables before compaction (true by default). Set to \"false\" to skip automatic flushing of memtables before compaction, e.g. when tables were flushed explicitly before invoking the compaction api.",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"boolean",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -787,7 +755,7 @@
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated column family names",
|
||||
"description":"Comma seperated column family names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -819,7 +787,7 @@
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated table names",
|
||||
"description":"Comma-seperated table names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -894,7 +862,7 @@
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated column family names",
|
||||
"description":"Comma seperated column family names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -934,7 +902,7 @@
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated column family names",
|
||||
"description":"Comma seperated column family names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -944,21 +912,6 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/storage_service/flush",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Flush all memtables in all keyspaces.",
|
||||
"type":"void",
|
||||
"nickname":"force_flush",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/storage_service/keyspace_flush/{keyspace}",
|
||||
"operations":[
|
||||
@@ -981,7 +934,7 @@
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated column family names",
|
||||
"description":"Comma seperated column family names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -1161,14 +1114,6 @@
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"ranges_parallelism",
|
||||
"description":"An integer specifying the number of ranges to repair in parallel by user request. If this number is bigger than the max_repair_ranges_in_parallel calculated by Scylla core, the smaller one will be used.",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -1283,7 +1228,7 @@
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Removes a node from the cluster. Replicated data that logically belonged to this node is redistributed among the remaining nodes.",
|
||||
"summary":"Removes token (and all data associated with enpoint that had it) from the ring",
|
||||
"type":"void",
|
||||
"nickname":"remove_node",
|
||||
"produces":[
|
||||
@@ -1300,7 +1245,7 @@
|
||||
},
|
||||
{
|
||||
"name":"ignore_nodes",
|
||||
"description":"Comma-separated list of dead nodes to ignore in removenode operation. Use the same method for all nodes to ignore: either Host IDs or ip addresses.",
|
||||
"description":"List of dead nodes to ingore in removenode operation",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -2001,7 +1946,7 @@
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Forces this node to recalculate versions of schema objects.",
|
||||
"summary":"Reset local schema",
|
||||
"type":"void",
|
||||
"nickname":"reset_local_schema",
|
||||
"produces":[
|
||||
@@ -2128,7 +2073,7 @@
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated column family names",
|
||||
"description":"Comma seperated column family names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -2155,66 +2100,7 @@
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated column family names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/storage_service/tombstone_gc/{keyspace}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Enable tombstone GC",
|
||||
"type":"void",
|
||||
"nickname":"enable_tombstone_gc",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"keyspace",
|
||||
"description":"The keyspace",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated column family names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"method":"DELETE",
|
||||
"summary":"Disable tombstone GC",
|
||||
"type":"void",
|
||||
"nickname":"disable_tombstone_gc",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"keyspace",
|
||||
"description":"The keyspace",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated column family names",
|
||||
"description":"Comma seperated column family names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -2542,23 +2428,7 @@
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/storage_service/raft_topology/reload",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Reload Raft topology state from disk.",
|
||||
"type":"void",
|
||||
"nickname":"reload_raft_topology_state",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"models":{
|
||||
"mapper":{
|
||||
@@ -2761,7 +2631,7 @@
|
||||
"description":"File creation time"
|
||||
},
|
||||
"generation":{
|
||||
"type":"string",
|
||||
"type":"long",
|
||||
"description":"SSTable generation"
|
||||
},
|
||||
"level":{
|
||||
@@ -2771,7 +2641,7 @@
|
||||
"version":{
|
||||
"type":"string",
|
||||
"enum":[
|
||||
"ka", "la", "mc", "md", "me"
|
||||
"ka", "la", "mc", "md"
|
||||
],
|
||||
"description":"SSTable version"
|
||||
},
|
||||
|
||||
@@ -16,7 +16,7 @@
|
||||
}
|
||||
},
|
||||
"host": "{{Host}}",
|
||||
"basePath": "/",
|
||||
"basePath": "/v2",
|
||||
"schemes": [
|
||||
"http"
|
||||
],
|
||||
|
||||
@@ -52,45 +52,6 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/system/log",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Write a message to the Scylla log",
|
||||
"type":"void",
|
||||
"nickname":"write_log_message",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"message",
|
||||
"description":"The message to write to the log",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"level",
|
||||
"description":"The logging level to use",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"enum":[
|
||||
"error",
|
||||
"warn",
|
||||
"info",
|
||||
"debug",
|
||||
"trace"
|
||||
],
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/system/drop_sstable_caches",
|
||||
"operations":[
|
||||
|
||||
@@ -1,337 +0,0 @@
|
||||
{
|
||||
"apiVersion":"0.0.1",
|
||||
"swaggerVersion":"1.2",
|
||||
"basePath":"{{Protocol}}://{{Host}}",
|
||||
"resourcePath":"/task_manager",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"apis":[
|
||||
{
|
||||
"path":"/task_manager/list_modules",
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get all modules names",
|
||||
"type":"array",
|
||||
"items":{
|
||||
"type":"string"
|
||||
},
|
||||
"nickname":"get_modules",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/task_manager/list_module_tasks/{module}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get a list of tasks",
|
||||
"type":"array",
|
||||
"items":{
|
||||
"type":"task_stats"
|
||||
},
|
||||
"nickname":"get_tasks",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"module",
|
||||
"description":"The module to query about",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
},
|
||||
{
|
||||
"name":"internal",
|
||||
"description":"Boolean flag indicating whether internal tasks should be shown (false by default)",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"boolean",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"keyspace",
|
||||
"description":"The keyspace to query about",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"table",
|
||||
"description":"The table to query about",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/task_manager/task_status/{task_id}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get task status",
|
||||
"type":"task_status",
|
||||
"nickname":"get_task_status",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"task_id",
|
||||
"description":"The uuid of a task to query about",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/task_manager/abort_task/{task_id}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Abort running task and its descendants",
|
||||
"type":"void",
|
||||
"nickname":"abort_task",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"task_id",
|
||||
"description":"The uuid of a task to abort",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/task_manager/wait_task/{task_id}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Wait for a task to complete",
|
||||
"type":"task_status",
|
||||
"nickname":"wait_task",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"task_id",
|
||||
"description":"The uuid of a task to wait for",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/task_manager/task_status_recursive/{task_id}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get statuses of the task and all its descendants",
|
||||
"type":"array",
|
||||
"items":{
|
||||
"type":"task_status"
|
||||
},
|
||||
"nickname":"get_task_status_recursively",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"task_id",
|
||||
"description":"The uuid of a task to query about",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/task_manager/ttl",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Set ttl in seconds and get last value",
|
||||
"type":"long",
|
||||
"nickname":"get_and_update_ttl",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"ttl",
|
||||
"description":"The number of seconds for which the tasks will be kept in memory after it finishes",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"long",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"models":{
|
||||
"task_stats" :{
|
||||
"id": "task_stats",
|
||||
"description":"A task statistics object",
|
||||
"properties":{
|
||||
"task_id":{
|
||||
"type":"string",
|
||||
"description":"The uuid of a task"
|
||||
},
|
||||
"state":{
|
||||
"type":"string",
|
||||
"enum":[
|
||||
"created",
|
||||
"running",
|
||||
"done",
|
||||
"failed"
|
||||
],
|
||||
"description":"The state of a task"
|
||||
},
|
||||
"type":{
|
||||
"type":"string",
|
||||
"description":"The description of the task"
|
||||
},
|
||||
"scope":{
|
||||
"type":"string",
|
||||
"description":"The scope of the task"
|
||||
},
|
||||
"keyspace":{
|
||||
"type":"string",
|
||||
"description":"The keyspace the task is working on (if applicable)"
|
||||
},
|
||||
"table":{
|
||||
"type":"string",
|
||||
"description":"The table the task is working on (if applicable)"
|
||||
},
|
||||
"entity":{
|
||||
"type":"string",
|
||||
"description":"Task-specific entity description"
|
||||
},
|
||||
"sequence_number":{
|
||||
"type":"long",
|
||||
"description":"The running sequence number of the task"
|
||||
}
|
||||
}
|
||||
},
|
||||
"task_status":{
|
||||
"id":"task_status",
|
||||
"description":"A task status object",
|
||||
"properties":{
|
||||
"id":{
|
||||
"type":"string",
|
||||
"description":"The uuid of the task"
|
||||
},
|
||||
"type":{
|
||||
"type":"string",
|
||||
"description":"The description of the task"
|
||||
},
|
||||
"scope":{
|
||||
"type":"string",
|
||||
"description":"The scope of the task"
|
||||
},
|
||||
"state":{
|
||||
"type":"string",
|
||||
"enum":[
|
||||
"created",
|
||||
"running",
|
||||
"done",
|
||||
"failed"
|
||||
],
|
||||
"description":"The state of the task"
|
||||
},
|
||||
"is_abortable":{
|
||||
"type":"boolean",
|
||||
"description":"Boolean flag indicating whether the task can be aborted"
|
||||
},
|
||||
"start_time":{
|
||||
"type":"datetime",
|
||||
"description":"The start time of the task"
|
||||
},
|
||||
"end_time":{
|
||||
"type":"datetime",
|
||||
"description":"The end time of the task (unspecified when the task is not completed)"
|
||||
},
|
||||
"error":{
|
||||
"type":"string",
|
||||
"description":"Error string, if the task failed"
|
||||
},
|
||||
"parent_id":{
|
||||
"type":"string",
|
||||
"description":"The uuid of the parent task"
|
||||
},
|
||||
"sequence_number":{
|
||||
"type":"long",
|
||||
"description":"The running sequence number of the task"
|
||||
},
|
||||
"shard":{
|
||||
"type":"long",
|
||||
"description":"The number of a shard the task is running on"
|
||||
},
|
||||
"keyspace":{
|
||||
"type":"string",
|
||||
"description":"The keyspace the task is working on (if applicable)"
|
||||
},
|
||||
"table":{
|
||||
"type":"string",
|
||||
"description":"The table the task is working on (if applicable)"
|
||||
},
|
||||
"entity":{
|
||||
"type":"string",
|
||||
"description":"Task-specific entity description"
|
||||
},
|
||||
"progress_units":{
|
||||
"type":"string",
|
||||
"description":"A description of the progress units"
|
||||
},
|
||||
"progress_total":{
|
||||
"type":"double",
|
||||
"description":"The total number of units to complete for the task"
|
||||
},
|
||||
"progress_completed":{
|
||||
"type":"double",
|
||||
"description":"The number of units completed so far"
|
||||
},
|
||||
"children_ids":{
|
||||
"type":"array",
|
||||
"items":{
|
||||
"type":"string"
|
||||
},
|
||||
"description":"Task IDs of children of this task"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,153 +0,0 @@
|
||||
{
|
||||
"apiVersion":"0.0.1",
|
||||
"swaggerVersion":"1.2",
|
||||
"basePath":"{{Protocol}}://{{Host}}",
|
||||
"resourcePath":"/task_manager_test",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"apis":[
|
||||
{
|
||||
"path":"/task_manager_test/test_module",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Register test module in task manager",
|
||||
"type":"void",
|
||||
"nickname":"register_test_module",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
]
|
||||
},
|
||||
{
|
||||
"method":"DELETE",
|
||||
"summary":"Unregister test module in task manager",
|
||||
"type":"void",
|
||||
"nickname":"unregister_test_module",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/task_manager_test/test_task",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Register test task",
|
||||
"type":"string",
|
||||
"nickname":"register_test_task",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"task_id",
|
||||
"description":"The uuid of a task to register",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"shard",
|
||||
"description":"The shard of the task",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"long",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"parent_id",
|
||||
"description":"The uuid of a parent task",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"keyspace",
|
||||
"description":"The keyspace the task is working on",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"table",
|
||||
"description":"The table the task is working on",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"entity",
|
||||
"description":"Task-specific entity description",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"method":"DELETE",
|
||||
"summary":"Unregister test task",
|
||||
"type":"void",
|
||||
"nickname":"unregister_test_task",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"task_id",
|
||||
"description":"The uuid of a task to register",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/task_manager_test/finish_test_task/{task_id}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Finish test task",
|
||||
"type":"void",
|
||||
"nickname":"finish_test_task",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"task_id",
|
||||
"description":"The uuid of a task to finish",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
},
|
||||
{
|
||||
"name":"error",
|
||||
"description":"The error with which task fails (if it does)",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
129
api/api.cc
129
api/api.cc
@@ -24,19 +24,14 @@
|
||||
#include "compaction_manager.hh"
|
||||
#include "hinted_handoff.hh"
|
||||
#include "error_injection.hh"
|
||||
#include "authorization_cache.hh"
|
||||
#include <seastar/http/exception.hh>
|
||||
#include "stream_manager.hh"
|
||||
#include "system.hh"
|
||||
#include "api/config.hh"
|
||||
#include "task_manager.hh"
|
||||
#include "task_manager_test.hh"
|
||||
#include "raft.hh"
|
||||
|
||||
logging::logger apilog("api");
|
||||
|
||||
namespace api {
|
||||
using namespace seastar::httpd;
|
||||
|
||||
static std::unique_ptr<reply> exception_reply(std::exception_ptr eptr) {
|
||||
try {
|
||||
@@ -61,10 +56,8 @@ future<> set_server_init(http_context& ctx) {
|
||||
rb->set_api_doc(r);
|
||||
rb02->set_api_doc(r);
|
||||
rb02->register_api_file(r, "swagger20_header");
|
||||
rb02->register_api_file(r, "metrics");
|
||||
rb->register_function(r, "system",
|
||||
"The system related API");
|
||||
rb02->add_definitions_file(r, "metrics");
|
||||
set_system(ctx, r);
|
||||
});
|
||||
}
|
||||
@@ -72,7 +65,7 @@ future<> set_server_init(http_context& ctx) {
|
||||
future<> set_server_config(http_context& ctx, const db::config& cfg) {
|
||||
auto rb02 = std::make_shared < api_registry_builder20 > (ctx.api_doc, "/v2");
|
||||
return ctx.http_server.set_routes([&ctx, &cfg, rb02](routes& r) {
|
||||
set_config(rb02, ctx, r, cfg, false);
|
||||
set_config(rb02, ctx, r, cfg);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -103,16 +96,12 @@ future<> unset_rpc_controller(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_rpc_controller(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_storage_service(http_context& ctx, sharded<service::storage_service>& ss, service::raft_group0_client& group0_client) {
|
||||
return register_api(ctx, "storage_service", "The storage service API", [&ss, &group0_client] (http_context& ctx, routes& r) {
|
||||
set_storage_service(ctx, r, ss, group0_client);
|
||||
future<> set_server_storage_service(http_context& ctx, sharded<service::storage_service>& ss, sharded<gms::gossiper>& g, sharded<cdc::generation_service>& cdc_gs) {
|
||||
return register_api(ctx, "storage_service", "The storage service API", [&ss, &g, &cdc_gs] (http_context& ctx, routes& r) {
|
||||
set_storage_service(ctx, r, ss, g.local(), cdc_gs);
|
||||
});
|
||||
}
|
||||
|
||||
future<> unset_server_storage_service(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_storage_service(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_sstables_loader(http_context& ctx, sharded<sstables_loader>& sst_loader) {
|
||||
return ctx.http_server.set_routes([&ctx, &sst_loader] (routes& r) { set_sstables_loader(ctx, r, sst_loader); });
|
||||
}
|
||||
@@ -137,17 +126,6 @@ future<> unset_server_repair(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_repair(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_authorization_cache(http_context &ctx, sharded<auth::service> &auth_service) {
|
||||
return register_api(ctx, "authorization_cache",
|
||||
"The authorization cache API", [&auth_service] (http_context &ctx, routes &r) {
|
||||
set_authorization_cache(ctx, r, auth_service);
|
||||
});
|
||||
}
|
||||
|
||||
future<> unset_server_authorization_cache(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_authorization_cache(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_snapshot(http_context& ctx, sharded<db::snapshot_ctl>& snap_ctl) {
|
||||
return ctx.http_server.set_routes([&ctx, &snap_ctl] (routes& r) { set_snapshot(ctx, r, snap_ctl); });
|
||||
}
|
||||
@@ -156,14 +134,8 @@ future<> unset_server_snapshot(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_snapshot(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_snitch(http_context& ctx, sharded<locator::snitch_ptr>& snitch) {
|
||||
return register_api(ctx, "endpoint_snitch_info", "The endpoint snitch info API", [&snitch] (http_context& ctx, routes& r) {
|
||||
set_endpoint_snitch(ctx, r, snitch);
|
||||
});
|
||||
}
|
||||
|
||||
future<> unset_server_snitch(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_endpoint_snitch(ctx, r); });
|
||||
future<> set_server_snitch(http_context& ctx) {
|
||||
return register_api(ctx, "endpoint_snitch_info", "The endpoint snitch info API", set_endpoint_snitch);
|
||||
}
|
||||
|
||||
future<> set_server_gossip(http_context& ctx, sharded<gms::gossiper>& g) {
|
||||
@@ -173,15 +145,9 @@ future<> set_server_gossip(http_context& ctx, sharded<gms::gossiper>& g) {
|
||||
});
|
||||
}
|
||||
|
||||
future<> set_server_load_sstable(http_context& ctx, sharded<db::system_keyspace>& sys_ks) {
|
||||
future<> set_server_load_sstable(http_context& ctx) {
|
||||
return register_api(ctx, "column_family",
|
||||
"The column family API", [&sys_ks] (http_context& ctx, routes& r) {
|
||||
set_column_family(ctx, r, sys_ks);
|
||||
});
|
||||
}
|
||||
|
||||
future<> unset_server_load_sstable(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_column_family(ctx, r); });
|
||||
"The column family API", set_column_family);
|
||||
}
|
||||
|
||||
future<> set_server_messaging_service(http_context& ctx, sharded<netw::messaging_service>& ms) {
|
||||
@@ -194,17 +160,13 @@ future<> unset_server_messaging_service(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_messaging_service(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_storage_proxy(http_context& ctx, sharded<service::storage_proxy>& proxy) {
|
||||
future<> set_server_storage_proxy(http_context& ctx, sharded<service::storage_service>& ss) {
|
||||
return register_api(ctx, "storage_proxy",
|
||||
"The storage proxy API", [&proxy] (http_context& ctx, routes& r) {
|
||||
set_storage_proxy(ctx, r, proxy);
|
||||
"The storage proxy API", [&ss] (http_context& ctx, routes& r) {
|
||||
set_storage_proxy(ctx, r, ss);
|
||||
});
|
||||
}
|
||||
|
||||
future<> unset_server_storage_proxy(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_storage_proxy(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_stream_manager(http_context& ctx, sharded<streaming::stream_manager>& sm) {
|
||||
return register_api(ctx, "stream_manager",
|
||||
"The stream manager API", [&sm] (http_context& ctx, routes& r) {
|
||||
@@ -221,10 +183,10 @@ future<> set_server_cache(http_context& ctx) {
|
||||
"The cache service API", set_cache_service);
|
||||
}
|
||||
|
||||
future<> set_hinted_handoff(http_context& ctx, sharded<service::storage_proxy>& proxy) {
|
||||
future<> set_hinted_handoff(http_context& ctx, sharded<gms::gossiper>& g) {
|
||||
return register_api(ctx, "hinted_handoff",
|
||||
"The hinted handoff API", [&proxy] (http_context& ctx, routes& r) {
|
||||
set_hinted_handoff(ctx, r, proxy);
|
||||
"The hinted handoff API", [&g] (http_context& ctx, routes& r) {
|
||||
set_hinted_handoff(ctx, r, g.local());
|
||||
});
|
||||
}
|
||||
|
||||
@@ -271,68 +233,5 @@ future<> set_server_done(http_context& ctx) {
|
||||
});
|
||||
}
|
||||
|
||||
future<> set_server_task_manager(http_context& ctx, lw_shared_ptr<db::config> cfg) {
|
||||
auto rb = std::make_shared < api_registry_builder > (ctx.api_doc);
|
||||
|
||||
return ctx.http_server.set_routes([rb, &ctx, &cfg = *cfg](routes& r) {
|
||||
rb->register_function(r, "task_manager",
|
||||
"The task manager API");
|
||||
set_task_manager(ctx, r, cfg);
|
||||
});
|
||||
}
|
||||
|
||||
#ifndef SCYLLA_BUILD_MODE_RELEASE
|
||||
|
||||
future<> set_server_task_manager_test(http_context& ctx) {
|
||||
auto rb = std::make_shared < api_registry_builder > (ctx.api_doc);
|
||||
|
||||
return ctx.http_server.set_routes([rb, &ctx](routes& r) mutable {
|
||||
rb->register_function(r, "task_manager_test",
|
||||
"The task manager test API");
|
||||
set_task_manager_test(ctx, r);
|
||||
});
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
future<> set_server_raft(http_context& ctx, sharded<service::raft_group_registry>& raft_gr) {
|
||||
auto rb = std::make_shared<api_registry_builder>(ctx.api_doc);
|
||||
return ctx.http_server.set_routes([rb, &ctx, &raft_gr] (routes& r) {
|
||||
rb->register_function(r, "raft", "The Raft API");
|
||||
set_raft(ctx, r, raft_gr);
|
||||
});
|
||||
}
|
||||
|
||||
future<> unset_server_raft(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_raft(ctx, r); });
|
||||
}
|
||||
|
||||
void req_params::process(const request& req) {
|
||||
// Process mandatory parameters
|
||||
for (auto& [name, ent] : params) {
|
||||
if (!ent.is_mandatory) {
|
||||
continue;
|
||||
}
|
||||
try {
|
||||
ent.value = req.get_path_param(name);
|
||||
} catch (std::out_of_range&) {
|
||||
throw httpd::bad_param_exception(fmt::format("Mandatory parameter '{}' was not provided", name));
|
||||
}
|
||||
}
|
||||
|
||||
// Process optional parameters
|
||||
for (auto& [name, value] : req.query_parameters) {
|
||||
try {
|
||||
auto& ent = params.at(name);
|
||||
if (ent.is_mandatory) {
|
||||
throw httpd::bad_param_exception(fmt::format("Parameter '{}' is expected to be provided as part of the request url", name));
|
||||
}
|
||||
ent.value = value;
|
||||
} catch (std::out_of_range&) {
|
||||
throw httpd::bad_param_exception(fmt::format("Unsupported optional parameter '{}'", name));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
83
api/api.hh
83
api/api.hh
@@ -27,7 +27,7 @@ template<class T>
|
||||
std::vector<sstring> container_to_vec(const T& container) {
|
||||
std::vector<sstring> res;
|
||||
for (auto i : container) {
|
||||
res.push_back(fmt::to_string(i));
|
||||
res.push_back(boost::lexical_cast<std::string>(i));
|
||||
}
|
||||
return res;
|
||||
}
|
||||
@@ -47,8 +47,8 @@ template<class T, class MAP>
|
||||
std::vector<T>& map_to_key_value(const MAP& map, std::vector<T>& res) {
|
||||
for (auto i : map) {
|
||||
T val;
|
||||
val.key = fmt::to_string(i.first);
|
||||
val.value = fmt::to_string(i.second);
|
||||
val.key = boost::lexical_cast<std::string>(i.first);
|
||||
val.value = boost::lexical_cast<std::string>(i.second);
|
||||
res.push_back(val);
|
||||
}
|
||||
return res;
|
||||
@@ -65,7 +65,7 @@ template <typename MAP>
|
||||
std::vector<sstring> map_keys(const MAP& map) {
|
||||
std::vector<sstring> res;
|
||||
for (const auto& i : map) {
|
||||
res.push_back(fmt::to_string(i.first));
|
||||
res.push_back(boost::lexical_cast<std::string>(i.first));
|
||||
}
|
||||
return res;
|
||||
}
|
||||
@@ -137,14 +137,6 @@ future<json::json_return_type> sum_timer_stats(distributed<T>& d, utils::timed_
|
||||
});
|
||||
}
|
||||
|
||||
template<class T, class F>
|
||||
future<json::json_return_type> sum_timer_stats(distributed<T>& d, utils::timed_rate_moving_average_summary_and_histogram F::*f) {
|
||||
return d.map_reduce0([f](const T& p) {return (p.get_stats().*f).rate();}, utils::rate_moving_average_and_histogram(),
|
||||
std::plus<utils::rate_moving_average_and_histogram>()).then([](const utils::rate_moving_average_and_histogram& val) {
|
||||
return make_ready_future<json::json_return_type>(timer_to_json(val));
|
||||
});
|
||||
}
|
||||
|
||||
inline int64_t min_int64(int64_t a, int64_t b) {
|
||||
return std::min(a,b);
|
||||
}
|
||||
@@ -189,7 +181,7 @@ struct basic_ratio_holder : public json::jsonable {
|
||||
typedef basic_ratio_holder<double> ratio_holder;
|
||||
typedef basic_ratio_holder<int64_t> integral_ratio_holder;
|
||||
|
||||
class unimplemented_exception : public httpd::base_exception {
|
||||
class unimplemented_exception : public base_exception {
|
||||
public:
|
||||
unimplemented_exception()
|
||||
: base_exception("API call is not supported yet", reply::status_type::internal_server_error) {
|
||||
@@ -238,74 +230,13 @@ public:
|
||||
value = T{boost::lexical_cast<Base>(param)};
|
||||
}
|
||||
} catch (boost::bad_lexical_cast&) {
|
||||
throw httpd::bad_param_exception(format("{} ({}): type error - should be {}", name, param, boost::units::detail::demangle(typeid(Base).name())));
|
||||
throw bad_param_exception(format("{} ({}): type error - should be {}", name, param, boost::units::detail::demangle(typeid(Base).name())));
|
||||
}
|
||||
}
|
||||
|
||||
operator T() const { return value; }
|
||||
};
|
||||
|
||||
using mandatory = bool_class<struct mandatory_tag>;
|
||||
|
||||
class req_params {
|
||||
public:
|
||||
struct def {
|
||||
std::optional<sstring> value;
|
||||
mandatory is_mandatory = mandatory::no;
|
||||
|
||||
def(std::optional<sstring> value_ = std::nullopt, mandatory is_mandatory_ = mandatory::no)
|
||||
: value(std::move(value_))
|
||||
, is_mandatory(is_mandatory_)
|
||||
{ }
|
||||
|
||||
def(mandatory is_mandatory_)
|
||||
: is_mandatory(is_mandatory_)
|
||||
{ }
|
||||
};
|
||||
|
||||
private:
|
||||
std::unordered_map<sstring, def> params;
|
||||
|
||||
public:
|
||||
req_params(std::initializer_list<std::pair<sstring, def>> l) {
|
||||
for (const auto& [name, ent] : l) {
|
||||
add(std::move(name), std::move(ent));
|
||||
}
|
||||
}
|
||||
|
||||
void add(sstring name, def ent) {
|
||||
params.emplace(std::move(name), std::move(ent));
|
||||
}
|
||||
|
||||
void process(const request& req);
|
||||
|
||||
const std::optional<sstring>& get(const char* name) const {
|
||||
return params.at(name).value;
|
||||
}
|
||||
|
||||
template <typename T = sstring>
|
||||
const std::optional<T> get_as(const char* name) const {
|
||||
return get(name);
|
||||
}
|
||||
|
||||
template <typename T = sstring>
|
||||
requires std::same_as<T, bool>
|
||||
const std::optional<bool> get_as(const char* name) const {
|
||||
auto value = get(name);
|
||||
if (!value) {
|
||||
return std::nullopt;
|
||||
}
|
||||
std::transform(value->begin(), value->end(), value->begin(), ::tolower);
|
||||
if (value == "true" || value == "yes" || value == "1") {
|
||||
return true;
|
||||
}
|
||||
if (value == "false" || value == "no" || value == "0") {
|
||||
return false;
|
||||
}
|
||||
throw boost::bad_lexical_cast{};
|
||||
}
|
||||
};
|
||||
|
||||
httpd::utils_json::estimated_histogram time_to_json_histogram(const utils::time_estimated_histogram& val);
|
||||
utils_json::estimated_histogram time_to_json_histogram(const utils::time_estimated_histogram& val);
|
||||
|
||||
}
|
||||
|
||||
@@ -11,19 +11,13 @@
|
||||
#include <seastar/core/future.hh>
|
||||
|
||||
#include "replica/database_fwd.hh"
|
||||
#include "tasks/task_manager.hh"
|
||||
#include "seastarx.hh"
|
||||
|
||||
using request = http::request;
|
||||
using reply = http::reply;
|
||||
|
||||
namespace service {
|
||||
|
||||
class load_meter;
|
||||
class storage_proxy;
|
||||
class storage_service;
|
||||
class raft_group0_client;
|
||||
class raft_group_registry;
|
||||
|
||||
} // namespace service
|
||||
|
||||
@@ -37,7 +31,6 @@ namespace locator {
|
||||
|
||||
class token_metadata;
|
||||
class shared_token_metadata;
|
||||
class snitch_ptr;
|
||||
|
||||
} // namespace locator
|
||||
|
||||
@@ -49,10 +42,10 @@ class config;
|
||||
namespace view {
|
||||
class view_builder;
|
||||
}
|
||||
class system_keyspace;
|
||||
}
|
||||
namespace netw { class messaging_service; }
|
||||
class repair_service;
|
||||
namespace cdc { class generation_service; }
|
||||
|
||||
namespace gms {
|
||||
|
||||
@@ -60,8 +53,6 @@ class gossiper;
|
||||
|
||||
}
|
||||
|
||||
namespace auth { class service; }
|
||||
|
||||
namespace api {
|
||||
|
||||
struct http_context {
|
||||
@@ -69,13 +60,14 @@ struct http_context {
|
||||
sstring api_doc;
|
||||
httpd::http_server_control http_server;
|
||||
distributed<replica::database>& db;
|
||||
distributed<service::storage_proxy>& sp;
|
||||
service::load_meter& lmeter;
|
||||
const sharded<locator::shared_token_metadata>& shared_token_metadata;
|
||||
sharded<tasks::task_manager>& tm;
|
||||
|
||||
http_context(distributed<replica::database>& _db,
|
||||
service::load_meter& _lm, const sharded<locator::shared_token_metadata>& _stm, sharded<tasks::task_manager>& _tm)
|
||||
: db(_db), lmeter(_lm), shared_token_metadata(_stm), tm(_tm) {
|
||||
distributed<service::storage_proxy>& _sp,
|
||||
service::load_meter& _lm, const sharded<locator::shared_token_metadata>& _stm)
|
||||
: db(_db), sp(_sp), lmeter(_lm), shared_token_metadata(_stm) {
|
||||
}
|
||||
|
||||
const locator::token_metadata& get_token_metadata();
|
||||
@@ -83,10 +75,8 @@ struct http_context {
|
||||
|
||||
future<> set_server_init(http_context& ctx);
|
||||
future<> set_server_config(http_context& ctx, const db::config& cfg);
|
||||
future<> set_server_snitch(http_context& ctx, sharded<locator::snitch_ptr>& snitch);
|
||||
future<> unset_server_snitch(http_context& ctx);
|
||||
future<> set_server_storage_service(http_context& ctx, sharded<service::storage_service>& ss, service::raft_group0_client&);
|
||||
future<> unset_server_storage_service(http_context& ctx);
|
||||
future<> set_server_snitch(http_context& ctx);
|
||||
future<> set_server_storage_service(http_context& ctx, sharded<service::storage_service>& ss, sharded<gms::gossiper>& g, sharded<cdc::generation_service>& cdc_gs);
|
||||
future<> set_server_sstables_loader(http_context& ctx, sharded<sstables_loader>& sst_loader);
|
||||
future<> unset_server_sstables_loader(http_context& ctx);
|
||||
future<> set_server_view_builder(http_context& ctx, sharded<db::view::view_builder>& vb);
|
||||
@@ -97,28 +87,20 @@ future<> set_transport_controller(http_context& ctx, cql_transport::controller&
|
||||
future<> unset_transport_controller(http_context& ctx);
|
||||
future<> set_rpc_controller(http_context& ctx, thrift_controller& ctl);
|
||||
future<> unset_rpc_controller(http_context& ctx);
|
||||
future<> set_server_authorization_cache(http_context& ctx, sharded<auth::service> &auth_service);
|
||||
future<> unset_server_authorization_cache(http_context& ctx);
|
||||
future<> set_server_snapshot(http_context& ctx, sharded<db::snapshot_ctl>& snap_ctl);
|
||||
future<> unset_server_snapshot(http_context& ctx);
|
||||
future<> set_server_gossip(http_context& ctx, sharded<gms::gossiper>& g);
|
||||
future<> set_server_load_sstable(http_context& ctx, sharded<db::system_keyspace>& sys_ks);
|
||||
future<> unset_server_load_sstable(http_context& ctx);
|
||||
future<> set_server_load_sstable(http_context& ctx);
|
||||
future<> set_server_messaging_service(http_context& ctx, sharded<netw::messaging_service>& ms);
|
||||
future<> unset_server_messaging_service(http_context& ctx);
|
||||
future<> set_server_storage_proxy(http_context& ctx, sharded<service::storage_proxy>& proxy);
|
||||
future<> unset_server_storage_proxy(http_context& ctx);
|
||||
future<> set_server_storage_proxy(http_context& ctx, sharded<service::storage_service>& ss);
|
||||
future<> set_server_stream_manager(http_context& ctx, sharded<streaming::stream_manager>& sm);
|
||||
future<> unset_server_stream_manager(http_context& ctx);
|
||||
future<> set_hinted_handoff(http_context& ctx, sharded<service::storage_proxy>& p);
|
||||
future<> set_hinted_handoff(http_context& ctx, sharded<gms::gossiper>& g);
|
||||
future<> unset_hinted_handoff(http_context& ctx);
|
||||
future<> set_server_gossip_settle(http_context& ctx, sharded<gms::gossiper>& g);
|
||||
future<> set_server_cache(http_context& ctx);
|
||||
future<> set_server_compaction_manager(http_context& ctx);
|
||||
future<> set_server_done(http_context& ctx);
|
||||
future<> set_server_task_manager(http_context& ctx, lw_shared_ptr<db::config> cfg);
|
||||
future<> set_server_task_manager_test(http_context& ctx);
|
||||
future<> set_server_raft(http_context&, sharded<service::raft_group_registry>&);
|
||||
future<> unset_server_raft(http_context&);
|
||||
|
||||
}
|
||||
|
||||
@@ -1,35 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2022-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include "api/api-doc/authorization_cache.json.hh"
|
||||
|
||||
#include "api/authorization_cache.hh"
|
||||
#include "api/api.hh"
|
||||
#include "auth/common.hh"
|
||||
#include "auth/service.hh"
|
||||
|
||||
namespace api {
|
||||
using namespace json;
|
||||
using namespace seastar::httpd;
|
||||
|
||||
void set_authorization_cache(http_context& ctx, routes& r, sharded<auth::service> &auth_service) {
|
||||
httpd::authorization_cache_json::authorization_cache_reset.set(r, [&auth_service] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
co_await auth_service.invoke_on_all([] (auth::service& auth) -> future<> {
|
||||
auth.reset_authorization_cache();
|
||||
return make_ready_future<>();
|
||||
});
|
||||
|
||||
co_return json_void();
|
||||
});
|
||||
}
|
||||
|
||||
void unset_authorization_cache(http_context& ctx, routes& r) {
|
||||
httpd::authorization_cache_json::authorization_cache_reset.unset(r);
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,18 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2022-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "api.hh"
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_authorization_cache(http_context& ctx, httpd::routes& r, sharded<auth::service> &auth_service);
|
||||
void unset_authorization_cache(http_context& ctx, httpd::routes& r);
|
||||
|
||||
}
|
||||
@@ -12,128 +12,127 @@
|
||||
|
||||
namespace api {
|
||||
using namespace json;
|
||||
using namespace seastar::httpd;
|
||||
namespace cs = httpd::cache_service_json;
|
||||
|
||||
void set_cache_service(http_context& ctx, routes& r) {
|
||||
cs::get_row_cache_save_period_in_seconds.set(r, [](std::unique_ptr<http::request> req) {
|
||||
cs::get_row_cache_save_period_in_seconds.set(r, [](std::unique_ptr<request> req) {
|
||||
// We never save the cache
|
||||
// Origin uses 0 for never
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
cs::set_row_cache_save_period_in_seconds.set(r, [](std::unique_ptr<http::request> req) {
|
||||
cs::set_row_cache_save_period_in_seconds.set(r, [](std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
unimplemented();
|
||||
auto period = req->get_query_param("period");
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
cs::get_key_cache_save_period_in_seconds.set(r, [](std::unique_ptr<http::request> req) {
|
||||
cs::get_key_cache_save_period_in_seconds.set(r, [](std::unique_ptr<request> req) {
|
||||
// We never save the cache
|
||||
// Origin uses 0 for never
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
cs::set_key_cache_save_period_in_seconds.set(r, [](std::unique_ptr<http::request> req) {
|
||||
cs::set_key_cache_save_period_in_seconds.set(r, [](std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
unimplemented();
|
||||
auto period = req->get_query_param("period");
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
cs::get_counter_cache_save_period_in_seconds.set(r, [](std::unique_ptr<http::request> req) {
|
||||
cs::get_counter_cache_save_period_in_seconds.set(r, [](std::unique_ptr<request> req) {
|
||||
// We never save the cache
|
||||
// Origin uses 0 for never
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
cs::set_counter_cache_save_period_in_seconds.set(r, [](std::unique_ptr<http::request> req) {
|
||||
cs::set_counter_cache_save_period_in_seconds.set(r, [](std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
unimplemented();
|
||||
auto ccspis = req->get_query_param("ccspis");
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
cs::get_row_cache_keys_to_save.set(r, [](std::unique_ptr<http::request> req) {
|
||||
cs::get_row_cache_keys_to_save.set(r, [](std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
unimplemented();
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
cs::set_row_cache_keys_to_save.set(r, [](std::unique_ptr<http::request> req) {
|
||||
cs::set_row_cache_keys_to_save.set(r, [](std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
unimplemented();
|
||||
auto rckts = req->get_query_param("rckts");
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
cs::get_key_cache_keys_to_save.set(r, [](std::unique_ptr<http::request> req) {
|
||||
cs::get_key_cache_keys_to_save.set(r, [](std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
unimplemented();
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
cs::set_key_cache_keys_to_save.set(r, [](std::unique_ptr<http::request> req) {
|
||||
cs::set_key_cache_keys_to_save.set(r, [](std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
unimplemented();
|
||||
auto kckts = req->get_query_param("kckts");
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
cs::get_counter_cache_keys_to_save.set(r, [](std::unique_ptr<http::request> req) {
|
||||
cs::get_counter_cache_keys_to_save.set(r, [](std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
unimplemented();
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
cs::set_counter_cache_keys_to_save.set(r, [](std::unique_ptr<http::request> req) {
|
||||
cs::set_counter_cache_keys_to_save.set(r, [](std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
unimplemented();
|
||||
auto cckts = req->get_query_param("cckts");
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
cs::invalidate_key_cache.set(r, [](std::unique_ptr<http::request> req) {
|
||||
cs::invalidate_key_cache.set(r, [](std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
unimplemented();
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
cs::invalidate_counter_cache.set(r, [](std::unique_ptr<http::request> req) {
|
||||
cs::invalidate_counter_cache.set(r, [](std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
unimplemented();
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
cs::set_row_cache_capacity_in_mb.set(r, [](std::unique_ptr<http::request> req) {
|
||||
cs::set_row_cache_capacity_in_mb.set(r, [](std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
unimplemented();
|
||||
auto capacity = req->get_query_param("capacity");
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
cs::set_key_cache_capacity_in_mb.set(r, [](std::unique_ptr<http::request> req) {
|
||||
cs::set_key_cache_capacity_in_mb.set(r, [](std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
unimplemented();
|
||||
auto period = req->get_query_param("period");
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
cs::set_counter_cache_capacity_in_mb.set(r, [](std::unique_ptr<http::request> req) {
|
||||
cs::set_counter_cache_capacity_in_mb.set(r, [](std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
unimplemented();
|
||||
auto capacity = req->get_query_param("capacity");
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
cs::save_caches.set(r, [](std::unique_ptr<http::request> req) {
|
||||
cs::save_caches.set(r, [](std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
unimplemented();
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
cs::get_key_capacity.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
cs::get_key_capacity.set(r, [] (std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
// FIXME
|
||||
// we don't support keys cache,
|
||||
@@ -141,7 +140,7 @@ void set_cache_service(http_context& ctx, routes& r) {
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
cs::get_key_hits.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
cs::get_key_hits.set(r, [] (std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
// FIXME
|
||||
// we don't support keys cache,
|
||||
@@ -149,7 +148,7 @@ void set_cache_service(http_context& ctx, routes& r) {
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
cs::get_key_requests.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
cs::get_key_requests.set(r, [] (std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
// FIXME
|
||||
// we don't support keys cache,
|
||||
@@ -157,7 +156,7 @@ void set_cache_service(http_context& ctx, routes& r) {
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
cs::get_key_hit_rate.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
cs::get_key_hit_rate.set(r, [] (std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
// FIXME
|
||||
// we don't support keys cache,
|
||||
@@ -165,21 +164,21 @@ void set_cache_service(http_context& ctx, routes& r) {
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
cs::get_key_hits_moving_avrage.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
cs::get_key_hits_moving_avrage.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
// FIXME
|
||||
// See above
|
||||
return make_ready_future<json::json_return_type>(meter_to_json(utils::rate_moving_average()));
|
||||
});
|
||||
|
||||
cs::get_key_requests_moving_avrage.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
cs::get_key_requests_moving_avrage.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
// FIXME
|
||||
// See above
|
||||
return make_ready_future<json::json_return_type>(meter_to_json(utils::rate_moving_average()));
|
||||
});
|
||||
|
||||
cs::get_key_size.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
cs::get_key_size.set(r, [] (std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
// FIXME
|
||||
// we don't support keys cache,
|
||||
@@ -187,7 +186,7 @@ void set_cache_service(http_context& ctx, routes& r) {
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
cs::get_key_entries.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
cs::get_key_entries.set(r, [] (std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
// FIXME
|
||||
// we don't support keys cache,
|
||||
@@ -195,7 +194,7 @@ void set_cache_service(http_context& ctx, routes& r) {
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
cs::get_row_capacity.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
cs::get_row_capacity.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return ctx.db.map_reduce0([](replica::database& db) -> uint64_t {
|
||||
return db.row_cache_tracker().region().occupancy().used_space();
|
||||
}, uint64_t(0), std::plus<uint64_t>()).then([](const int64_t& res) {
|
||||
@@ -203,26 +202,26 @@ void set_cache_service(http_context& ctx, routes& r) {
|
||||
});
|
||||
});
|
||||
|
||||
cs::get_row_hits.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
cs::get_row_hits.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, uint64_t(0), [](const replica::column_family& cf) {
|
||||
return cf.get_row_cache().stats().hits.count();
|
||||
}, std::plus<uint64_t>());
|
||||
});
|
||||
|
||||
cs::get_row_requests.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
cs::get_row_requests.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, uint64_t(0), [](const replica::column_family& cf) {
|
||||
return cf.get_row_cache().stats().hits.count() + cf.get_row_cache().stats().misses.count();
|
||||
}, std::plus<uint64_t>());
|
||||
});
|
||||
|
||||
cs::get_row_hit_rate.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
cs::get_row_hit_rate.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, ratio_holder(), [](const replica::column_family& cf) {
|
||||
return ratio_holder(cf.get_row_cache().stats().hits.count() + cf.get_row_cache().stats().misses.count(),
|
||||
cf.get_row_cache().stats().hits.count());
|
||||
}, std::plus<ratio_holder>());
|
||||
});
|
||||
|
||||
cs::get_row_hits_moving_avrage.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
cs::get_row_hits_moving_avrage.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf_raw(ctx, utils::rate_moving_average(), [](const replica::column_family& cf) {
|
||||
return cf.get_row_cache().stats().hits.rate();
|
||||
}, std::plus<utils::rate_moving_average>()).then([](const utils::rate_moving_average& m) {
|
||||
@@ -230,7 +229,7 @@ void set_cache_service(http_context& ctx, routes& r) {
|
||||
});
|
||||
});
|
||||
|
||||
cs::get_row_requests_moving_avrage.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
cs::get_row_requests_moving_avrage.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf_raw(ctx, utils::rate_moving_average(), [](const replica::column_family& cf) {
|
||||
return cf.get_row_cache().stats().hits.rate() + cf.get_row_cache().stats().misses.rate();
|
||||
}, std::plus<utils::rate_moving_average>()).then([](const utils::rate_moving_average& m) {
|
||||
@@ -238,7 +237,7 @@ void set_cache_service(http_context& ctx, routes& r) {
|
||||
});
|
||||
});
|
||||
|
||||
cs::get_row_size.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
cs::get_row_size.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
// In origin row size is the weighted size.
|
||||
// We currently do not support weights, so we use num entries instead
|
||||
return ctx.db.map_reduce0([](replica::database& db) -> uint64_t {
|
||||
@@ -248,7 +247,7 @@ void set_cache_service(http_context& ctx, routes& r) {
|
||||
});
|
||||
});
|
||||
|
||||
cs::get_row_entries.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
cs::get_row_entries.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return ctx.db.map_reduce0([](replica::database& db) -> uint64_t {
|
||||
return db.row_cache_tracker().partitions();
|
||||
}, uint64_t(0), std::plus<uint64_t>()).then([](const int64_t& res) {
|
||||
@@ -256,7 +255,7 @@ void set_cache_service(http_context& ctx, routes& r) {
|
||||
});
|
||||
});
|
||||
|
||||
cs::get_counter_capacity.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
cs::get_counter_capacity.set(r, [] (std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
// FIXME
|
||||
// we don't support counter cache,
|
||||
@@ -264,7 +263,7 @@ void set_cache_service(http_context& ctx, routes& r) {
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
cs::get_counter_hits.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
cs::get_counter_hits.set(r, [] (std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
// FIXME
|
||||
// we don't support counter cache,
|
||||
@@ -272,7 +271,7 @@ void set_cache_service(http_context& ctx, routes& r) {
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
cs::get_counter_requests.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
cs::get_counter_requests.set(r, [] (std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
// FIXME
|
||||
// we don't support counter cache,
|
||||
@@ -280,7 +279,7 @@ void set_cache_service(http_context& ctx, routes& r) {
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
cs::get_counter_hit_rate.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
cs::get_counter_hit_rate.set(r, [] (std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
// FIXME
|
||||
// we don't support counter cache,
|
||||
@@ -288,21 +287,21 @@ void set_cache_service(http_context& ctx, routes& r) {
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
cs::get_counter_hits_moving_avrage.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
cs::get_counter_hits_moving_avrage.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
// FIXME
|
||||
// See above
|
||||
return make_ready_future<json::json_return_type>(meter_to_json(utils::rate_moving_average()));
|
||||
});
|
||||
|
||||
cs::get_counter_requests_moving_avrage.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
cs::get_counter_requests_moving_avrage.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
// FIXME
|
||||
// See above
|
||||
return make_ready_future<json::json_return_type>(meter_to_json(utils::rate_moving_average()));
|
||||
});
|
||||
|
||||
cs::get_counter_size.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
cs::get_counter_size.set(r, [] (std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
// FIXME
|
||||
// we don't support counter cache,
|
||||
@@ -310,7 +309,7 @@ void set_cache_service(http_context& ctx, routes& r) {
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
cs::get_counter_entries.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
cs::get_counter_entries.set(r, [] (std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
// FIXME
|
||||
// we don't support counter cache,
|
||||
|
||||
@@ -12,6 +12,6 @@
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_cache_service(http_context& ctx, httpd::routes& r);
|
||||
void set_cache_service(http_context& ctx, routes& r);
|
||||
|
||||
}
|
||||
|
||||
@@ -29,11 +29,8 @@ static auto transformer(const std::vector<collectd_value>& values) {
|
||||
case scollectd::data_type::GAUGE:
|
||||
collected_value.values.push(v.d());
|
||||
break;
|
||||
case scollectd::data_type::COUNTER:
|
||||
collected_value.values.push(v.ui());
|
||||
break;
|
||||
case scollectd::data_type::REAL_COUNTER:
|
||||
collected_value.values.push(v.d());
|
||||
case scollectd::data_type::DERIVE:
|
||||
collected_value.values.push(v.i());
|
||||
break;
|
||||
default:
|
||||
collected_value.values.push(v.ui());
|
||||
@@ -52,9 +49,9 @@ static const char* str_to_regex(const sstring& v) {
|
||||
}
|
||||
|
||||
void set_collectd(http_context& ctx, routes& r) {
|
||||
cd::get_collectd.set(r, [](std::unique_ptr<request> req) {
|
||||
cd::get_collectd.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
|
||||
auto id = ::make_shared<scollectd::type_instance_id>(req->get_path_param("pluginid"),
|
||||
auto id = ::make_shared<scollectd::type_instance_id>(req->param["pluginid"],
|
||||
req->get_query_param("instance"), req->get_query_param("type"),
|
||||
req->get_query_param("type_instance"));
|
||||
|
||||
@@ -91,7 +88,7 @@ void set_collectd(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cd::enable_collectd.set(r, [](std::unique_ptr<request> req) -> future<json::json_return_type> {
|
||||
std::regex plugin(req->get_path_param("pluginid").c_str());
|
||||
std::regex plugin(req->param["pluginid"].c_str());
|
||||
std::regex instance(str_to_regex(req->get_query_param("instance")));
|
||||
std::regex type(str_to_regex(req->get_query_param("type")));
|
||||
std::regex type_instance(str_to_regex(req->get_query_param("type_instance")));
|
||||
|
||||
@@ -12,6 +12,6 @@
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_collectd(http_context& ctx, httpd::routes& r);
|
||||
void set_collectd(http_context& ctx, routes& r);
|
||||
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -14,16 +14,11 @@
|
||||
#include <seastar/core/future-util.hh>
|
||||
#include <any>
|
||||
|
||||
namespace db {
|
||||
class system_keyspace;
|
||||
}
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_column_family(http_context& ctx, httpd::routes& r, sharded<db::system_keyspace>& sys_ks);
|
||||
void unset_column_family(http_context& ctx, httpd::routes& r);
|
||||
void set_column_family(http_context& ctx, routes& r);
|
||||
|
||||
table_id get_uuid(const sstring& name, const replica::database& db);
|
||||
const utils::UUID& get_uuid(const sstring& name, const replica::database& db);
|
||||
future<> foreach_column_family(http_context& ctx, const sstring& name, std::function<void(replica::column_family&)> f);
|
||||
|
||||
|
||||
@@ -68,10 +63,9 @@ struct map_reduce_column_families_locally {
|
||||
std::function<std::unique_ptr<std::any>(std::unique_ptr<std::any>, std::unique_ptr<std::any>)> reducer;
|
||||
future<std::unique_ptr<std::any>> operator()(replica::database& db) const {
|
||||
auto res = seastar::make_lw_shared<std::unique_ptr<std::any>>(std::make_unique<std::any>(init));
|
||||
return db.get_tables_metadata().for_each_table_gently([res, this] (table_id, seastar::lw_shared_ptr<replica::table> table) {
|
||||
*res = reducer(std::move(*res), mapper(*table.get()));
|
||||
return make_ready_future();
|
||||
}).then([res] () {
|
||||
return do_for_each(db.get_column_families(), [res, this](const std::pair<utils::UUID, seastar::lw_shared_ptr<replica::table>>& i) {
|
||||
*res = reducer(std::move(*res), mapper(*i.second.get()));
|
||||
}).then([res] {
|
||||
return std::move(*res);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -13,7 +13,6 @@
|
||||
#include <vector>
|
||||
|
||||
namespace api {
|
||||
using namespace seastar::httpd;
|
||||
|
||||
template<typename T>
|
||||
static auto acquire_cl_metric(http_context& ctx, std::function<T (db::commitlog*)> func) {
|
||||
|
||||
@@ -12,6 +12,6 @@
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_commitlog(http_context& ctx, httpd::routes& r);
|
||||
void set_commitlog(http_context& ctx, routes& r);
|
||||
|
||||
}
|
||||
|
||||
@@ -7,7 +7,6 @@
|
||||
*/
|
||||
|
||||
#include <seastar/core/coroutine.hh>
|
||||
#include <seastar/coroutine/exception.hh>
|
||||
|
||||
#include "compaction_manager.hh"
|
||||
#include "compaction/compaction_manager.hh"
|
||||
@@ -23,7 +22,6 @@ namespace api {
|
||||
|
||||
namespace cm = httpd::compaction_manager_json;
|
||||
using namespace json;
|
||||
using namespace seastar::httpd;
|
||||
|
||||
static future<json::json_return_type> get_cm_stats(http_context& ctx,
|
||||
int64_t compaction_manager::stats::*f) {
|
||||
@@ -43,8 +41,9 @@ static std::unordered_map<std::pair<sstring, sstring>, uint64_t, utils::tuple_ha
|
||||
return std::move(a);
|
||||
}
|
||||
|
||||
|
||||
void set_compaction_manager(http_context& ctx, routes& r) {
|
||||
cm::get_compactions.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
cm::get_compactions.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return ctx.db.map_reduce0([](replica::database& db) {
|
||||
std::vector<cm::summary> summaries;
|
||||
const compaction_manager& cm = db.get_compaction_manager();
|
||||
@@ -66,12 +65,12 @@ void set_compaction_manager(http_context& ctx, routes& r) {
|
||||
});
|
||||
});
|
||||
|
||||
cm::get_pending_tasks_by_table.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return ctx.db.map_reduce0([](replica::database& db) {
|
||||
return do_with(std::unordered_map<std::pair<sstring, sstring>, uint64_t, utils::tuple_hash>(), [&db](std::unordered_map<std::pair<sstring, sstring>, uint64_t, utils::tuple_hash>& tasks) {
|
||||
return db.get_tables_metadata().for_each_table_gently([&tasks] (table_id, lw_shared_ptr<replica::table> table) {
|
||||
replica::table& cf = *table.get();
|
||||
tasks[std::make_pair(cf.schema()->ks_name(), cf.schema()->cf_name())] = cf.estimate_pending_compactions();
|
||||
cm::get_pending_tasks_by_table.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return ctx.db.map_reduce0([&ctx](replica::database& db) {
|
||||
return do_with(std::unordered_map<std::pair<sstring, sstring>, uint64_t, utils::tuple_hash>(), [&ctx, &db](std::unordered_map<std::pair<sstring, sstring>, uint64_t, utils::tuple_hash>& tasks) {
|
||||
return do_for_each(db.get_column_families(), [&tasks](const std::pair<utils::UUID, seastar::lw_shared_ptr<replica::table>>& i) {
|
||||
replica::table& cf = *i.second.get();
|
||||
tasks[std::make_pair(cf.schema()->ks_name(), cf.schema()->cf_name())] = cf.get_compaction_strategy().estimated_pending_compactions(cf.as_table_state());
|
||||
return make_ready_future<>();
|
||||
}).then([&tasks] {
|
||||
return std::move(tasks);
|
||||
@@ -92,14 +91,14 @@ void set_compaction_manager(http_context& ctx, routes& r) {
|
||||
});
|
||||
});
|
||||
|
||||
cm::force_user_defined_compaction.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
cm::force_user_defined_compaction.set(r, [] (std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
// FIXME
|
||||
warn(unimplemented::cause::API);
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
cm::stop_compaction.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
cm::stop_compaction.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
auto type = req->get_query_param("type");
|
||||
return ctx.db.invoke_on_all([type] (replica::database& db) {
|
||||
auto& cm = db.get_compaction_manager();
|
||||
@@ -109,8 +108,8 @@ void set_compaction_manager(http_context& ctx, routes& r) {
|
||||
});
|
||||
});
|
||||
|
||||
cm::stop_keyspace_compaction.set(r, [&ctx] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto ks_name = validate_keyspace(ctx, req);
|
||||
cm::stop_keyspace_compaction.set(r, [&ctx] (std::unique_ptr<request> req) -> future<json::json_return_type> {
|
||||
auto ks_name = validate_keyspace(ctx, req->param);
|
||||
auto table_names = parse_tables(ks_name, ctx, req->query_parameters, "tables");
|
||||
if (table_names.empty()) {
|
||||
table_names = map_keys(ctx.db.local().find_keyspace(ks_name).metadata().get()->cf_meta_data());
|
||||
@@ -120,46 +119,41 @@ void set_compaction_manager(http_context& ctx, routes& r) {
|
||||
auto& cm = db.get_compaction_manager();
|
||||
return parallel_for_each(table_names, [&db, &cm, &ks_name, type] (sstring& table_name) {
|
||||
auto& t = db.find_column_family(ks_name, table_name);
|
||||
return t.parallel_foreach_table_state([&] (compaction::table_state& ts) {
|
||||
return cm.stop_compaction(type, &ts);
|
||||
});
|
||||
return cm.stop_compaction(type, &t);
|
||||
});
|
||||
});
|
||||
co_return json_void();
|
||||
});
|
||||
|
||||
cm::get_pending_tasks.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
cm::get_pending_tasks.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return map_reduce_cf(ctx, int64_t(0), [](replica::column_family& cf) {
|
||||
return cf.estimate_pending_compactions();
|
||||
return cf.get_compaction_strategy().estimated_pending_compactions(cf.as_table_state());
|
||||
}, std::plus<int64_t>());
|
||||
});
|
||||
|
||||
cm::get_completed_tasks.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
cm::get_completed_tasks.set(r, [&ctx] (std::unique_ptr<request> req) {
|
||||
return get_cm_stats(ctx, &compaction_manager::stats::completed_tasks);
|
||||
});
|
||||
|
||||
cm::get_total_compactions_completed.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
cm::get_total_compactions_completed.set(r, [] (std::unique_ptr<request> req) {
|
||||
// FIXME
|
||||
// We are currently dont have an API for compaction
|
||||
// so returning a 0 as the number of total compaction is ok
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
cm::get_bytes_compacted.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
cm::get_bytes_compacted.set(r, [] (std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
// FIXME
|
||||
warn(unimplemented::cause::API);
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
cm::get_compaction_history.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
std::function<future<>(output_stream<char>&&)> f = [&ctx] (output_stream<char>&& out) -> future<> {
|
||||
auto s = std::move(out);
|
||||
bool first = true;
|
||||
std::exception_ptr ex;
|
||||
try {
|
||||
co_await s.write("[");
|
||||
co_await ctx.db.local().get_compaction_manager().get_compaction_history([&s, &first](const db::compaction_history_entry& entry) mutable -> future<> {
|
||||
cm::get_compaction_history.set(r, [] (std::unique_ptr<request> req) {
|
||||
std::function<future<>(output_stream<char>&&)> f = [](output_stream<char>&& s) {
|
||||
return do_with(output_stream<char>(std::move(s)), true, [] (output_stream<char>& s, bool& first){
|
||||
return s.write("[").then([&s, &first] {
|
||||
return db::system_keyspace::get_compaction_history([&s, &first](const db::system_keyspace::compaction_history_entry& entry) mutable {
|
||||
cm::history h;
|
||||
h.id = entry.id.to_sstring();
|
||||
h.ks = std::move(entry.ks);
|
||||
@@ -173,26 +167,23 @@ void set_compaction_manager(http_context& ctx, routes& r) {
|
||||
e.value = it.second;
|
||||
h.rows_merged.push(std::move(e));
|
||||
}
|
||||
if (!first) {
|
||||
co_await s.write(", ");
|
||||
}
|
||||
auto fut = first ? make_ready_future<>() : s.write(", ");
|
||||
first = false;
|
||||
co_await formatter::write(s, h);
|
||||
return fut.then([&s, h = std::move(h)] {
|
||||
return formatter::write(s, h);
|
||||
});
|
||||
}).then([&s] {
|
||||
return s.write("]").then([&s] {
|
||||
return s.close();
|
||||
});
|
||||
});
|
||||
co_await s.write("]");
|
||||
co_await s.flush();
|
||||
} catch (...) {
|
||||
ex = std::current_exception();
|
||||
}
|
||||
co_await s.close();
|
||||
if (ex) {
|
||||
co_await coroutine::return_exception_ptr(std::move(ex));
|
||||
}
|
||||
});
|
||||
});
|
||||
};
|
||||
return make_ready_future<json::json_return_type>(std::move(f));
|
||||
});
|
||||
|
||||
cm::get_compaction_info.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
cm::get_compaction_info.set(r, [] (std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
// FIXME
|
||||
warn(unimplemented::cause::API);
|
||||
|
||||
@@ -12,6 +12,6 @@
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_compaction_manager(http_context& ctx, httpd::routes& r);
|
||||
void set_compaction_manager(http_context& ctx, routes& r);
|
||||
|
||||
}
|
||||
|
||||
@@ -13,7 +13,6 @@
|
||||
#include <boost/algorithm/string/replace.hpp>
|
||||
|
||||
namespace api {
|
||||
using namespace seastar::httpd;
|
||||
|
||||
template<class T>
|
||||
json::json_return_type get_json_return_type(const T& val) {
|
||||
@@ -45,7 +44,7 @@ future<> get_config_swagger_entry(std::string_view name, const std::string& desc
|
||||
} else {
|
||||
ss <<',';
|
||||
};
|
||||
ss << "\"/v2/config/" << name <<"\": {"
|
||||
ss << "\"/config/" << name <<"\": {"
|
||||
"\"get\": {"
|
||||
"\"description\": \"" << boost::replace_all_copy(boost::replace_all_copy(boost::replace_all_copy(description,"\n","\\n"),"\"", "''"), "\t", " ") <<"\","
|
||||
"\"operationId\": \"find_config_"<< name <<"\","
|
||||
@@ -76,9 +75,9 @@ future<> get_config_swagger_entry(std::string_view name, const std::string& desc
|
||||
|
||||
namespace cs = httpd::config_json;
|
||||
|
||||
void set_config(std::shared_ptr < api_registry_builder20 > rb, http_context& ctx, routes& r, const db::config& cfg, bool first) {
|
||||
rb->register_function(r, [&cfg, first] (output_stream<char>& os) {
|
||||
return do_with(first, [&os, &cfg] (bool& first) {
|
||||
void set_config(std::shared_ptr < api_registry_builder20 > rb, http_context& ctx, routes& r, const db::config& cfg) {
|
||||
rb->register_function(r, [&cfg] (output_stream<char>& os) {
|
||||
return do_with(true, [&os, &cfg] (bool& first) {
|
||||
auto f = make_ready_future();
|
||||
for (auto&& cfg_ref : cfg.values()) {
|
||||
auto&& cfg = cfg_ref.get();
|
||||
@@ -91,7 +90,7 @@ void set_config(std::shared_ptr < api_registry_builder20 > rb, http_context& ctx
|
||||
});
|
||||
|
||||
cs::find_config_id.set(r, [&cfg] (const_req r) {
|
||||
auto id = r.get_path_param("id");
|
||||
auto id = r.param["id"];
|
||||
for (auto&& cfg_ref : cfg.values()) {
|
||||
auto&& cfg = cfg_ref.get();
|
||||
if (id == cfg.name()) {
|
||||
|
||||
@@ -13,5 +13,5 @@
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_config(std::shared_ptr<httpd::api_registry_builder20> rb, http_context& ctx, httpd::routes& r, const db::config& cfg, bool first = false);
|
||||
void set_config(std::shared_ptr<api_registry_builder20> rb, http_context& ctx, routes& r, const db::config& cfg);
|
||||
}
|
||||
|
||||
@@ -6,64 +6,30 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include "locator/token_metadata.hh"
|
||||
#include "locator/snitch_base.hh"
|
||||
#include "locator/production_snitch_base.hh"
|
||||
#include "endpoint_snitch.hh"
|
||||
#include "api/api-doc/endpoint_snitch_info.json.hh"
|
||||
#include "api/api-doc/storage_service.json.hh"
|
||||
#include "utils/fb_utilities.hh"
|
||||
|
||||
namespace api {
|
||||
using namespace seastar::httpd;
|
||||
|
||||
void set_endpoint_snitch(http_context& ctx, routes& r, sharded<locator::snitch_ptr>& snitch) {
|
||||
void set_endpoint_snitch(http_context& ctx, routes& r) {
|
||||
static auto host_or_broadcast = [](const_req req) {
|
||||
auto host = req.get_query_param("host");
|
||||
return host.empty() ? gms::inet_address(utils::fb_utilities::get_broadcast_address()) : gms::inet_address(host);
|
||||
};
|
||||
|
||||
httpd::endpoint_snitch_info_json::get_datacenter.set(r, [&ctx](const_req req) {
|
||||
auto& topology = ctx.shared_token_metadata.local().get()->get_topology();
|
||||
auto ep = host_or_broadcast(req);
|
||||
if (!topology.has_endpoint(ep)) {
|
||||
// Cannot return error here, nodetool status can race, request
|
||||
// info about just-left node and not handle it nicely
|
||||
return locator::endpoint_dc_rack::default_location.dc;
|
||||
}
|
||||
return topology.get_datacenter(ep);
|
||||
httpd::endpoint_snitch_info_json::get_datacenter.set(r, [](const_req req) {
|
||||
return locator::i_endpoint_snitch::get_local_snitch_ptr()->get_datacenter(host_or_broadcast(req));
|
||||
});
|
||||
|
||||
httpd::endpoint_snitch_info_json::get_rack.set(r, [&ctx](const_req req) {
|
||||
auto& topology = ctx.shared_token_metadata.local().get()->get_topology();
|
||||
auto ep = host_or_broadcast(req);
|
||||
if (!topology.has_endpoint(ep)) {
|
||||
// Cannot return error here, nodetool status can race, request
|
||||
// info about just-left node and not handle it nicely
|
||||
return locator::endpoint_dc_rack::default_location.rack;
|
||||
}
|
||||
return topology.get_rack(ep);
|
||||
httpd::endpoint_snitch_info_json::get_rack.set(r, [](const_req req) {
|
||||
return locator::i_endpoint_snitch::get_local_snitch_ptr()->get_rack(host_or_broadcast(req));
|
||||
});
|
||||
|
||||
httpd::endpoint_snitch_info_json::get_snitch_name.set(r, [&snitch] (const_req req) {
|
||||
return snitch.local()->get_name();
|
||||
httpd::endpoint_snitch_info_json::get_snitch_name.set(r, [] (const_req req) {
|
||||
return locator::i_endpoint_snitch::get_local_snitch_ptr()->get_name();
|
||||
});
|
||||
|
||||
httpd::storage_service_json::update_snitch.set(r, [&snitch](std::unique_ptr<request> req) {
|
||||
locator::snitch_config cfg;
|
||||
cfg.name = req->get_query_param("ep_snitch_class_name");
|
||||
return locator::i_endpoint_snitch::reset_snitch(snitch, cfg).then([] {
|
||||
return make_ready_future<json::json_return_type>(json::json_void());
|
||||
});
|
||||
});
|
||||
|
||||
}
|
||||
|
||||
void unset_endpoint_snitch(http_context& ctx, routes& r) {
|
||||
httpd::endpoint_snitch_info_json::get_datacenter.unset(r);
|
||||
httpd::endpoint_snitch_info_json::get_rack.unset(r);
|
||||
httpd::endpoint_snitch_info_json::get_snitch_name.unset(r);
|
||||
httpd::storage_service_json::update_snitch.unset(r);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -10,13 +10,8 @@
|
||||
|
||||
#include "api.hh"
|
||||
|
||||
namespace locator {
|
||||
class snitch_ptr;
|
||||
}
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_endpoint_snitch(http_context& ctx, httpd::routes& r, sharded<locator::snitch_ptr>&);
|
||||
void unset_endpoint_snitch(http_context& ctx, httpd::routes& r);
|
||||
void set_endpoint_snitch(http_context& ctx, routes& r);
|
||||
|
||||
}
|
||||
|
||||
@@ -12,41 +12,21 @@
|
||||
#include <seastar/http/exception.hh>
|
||||
#include "log.hh"
|
||||
#include "utils/error_injection.hh"
|
||||
#include "utils/rjson.hh"
|
||||
#include <seastar/core/future-util.hh>
|
||||
#include <seastar/util/short_streams.hh>
|
||||
#include "seastar/core/future-util.hh"
|
||||
|
||||
namespace api {
|
||||
using namespace seastar::httpd;
|
||||
|
||||
namespace hf = httpd::error_injection_json;
|
||||
|
||||
void set_error_injection(http_context& ctx, routes& r) {
|
||||
|
||||
hf::enable_injection.set(r, [](std::unique_ptr<request> req) {
|
||||
sstring injection = req->get_path_param("injection");
|
||||
sstring injection = req->param["injection"];
|
||||
bool one_shot = req->get_query_param("one_shot") == "True";
|
||||
auto params = req->content;
|
||||
|
||||
const size_t max_params_size = 1024 * 1024;
|
||||
if (params.size() > max_params_size) {
|
||||
// This is a hard limit, because we don't want to allocate
|
||||
// too much memory or block the thread for too long.
|
||||
throw httpd::bad_param_exception(format("Injection parameters are too long, max length is {}", max_params_size));
|
||||
}
|
||||
|
||||
try {
|
||||
auto parameters = params.empty()
|
||||
? utils::error_injection_parameters{}
|
||||
: rjson::parse_to_map<utils::error_injection_parameters>(params);
|
||||
|
||||
auto& errinj = utils::get_local_injector();
|
||||
return errinj.enable_on_all(injection, one_shot, std::move(parameters)).then([] {
|
||||
return make_ready_future<json::json_return_type>(json::json_void());
|
||||
});
|
||||
} catch (const rjson::error& e) {
|
||||
throw httpd::bad_param_exception(format("Failed to parse injections parameters: {}", e.what()));
|
||||
}
|
||||
auto& errinj = utils::get_local_injector();
|
||||
return errinj.enable_on_all(injection, one_shot).then([] {
|
||||
return make_ready_future<json::json_return_type>(json::json_void());
|
||||
});
|
||||
});
|
||||
|
||||
hf::get_enabled_injections_on_all.set(r, [](std::unique_ptr<request> req) {
|
||||
@@ -56,7 +36,7 @@ void set_error_injection(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
hf::disable_injection.set(r, [](std::unique_ptr<request> req) {
|
||||
sstring injection = req->get_path_param("injection");
|
||||
sstring injection = req->param["injection"];
|
||||
|
||||
auto& errinj = utils::get_local_injector();
|
||||
return errinj.disable_on_all(injection).then([] {
|
||||
@@ -71,13 +51,6 @@ void set_error_injection(http_context& ctx, routes& r) {
|
||||
});
|
||||
});
|
||||
|
||||
hf::message_injection.set(r, [](std::unique_ptr<request> req) {
|
||||
sstring injection = req->get_path_param("injection");
|
||||
auto& errinj = utils::get_local_injector();
|
||||
return errinj.receive_message_on_all(injection).then([] {
|
||||
return make_ready_future<json::json_return_type>(json::json_void());
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
} // namespace api
|
||||
|
||||
@@ -12,6 +12,6 @@
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_error_injection(http_context& ctx, httpd::routes& r);
|
||||
void set_error_injection(http_context& ctx, routes& r);
|
||||
|
||||
}
|
||||
|
||||
@@ -8,93 +8,86 @@
|
||||
|
||||
#include "failure_detector.hh"
|
||||
#include "api/api-doc/failure_detector.json.hh"
|
||||
#include "gms/failure_detector.hh"
|
||||
#include "gms/application_state.hh"
|
||||
#include "gms/gossiper.hh"
|
||||
|
||||
namespace api {
|
||||
using namespace seastar::httpd;
|
||||
|
||||
namespace fd = httpd::failure_detector_json;
|
||||
|
||||
void set_failure_detector(http_context& ctx, routes& r, gms::gossiper& g) {
|
||||
fd::get_all_endpoint_states.set(r, [&g](std::unique_ptr<request> req) {
|
||||
return g.container().invoke_on(0, [] (gms::gossiper& g) {
|
||||
std::vector<fd::endpoint_state> res;
|
||||
res.reserve(g.num_endpoints());
|
||||
g.for_each_endpoint_state([&] (const gms::inet_address& addr, const gms::endpoint_state& eps) {
|
||||
fd::endpoint_state val;
|
||||
val.addrs = fmt::to_string(addr);
|
||||
val.is_alive = g.is_alive(addr);
|
||||
val.generation = eps.get_heart_beat_state().get_generation().value();
|
||||
val.version = eps.get_heart_beat_state().get_heart_beat_version().value();
|
||||
val.update_time = eps.get_update_timestamp().time_since_epoch().count();
|
||||
for (const auto& [as_type, app_state] : eps.get_application_state_map()) {
|
||||
fd::version_value version_val;
|
||||
// We return the enum index and not it's name to stay compatible to origin
|
||||
// method that the state index are static but the name can be changed.
|
||||
version_val.application_state = static_cast<std::underlying_type<gms::application_state>::type>(as_type);
|
||||
version_val.value = app_state.value();
|
||||
version_val.version = app_state.version().value();
|
||||
val.application_state.push(version_val);
|
||||
}
|
||||
res.emplace_back(std::move(val));
|
||||
});
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
std::vector<fd::endpoint_state> res;
|
||||
for (auto i : g.endpoint_state_map) {
|
||||
fd::endpoint_state val;
|
||||
val.addrs = boost::lexical_cast<std::string>(i.first);
|
||||
val.is_alive = i.second.is_alive();
|
||||
val.generation = i.second.get_heart_beat_state().get_generation();
|
||||
val.version = i.second.get_heart_beat_state().get_heart_beat_version();
|
||||
val.update_time = i.second.get_update_timestamp().time_since_epoch().count();
|
||||
for (auto a : i.second.get_application_state_map()) {
|
||||
fd::version_value version_val;
|
||||
// We return the enum index and not it's name to stay compatible to origin
|
||||
// method that the state index are static but the name can be changed.
|
||||
version_val.application_state = static_cast<std::underlying_type<gms::application_state>::type>(a.first);
|
||||
version_val.value = a.second.value;
|
||||
version_val.version = a.second.version;
|
||||
val.application_state.push(version_val);
|
||||
}
|
||||
res.push_back(val);
|
||||
}
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
|
||||
fd::get_up_endpoint_count.set(r, [&g](std::unique_ptr<request> req) {
|
||||
return g.container().invoke_on(0, [] (gms::gossiper& g) {
|
||||
int res = g.get_up_endpoint_count();
|
||||
return gms::get_up_endpoint_count(g).then([](int res) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
});
|
||||
|
||||
fd::get_down_endpoint_count.set(r, [&g](std::unique_ptr<request> req) {
|
||||
return g.container().invoke_on(0, [] (gms::gossiper& g) {
|
||||
int res = g.get_down_endpoint_count();
|
||||
return gms::get_down_endpoint_count(g).then([](int res) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
});
|
||||
|
||||
fd::get_phi_convict_threshold.set(r, [] (std::unique_ptr<request> req) {
|
||||
return make_ready_future<json::json_return_type>(8);
|
||||
return gms::get_phi_convict_threshold().then([](double res) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
});
|
||||
|
||||
fd::get_simple_states.set(r, [&g] (std::unique_ptr<request> req) {
|
||||
return g.container().invoke_on(0, [] (gms::gossiper& g) {
|
||||
std::map<sstring, sstring> nodes_status;
|
||||
g.for_each_endpoint_state([&] (const gms::inet_address& node, const gms::endpoint_state&) {
|
||||
nodes_status.emplace(node.to_sstring(), g.is_alive(node) ? "UP" : "DOWN");
|
||||
});
|
||||
return make_ready_future<json::json_return_type>(map_to_key_value<fd::mapper>(nodes_status));
|
||||
return gms::get_simple_states(g).then([](const std::map<sstring, sstring>& map) {
|
||||
return make_ready_future<json::json_return_type>(map_to_key_value<fd::mapper>(map));
|
||||
});
|
||||
});
|
||||
|
||||
fd::set_phi_convict_threshold.set(r, [](std::unique_ptr<request> req) {
|
||||
// TBD
|
||||
unimplemented();
|
||||
std::ignore = atof(req->get_query_param("phi").c_str());
|
||||
return make_ready_future<json::json_return_type>("");
|
||||
double phi = atof(req->get_query_param("phi").c_str());
|
||||
return gms::set_phi_convict_threshold(phi).then([]() {
|
||||
return make_ready_future<json::json_return_type>("");
|
||||
});
|
||||
});
|
||||
|
||||
fd::get_endpoint_state.set(r, [&g] (std::unique_ptr<request> req) {
|
||||
return g.container().invoke_on(0, [req = std::move(req)] (gms::gossiper& g) {
|
||||
auto state = g.get_endpoint_state_ptr(gms::inet_address(req->get_path_param("addr")));
|
||||
if (!state) {
|
||||
return make_ready_future<json::json_return_type>(format("unknown endpoint {}", req->get_path_param("addr")));
|
||||
}
|
||||
std::stringstream ss;
|
||||
g.append_endpoint_state(ss, *state);
|
||||
return make_ready_future<json::json_return_type>(sstring(ss.str()));
|
||||
return get_endpoint_state(g, req->param["addr"]).then([](const sstring& state) {
|
||||
return make_ready_future<json::json_return_type>(state);
|
||||
});
|
||||
});
|
||||
|
||||
fd::get_endpoint_phi_values.set(r, [](std::unique_ptr<request> req) {
|
||||
// We no longer have a phi failure detector,
|
||||
// just returning the empty value is good enough.
|
||||
std::vector<fd::endpoint_phi_value> res;
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
return gms::get_arrival_samples().then([](std::map<gms::inet_address, gms::arrival_window> map) {
|
||||
std::vector<fd::endpoint_phi_value> res;
|
||||
auto now = gms::arrival_window::clk::now();
|
||||
for (auto& p : map) {
|
||||
fd::endpoint_phi_value val;
|
||||
val.endpoint = p.first.to_sstring();
|
||||
val.phi = p.second.phi(now);
|
||||
res.emplace_back(std::move(val));
|
||||
}
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -18,6 +18,6 @@ class gossiper;
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_failure_detector(http_context& ctx, httpd::routes& r, gms::gossiper& g);
|
||||
void set_failure_detector(http_context& ctx, routes& r, gms::gossiper& g);
|
||||
|
||||
}
|
||||
|
||||
@@ -6,65 +6,57 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include <seastar/core/coroutine.hh>
|
||||
|
||||
#include "gossiper.hh"
|
||||
#include "api/api-doc/gossiper.json.hh"
|
||||
#include "gms/endpoint_state.hh"
|
||||
#include "gms/gossiper.hh"
|
||||
|
||||
namespace api {
|
||||
using namespace seastar::httpd;
|
||||
using namespace json;
|
||||
|
||||
void set_gossiper(http_context& ctx, routes& r, gms::gossiper& g) {
|
||||
httpd::gossiper_json::get_down_endpoint.set(r, [&g] (std::unique_ptr<request> req) -> future<json::json_return_type> {
|
||||
auto res = co_await g.get_unreachable_members_synchronized();
|
||||
co_return json::json_return_type(container_to_vec(res));
|
||||
httpd::gossiper_json::get_down_endpoint.set(r, [&g] (const_req req) {
|
||||
auto res = g.get_unreachable_members();
|
||||
return container_to_vec(res);
|
||||
});
|
||||
|
||||
httpd::gossiper_json::get_live_endpoint.set(r, [&g] (const_req req) {
|
||||
auto res = g.get_live_members();
|
||||
return container_to_vec(res);
|
||||
});
|
||||
|
||||
httpd::gossiper_json::get_live_endpoint.set(r, [&g] (std::unique_ptr<request> req) {
|
||||
return g.get_live_members_synchronized().then([] (auto res) {
|
||||
return make_ready_future<json::json_return_type>(container_to_vec(res));
|
||||
httpd::gossiper_json::get_endpoint_downtime.set(r, [&g] (const_req req) {
|
||||
gms::inet_address ep(req.param["addr"]);
|
||||
return g.get_endpoint_downtime(ep);
|
||||
});
|
||||
|
||||
httpd::gossiper_json::get_current_generation_number.set(r, [&g] (std::unique_ptr<request> req) {
|
||||
gms::inet_address ep(req->param["addr"]);
|
||||
return g.get_current_generation_number(ep).then([] (int res) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
});
|
||||
|
||||
httpd::gossiper_json::get_endpoint_downtime.set(r, [&g] (std::unique_ptr<request> req) -> future<json::json_return_type> {
|
||||
gms::inet_address ep(req->get_path_param("addr"));
|
||||
// synchronize unreachable_members on all shards
|
||||
co_await g.get_unreachable_members_synchronized();
|
||||
co_return g.get_endpoint_downtime(ep);
|
||||
});
|
||||
|
||||
httpd::gossiper_json::get_current_generation_number.set(r, [&g] (std::unique_ptr<http::request> req) {
|
||||
gms::inet_address ep(req->get_path_param("addr"));
|
||||
return g.get_current_generation_number(ep).then([] (gms::generation_type res) {
|
||||
return make_ready_future<json::json_return_type>(res.value());
|
||||
httpd::gossiper_json::get_current_heart_beat_version.set(r, [&g] (std::unique_ptr<request> req) {
|
||||
gms::inet_address ep(req->param["addr"]);
|
||||
return g.get_current_heart_beat_version(ep).then([] (int res) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
});
|
||||
|
||||
httpd::gossiper_json::get_current_heart_beat_version.set(r, [&g] (std::unique_ptr<http::request> req) {
|
||||
gms::inet_address ep(req->get_path_param("addr"));
|
||||
return g.get_current_heart_beat_version(ep).then([] (gms::version_type res) {
|
||||
return make_ready_future<json::json_return_type>(res.value());
|
||||
});
|
||||
});
|
||||
|
||||
httpd::gossiper_json::assassinate_endpoint.set(r, [&g](std::unique_ptr<http::request> req) {
|
||||
httpd::gossiper_json::assassinate_endpoint.set(r, [&g](std::unique_ptr<request> req) {
|
||||
if (req->get_query_param("unsafe") != "True") {
|
||||
return g.assassinate_endpoint(req->get_path_param("addr")).then([] {
|
||||
return g.assassinate_endpoint(req->param["addr"]).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
}
|
||||
return g.unsafe_assassinate_endpoint(req->get_path_param("addr")).then([] {
|
||||
return g.unsafe_assassinate_endpoint(req->param["addr"]).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
httpd::gossiper_json::force_remove_endpoint.set(r, [&g](std::unique_ptr<http::request> req) {
|
||||
gms::inet_address ep(req->get_path_param("addr"));
|
||||
return g.force_remove_endpoint(ep, gms::null_permit_id).then([] {
|
||||
httpd::gossiper_json::force_remove_endpoint.set(r, [&g](std::unique_ptr<request> req) {
|
||||
gms::inet_address ep(req->param["addr"]);
|
||||
return g.force_remove_endpoint(ep).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
@@ -18,6 +18,6 @@ class gossiper;
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_gossiper(http_context& ctx, httpd::routes& r, gms::gossiper& g);
|
||||
void set_gossiper(http_context& ctx, routes& r, gms::gossiper& g);
|
||||
|
||||
}
|
||||
|
||||
@@ -13,41 +13,46 @@
|
||||
#include "api/api-doc/hinted_handoff.json.hh"
|
||||
|
||||
#include "gms/inet_address.hh"
|
||||
#include "gms/gossiper.hh"
|
||||
#include "service/storage_proxy.hh"
|
||||
|
||||
namespace api {
|
||||
|
||||
using namespace json;
|
||||
using namespace seastar::httpd;
|
||||
namespace hh = httpd::hinted_handoff_json;
|
||||
|
||||
void set_hinted_handoff(http_context& ctx, routes& r, sharded<service::storage_proxy>& proxy) {
|
||||
hh::create_hints_sync_point.set(r, [&proxy] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto parse_hosts_list = [] (sstring arg) {
|
||||
void set_hinted_handoff(http_context& ctx, routes& r, gms::gossiper& g) {
|
||||
hh::create_hints_sync_point.set(r, [&ctx, &g] (std::unique_ptr<request> req) -> future<json::json_return_type> {
|
||||
auto parse_hosts_list = [&g] (sstring arg) {
|
||||
std::vector<sstring> hosts_str = split(arg, ",");
|
||||
std::vector<gms::inet_address> hosts;
|
||||
hosts.reserve(hosts_str.size());
|
||||
|
||||
for (const auto& host_str : hosts_str) {
|
||||
try {
|
||||
gms::inet_address host;
|
||||
host = gms::inet_address(host_str);
|
||||
hosts.push_back(host);
|
||||
} catch (std::exception& e) {
|
||||
throw httpd::bad_param_exception(format("Failed to parse host address {}: {}", host_str, e.what()));
|
||||
if (hosts_str.empty()) {
|
||||
// No target_hosts specified means that we should wait for hints for all nodes to be sent
|
||||
const auto members_set = g.get_live_members();
|
||||
std::copy(members_set.begin(), members_set.end(), std::back_inserter(hosts));
|
||||
} else {
|
||||
for (const auto& host_str : hosts_str) {
|
||||
try {
|
||||
gms::inet_address host;
|
||||
host = gms::inet_address(host_str);
|
||||
hosts.push_back(host);
|
||||
} catch (std::exception& e) {
|
||||
throw httpd::bad_param_exception(format("Failed to parse host address {}: {}", host_str, e.what()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return hosts;
|
||||
};
|
||||
|
||||
std::vector<gms::inet_address> target_hosts = parse_hosts_list(req->get_query_param("target_hosts"));
|
||||
return proxy.local().create_hint_sync_point(std::move(target_hosts)).then([] (db::hints::sync_point sync_point) {
|
||||
return ctx.sp.local().create_hint_sync_point(std::move(target_hosts)).then([] (db::hints::sync_point sync_point) {
|
||||
return json::json_return_type(sync_point.encode());
|
||||
});
|
||||
});
|
||||
|
||||
hh::get_hints_sync_point.set(r, [&proxy] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
hh::get_hints_sync_point.set(r, [&ctx] (std::unique_ptr<request> req) -> future<json::json_return_type> {
|
||||
db::hints::sync_point sync_point;
|
||||
const sstring encoded = req->get_query_param("id");
|
||||
try {
|
||||
@@ -81,49 +86,49 @@ void set_hinted_handoff(http_context& ctx, routes& r, sharded<service::storage_p
|
||||
using return_type = hh::ns_get_hints_sync_point::get_hints_sync_point_return_type;
|
||||
using return_type_wrapper = hh::ns_get_hints_sync_point::return_type_wrapper;
|
||||
|
||||
return proxy.local().wait_for_hint_sync_point(std::move(sync_point), deadline).then([] {
|
||||
return ctx.sp.local().wait_for_hint_sync_point(std::move(sync_point), deadline).then([] {
|
||||
return json::json_return_type(return_type_wrapper(return_type::DONE));
|
||||
}).handle_exception_type([] (const timed_out_error&) {
|
||||
return json::json_return_type(return_type_wrapper(return_type::IN_PROGRESS));
|
||||
});
|
||||
});
|
||||
|
||||
hh::list_endpoints_pending_hints.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
hh::list_endpoints_pending_hints.set(r, [] (std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
std::vector<sstring> res;
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
|
||||
hh::truncate_all_hints.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
hh::truncate_all_hints.set(r, [] (std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
sstring host = req->get_query_param("host");
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
hh::schedule_hint_delivery.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
hh::schedule_hint_delivery.set(r, [] (std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
sstring host = req->get_query_param("host");
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
hh::pause_hints_delivery.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
hh::pause_hints_delivery.set(r, [] (std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
sstring pause = req->get_query_param("pause");
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
hh::get_create_hint_count.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
hh::get_create_hint_count.set(r, [] (std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
sstring host = req->get_query_param("host");
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
hh::get_not_stored_hints_count.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
hh::get_not_stored_hints_count.set(r, [] (std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
sstring host = req->get_query_param("host");
|
||||
|
||||
@@ -8,14 +8,17 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <seastar/core/sharded.hh>
|
||||
#include "api.hh"
|
||||
|
||||
namespace service { class storage_proxy; }
|
||||
namespace gms {
|
||||
|
||||
class gossiper;
|
||||
|
||||
}
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_hinted_handoff(http_context& ctx, httpd::routes& r, sharded<service::storage_proxy>& p);
|
||||
void unset_hinted_handoff(http_context& ctx, httpd::routes& r);
|
||||
void set_hinted_handoff(http_context& ctx, routes& r, gms::gossiper& g);
|
||||
void unset_hinted_handoff(http_context& ctx, routes& r);
|
||||
|
||||
}
|
||||
|
||||
@@ -16,7 +16,6 @@
|
||||
#include "replica/database.hh"
|
||||
|
||||
namespace api {
|
||||
using namespace seastar::httpd;
|
||||
|
||||
static logging::logger alogger("lsa-api");
|
||||
|
||||
|
||||
@@ -12,6 +12,6 @@
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_lsa(http_context& ctx, httpd::routes& r);
|
||||
void set_lsa(http_context& ctx, routes& r);
|
||||
|
||||
}
|
||||
|
||||
@@ -13,7 +13,6 @@
|
||||
#include <iostream>
|
||||
#include <sstream>
|
||||
|
||||
using namespace seastar::httpd;
|
||||
using namespace httpd::messaging_service_json;
|
||||
using namespace netw;
|
||||
|
||||
@@ -29,7 +28,7 @@ std::vector<message_counter> map_to_message_counters(
|
||||
std::vector<message_counter> res;
|
||||
for (auto i : map) {
|
||||
res.push_back(message_counter());
|
||||
res.back().key = fmt::to_string(i.first);
|
||||
res.back().key = boost::lexical_cast<sstring>(i.first);
|
||||
res.back().value = i.second;
|
||||
}
|
||||
return res;
|
||||
|
||||
@@ -14,7 +14,7 @@ namespace netw { class messaging_service; }
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_messaging_service(http_context& ctx, httpd::routes& r, sharded<netw::messaging_service>& ms);
|
||||
void unset_messaging_service(http_context& ctx, httpd::routes& r);
|
||||
void set_messaging_service(http_context& ctx, routes& r, sharded<netw::messaging_service>& ms);
|
||||
void unset_messaging_service(http_context& ctx, routes& r);
|
||||
|
||||
}
|
||||
|
||||
70
api/raft.cc
70
api/raft.cc
@@ -1,70 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2024-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include <seastar/core/coroutine.hh>
|
||||
|
||||
#include "api/api.hh"
|
||||
#include "api/api-doc/raft.json.hh"
|
||||
|
||||
#include "service/raft/raft_group_registry.hh"
|
||||
|
||||
using namespace seastar::httpd;
|
||||
|
||||
extern logging::logger apilog;
|
||||
|
||||
namespace api {
|
||||
|
||||
namespace r = httpd::raft_json;
|
||||
using namespace json;
|
||||
|
||||
void set_raft(http_context&, httpd::routes& r, sharded<service::raft_group_registry>& raft_gr) {
|
||||
r::trigger_snapshot.set(r, [&raft_gr] (std::unique_ptr<http::request> req) -> future<json_return_type> {
|
||||
raft::group_id gid{utils::UUID{req->get_path_param("group_id")}};
|
||||
auto timeout_dur = std::invoke([timeout_str = req->get_query_param("timeout")] {
|
||||
if (timeout_str.empty()) {
|
||||
return std::chrono::seconds{60};
|
||||
}
|
||||
auto dur = std::stoll(timeout_str);
|
||||
if (dur <= 0) {
|
||||
throw std::runtime_error{"Timeout must be a positive number."};
|
||||
}
|
||||
return std::chrono::seconds{dur};
|
||||
});
|
||||
|
||||
std::atomic<bool> found_srv{false};
|
||||
co_await raft_gr.invoke_on_all([gid, timeout_dur, &found_srv] (service::raft_group_registry& raft_gr) -> future<> {
|
||||
auto* srv = raft_gr.find_server(gid);
|
||||
if (!srv) {
|
||||
co_return;
|
||||
}
|
||||
|
||||
found_srv = true;
|
||||
abort_on_expiry aoe(lowres_clock::now() + timeout_dur);
|
||||
apilog.info("Triggering Raft group {} snapshot", gid);
|
||||
auto result = co_await srv->trigger_snapshot(&aoe.abort_source());
|
||||
if (result) {
|
||||
apilog.info("New snapshot for Raft group {} created", gid);
|
||||
} else {
|
||||
apilog.info("Could not create new snapshot for Raft group {}, no new entries applied", gid);
|
||||
}
|
||||
});
|
||||
|
||||
if (!found_srv) {
|
||||
throw std::runtime_error{fmt::format("Server for group ID {} not found", gid)};
|
||||
}
|
||||
|
||||
co_return json_void{};
|
||||
});
|
||||
}
|
||||
|
||||
void unset_raft(http_context&, httpd::routes& r) {
|
||||
r::trigger_snapshot.unset(r);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
18
api/raft.hh
18
api/raft.hh
@@ -1,18 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2023-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "api_init.hh"
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_raft(http_context& ctx, httpd::routes& r, sharded<service::raft_group_registry>& raft_gr);
|
||||
void unset_raft(http_context& ctx, httpd::routes& r);
|
||||
|
||||
}
|
||||
@@ -10,21 +10,18 @@
|
||||
#include "service/storage_proxy.hh"
|
||||
#include "api/api-doc/storage_proxy.json.hh"
|
||||
#include "api/api-doc/utils.json.hh"
|
||||
#include "service/storage_service.hh"
|
||||
#include "db/config.hh"
|
||||
#include "utils/histogram.hh"
|
||||
#include "replica/database.hh"
|
||||
#include <seastar/core/scheduling_specific.hh>
|
||||
#include "seastar/core/scheduling_specific.hh"
|
||||
|
||||
namespace api {
|
||||
|
||||
namespace sp = httpd::storage_proxy_json;
|
||||
using proxy = service::storage_proxy;
|
||||
using namespace seastar::httpd;
|
||||
using namespace json;
|
||||
|
||||
utils::time_estimated_histogram timed_rate_moving_average_summary_merge(utils::time_estimated_histogram a, const utils::timed_rate_moving_average_summary_and_histogram& b) {
|
||||
return a.merge(b.histogram());
|
||||
}
|
||||
|
||||
/**
|
||||
* This function implement a two dimentional map reduce where
|
||||
@@ -58,10 +55,10 @@ future<V> two_dimensional_map_reduce(distributed<service::storage_proxy>& d,
|
||||
* @param initial_value - the initial value to use for both aggregations* @return
|
||||
* @return A future that resolves to the result of the aggregation.
|
||||
*/
|
||||
template<typename V, typename Reducer, typename F, typename C>
|
||||
template<typename V, typename Reducer, typename F>
|
||||
future<V> two_dimensional_map_reduce(distributed<service::storage_proxy>& d,
|
||||
C F::*f, Reducer reducer, V initial_value) {
|
||||
return two_dimensional_map_reduce(d, [f] (F& stats) -> V {
|
||||
V F::*f, Reducer reducer, V initial_value) {
|
||||
return two_dimensional_map_reduce(d, [f] (F& stats) {
|
||||
return stats.*f;
|
||||
}, reducer, initial_value);
|
||||
}
|
||||
@@ -115,17 +112,17 @@ utils_json::estimated_histogram time_to_json_histogram(const utils::time_estimat
|
||||
return res;
|
||||
}
|
||||
|
||||
static future<json::json_return_type> sum_estimated_histogram(sharded<service::storage_proxy>& proxy, utils::timed_rate_moving_average_summary_and_histogram service::storage_proxy_stats::stats::*f) {
|
||||
return two_dimensional_map_reduce(proxy, [f] (service::storage_proxy_stats::stats& stats) {
|
||||
return (stats.*f).histogram();
|
||||
}, utils::time_estimated_histogram_merge, utils::time_estimated_histogram()).then([](const utils::time_estimated_histogram& val) {
|
||||
static future<json::json_return_type> sum_estimated_histogram(http_context& ctx, utils::time_estimated_histogram service::storage_proxy_stats::stats::*f) {
|
||||
|
||||
return two_dimensional_map_reduce(ctx.sp, f, utils::time_estimated_histogram_merge,
|
||||
utils::time_estimated_histogram()).then([](const utils::time_estimated_histogram& val) {
|
||||
return make_ready_future<json::json_return_type>(time_to_json_histogram(val));
|
||||
});
|
||||
}
|
||||
|
||||
static future<json::json_return_type> sum_estimated_histogram(sharded<service::storage_proxy>& proxy, utils::estimated_histogram service::storage_proxy_stats::stats::*f) {
|
||||
static future<json::json_return_type> sum_estimated_histogram(http_context& ctx, utils::estimated_histogram service::storage_proxy_stats::stats::*f) {
|
||||
|
||||
return two_dimensional_map_reduce(proxy, f, utils::estimated_histogram_merge,
|
||||
return two_dimensional_map_reduce(ctx.sp, f, utils::estimated_histogram_merge,
|
||||
utils::estimated_histogram()).then([](const utils::estimated_histogram& val) {
|
||||
utils_json::estimated_histogram res;
|
||||
res = val;
|
||||
@@ -133,8 +130,8 @@ static future<json::json_return_type> sum_estimated_histogram(sharded<service::
|
||||
});
|
||||
}
|
||||
|
||||
static future<json::json_return_type> total_latency(sharded<service::storage_proxy>& proxy, utils::timed_rate_moving_average_summary_and_histogram service::storage_proxy_stats::stats::*f) {
|
||||
return two_dimensional_map_reduce(proxy, [f] (service::storage_proxy_stats::stats& stats) {
|
||||
static future<json::json_return_type> total_latency(http_context& ctx, utils::timed_rate_moving_average_and_histogram service::storage_proxy_stats::stats::*f) {
|
||||
return two_dimensional_map_reduce(ctx.sp, [f] (service::storage_proxy_stats::stats& stats) {
|
||||
return (stats.*f).hist.mean * (stats.*f).hist.count;
|
||||
}, std::plus<double>(), 0.0).then([](double val) {
|
||||
int64_t res = val;
|
||||
@@ -153,7 +150,7 @@ static future<json::json_return_type> total_latency(sharded<service::storage_pr
|
||||
template<typename F>
|
||||
future<json::json_return_type>
|
||||
sum_histogram_stats_storage_proxy(distributed<proxy>& d,
|
||||
utils::timed_rate_moving_average_summary_and_histogram F::*f) {
|
||||
utils::timed_rate_moving_average_and_histogram F::*f) {
|
||||
return two_dimensional_map_reduce(d, [f] (service::storage_proxy_stats::stats& stats) {
|
||||
return (stats.*f).hist;
|
||||
}, std::plus<utils::ihistogram>(), utils::ihistogram()).
|
||||
@@ -173,7 +170,7 @@ sum_histogram_stats_storage_proxy(distributed<proxy>& d,
|
||||
template<typename F>
|
||||
future<json::json_return_type>
|
||||
sum_timer_stats_storage_proxy(distributed<proxy>& d,
|
||||
utils::timed_rate_moving_average_summary_and_histogram F::*f) {
|
||||
utils::timed_rate_moving_average_and_histogram F::*f) {
|
||||
|
||||
return two_dimensional_map_reduce(d, [f] (service::storage_proxy_stats::stats& stats) {
|
||||
return (stats.*f).rate();
|
||||
@@ -183,76 +180,76 @@ sum_timer_stats_storage_proxy(distributed<proxy>& d,
|
||||
});
|
||||
}
|
||||
|
||||
void set_storage_proxy(http_context& ctx, routes& r, sharded<service::storage_proxy>& proxy) {
|
||||
sp::get_total_hints.set(r, [](std::unique_ptr<http::request> req) {
|
||||
void set_storage_proxy(http_context& ctx, routes& r, sharded<service::storage_service>& ss) {
|
||||
sp::get_total_hints.set(r, [](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
sp::get_hinted_handoff_enabled.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
const auto& filter = proxy.local().get_hints_host_filter();
|
||||
sp::get_hinted_handoff_enabled.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
const auto& filter = service::get_storage_proxy().local().get_hints_host_filter();
|
||||
return make_ready_future<json::json_return_type>(!filter.is_disabled_for_all());
|
||||
});
|
||||
|
||||
sp::set_hinted_handoff_enabled.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
sp::set_hinted_handoff_enabled.set(r, [](std::unique_ptr<request> req) {
|
||||
auto enable = req->get_query_param("enable");
|
||||
auto filter = (enable == "true" || enable == "1")
|
||||
? db::hints::host_filter(db::hints::host_filter::enabled_for_all_tag {})
|
||||
: db::hints::host_filter(db::hints::host_filter::disabled_for_all_tag {});
|
||||
return proxy.invoke_on_all([filter = std::move(filter)] (service::storage_proxy& sp) {
|
||||
return service::get_storage_proxy().invoke_on_all([filter = std::move(filter)] (service::storage_proxy& sp) {
|
||||
return sp.change_hints_host_filter(filter);
|
||||
}).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
sp::get_hinted_handoff_enabled_by_dc.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
sp::get_hinted_handoff_enabled_by_dc.set(r, [](std::unique_ptr<request> req) {
|
||||
std::vector<sstring> res;
|
||||
const auto& filter = proxy.local().get_hints_host_filter();
|
||||
const auto& filter = service::get_storage_proxy().local().get_hints_host_filter();
|
||||
const auto& dcs = filter.get_dcs();
|
||||
res.reserve(res.size());
|
||||
std::copy(dcs.begin(), dcs.end(), std::back_inserter(res));
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
|
||||
sp::set_hinted_handoff_enabled_by_dc_list.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
sp::set_hinted_handoff_enabled_by_dc_list.set(r, [](std::unique_ptr<request> req) {
|
||||
auto dcs = req->get_query_param("dcs");
|
||||
auto filter = db::hints::host_filter::parse_from_dc_list(std::move(dcs));
|
||||
return proxy.invoke_on_all([filter = std::move(filter)] (service::storage_proxy& sp) {
|
||||
return service::get_storage_proxy().invoke_on_all([filter = std::move(filter)] (service::storage_proxy& sp) {
|
||||
return sp.change_hints_host_filter(filter);
|
||||
}).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
sp::get_max_hint_window.set(r, [](std::unique_ptr<http::request> req) {
|
||||
sp::get_max_hint_window.set(r, [](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
sp::set_max_hint_window.set(r, [](std::unique_ptr<http::request> req) {
|
||||
sp::set_max_hint_window.set(r, [](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
auto enable = req->get_query_param("ms");
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
sp::get_max_hints_in_progress.set(r, [](std::unique_ptr<http::request> req) {
|
||||
sp::get_max_hints_in_progress.set(r, [](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
return make_ready_future<json::json_return_type>(1);
|
||||
});
|
||||
|
||||
sp::set_max_hints_in_progress.set(r, [](std::unique_ptr<http::request> req) {
|
||||
sp::set_max_hints_in_progress.set(r, [](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
auto enable = req->get_query_param("qs");
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
sp::get_hints_in_progress.set(r, [](std::unique_ptr<http::request> req) {
|
||||
sp::get_hints_in_progress.set(r, [](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
@@ -262,7 +259,7 @@ void set_storage_proxy(http_context& ctx, routes& r, sharded<service::storage_pr
|
||||
return ctx.db.local().get_config().request_timeout_in_ms()/1000.0;
|
||||
});
|
||||
|
||||
sp::set_rpc_timeout.set(r, [](std::unique_ptr<http::request> req) {
|
||||
sp::set_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
auto enable = req->get_query_param("timeout");
|
||||
@@ -273,7 +270,7 @@ void set_storage_proxy(http_context& ctx, routes& r, sharded<service::storage_pr
|
||||
return ctx.db.local().get_config().read_request_timeout_in_ms()/1000.0;
|
||||
});
|
||||
|
||||
sp::set_read_rpc_timeout.set(r, [](std::unique_ptr<http::request> req) {
|
||||
sp::set_read_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
auto enable = req->get_query_param("timeout");
|
||||
@@ -284,7 +281,7 @@ void set_storage_proxy(http_context& ctx, routes& r, sharded<service::storage_pr
|
||||
return ctx.db.local().get_config().write_request_timeout_in_ms()/1000.0;
|
||||
});
|
||||
|
||||
sp::set_write_rpc_timeout.set(r, [](std::unique_ptr<http::request> req) {
|
||||
sp::set_write_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
auto enable = req->get_query_param("timeout");
|
||||
@@ -295,7 +292,7 @@ void set_storage_proxy(http_context& ctx, routes& r, sharded<service::storage_pr
|
||||
return ctx.db.local().get_config().counter_write_request_timeout_in_ms()/1000.0;
|
||||
});
|
||||
|
||||
sp::set_counter_write_rpc_timeout.set(r, [](std::unique_ptr<http::request> req) {
|
||||
sp::set_counter_write_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
auto enable = req->get_query_param("timeout");
|
||||
@@ -306,7 +303,7 @@ void set_storage_proxy(http_context& ctx, routes& r, sharded<service::storage_pr
|
||||
return ctx.db.local().get_config().cas_contention_timeout_in_ms()/1000.0;
|
||||
});
|
||||
|
||||
sp::set_cas_contention_timeout.set(r, [](std::unique_ptr<http::request> req) {
|
||||
sp::set_cas_contention_timeout.set(r, [](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
auto enable = req->get_query_param("timeout");
|
||||
@@ -317,7 +314,7 @@ void set_storage_proxy(http_context& ctx, routes& r, sharded<service::storage_pr
|
||||
return ctx.db.local().get_config().range_request_timeout_in_ms()/1000.0;
|
||||
});
|
||||
|
||||
sp::set_range_rpc_timeout.set(r, [](std::unique_ptr<http::request> req) {
|
||||
sp::set_range_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
auto enable = req->get_query_param("timeout");
|
||||
@@ -328,147 +325,160 @@ void set_storage_proxy(http_context& ctx, routes& r, sharded<service::storage_pr
|
||||
return ctx.db.local().get_config().truncate_request_timeout_in_ms()/1000.0;
|
||||
});
|
||||
|
||||
sp::set_truncate_rpc_timeout.set(r, [](std::unique_ptr<http::request> req) {
|
||||
sp::set_truncate_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
auto enable = req->get_query_param("timeout");
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
sp::reload_trigger_classes.set(r, [](std::unique_ptr<http::request> req) {
|
||||
sp::reload_trigger_classes.set(r, [](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
sp::get_read_repair_attempted.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_stats_storage_proxy(proxy, &service::storage_proxy_stats::stats::read_repair_attempts);
|
||||
sp::get_read_repair_attempted.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_stats_storage_proxy(ctx.sp, &service::storage_proxy_stats::stats::read_repair_attempts);
|
||||
});
|
||||
|
||||
sp::get_read_repair_repaired_blocking.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_stats_storage_proxy(proxy, &service::storage_proxy_stats::stats::read_repair_repaired_blocking);
|
||||
sp::get_read_repair_repaired_blocking.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_stats_storage_proxy(ctx.sp, &service::storage_proxy_stats::stats::read_repair_repaired_blocking);
|
||||
});
|
||||
|
||||
sp::get_read_repair_repaired_background.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_stats_storage_proxy(proxy, &service::storage_proxy_stats::stats::read_repair_repaired_background);
|
||||
sp::get_read_repair_repaired_background.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_stats_storage_proxy(ctx.sp, &service::storage_proxy_stats::stats::read_repair_repaired_background);
|
||||
});
|
||||
|
||||
sp::get_cas_read_timeouts.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_timed_rate_as_long(proxy, &proxy::stats::cas_read_timeouts);
|
||||
sp::get_schema_versions.set(r, [&ss](std::unique_ptr<request> req) {
|
||||
return ss.local().describe_schema_versions().then([] (auto result) {
|
||||
std::vector<sp::mapper_list> res;
|
||||
for (auto e : result) {
|
||||
sp::mapper_list entry;
|
||||
entry.key = std::move(e.first);
|
||||
entry.value = std::move(e.second);
|
||||
res.emplace_back(std::move(entry));
|
||||
}
|
||||
return make_ready_future<json::json_return_type>(std::move(res));
|
||||
});
|
||||
});
|
||||
|
||||
sp::get_cas_read_unavailables.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_timed_rate_as_long(proxy, &proxy::stats::cas_read_unavailables);
|
||||
sp::get_cas_read_timeouts.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_long(ctx.sp, &proxy::stats::cas_read_timeouts);
|
||||
});
|
||||
|
||||
sp::get_cas_write_timeouts.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_timed_rate_as_long(proxy, &proxy::stats::cas_write_timeouts);
|
||||
sp::get_cas_read_unavailables.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_long(ctx.sp, &proxy::stats::cas_read_unavailables);
|
||||
});
|
||||
|
||||
sp::get_cas_write_unavailables.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_timed_rate_as_long(proxy, &proxy::stats::cas_write_unavailables);
|
||||
sp::get_cas_write_timeouts.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_long(ctx.sp, &proxy::stats::cas_write_timeouts);
|
||||
});
|
||||
|
||||
sp::get_cas_write_metrics_unfinished_commit.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_stats(proxy, &proxy::stats::cas_write_unfinished_commit);
|
||||
sp::get_cas_write_unavailables.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_long(ctx.sp, &proxy::stats::cas_write_unavailables);
|
||||
});
|
||||
|
||||
sp::get_cas_write_metrics_contention.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_estimated_histogram(proxy, &proxy::stats::cas_write_contention);
|
||||
sp::get_cas_write_metrics_unfinished_commit.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_stats(ctx.sp, &proxy::stats::cas_write_unfinished_commit);
|
||||
});
|
||||
|
||||
sp::get_cas_write_metrics_condition_not_met.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_stats(proxy, &proxy::stats::cas_write_condition_not_met);
|
||||
sp::get_cas_write_metrics_contention.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_estimated_histogram(ctx, &proxy::stats::cas_write_contention);
|
||||
});
|
||||
|
||||
sp::get_cas_write_metrics_failed_read_round_optimization.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_stats(proxy, &proxy::stats::cas_failed_read_round_optimization);
|
||||
sp::get_cas_write_metrics_condition_not_met.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_stats(ctx.sp, &proxy::stats::cas_write_condition_not_met);
|
||||
});
|
||||
|
||||
sp::get_cas_read_metrics_unfinished_commit.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_stats(proxy, &proxy::stats::cas_read_unfinished_commit);
|
||||
sp::get_cas_write_metrics_failed_read_round_optimization.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_stats(ctx.sp, &proxy::stats::cas_failed_read_round_optimization);
|
||||
});
|
||||
|
||||
sp::get_cas_read_metrics_contention.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_estimated_histogram(proxy, &proxy::stats::cas_read_contention);
|
||||
sp::get_cas_read_metrics_unfinished_commit.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_stats(ctx.sp, &proxy::stats::cas_read_unfinished_commit);
|
||||
});
|
||||
|
||||
sp::get_read_metrics_timeouts.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_timed_rate_as_long(proxy, &service::storage_proxy_stats::stats::read_timeouts);
|
||||
sp::get_cas_read_metrics_contention.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_estimated_histogram(ctx, &proxy::stats::cas_read_contention);
|
||||
});
|
||||
|
||||
sp::get_read_metrics_unavailables.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_timed_rate_as_long(proxy, &service::storage_proxy_stats::stats::read_unavailables);
|
||||
sp::get_read_metrics_timeouts.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_long(ctx.sp, &service::storage_proxy_stats::stats::read_timeouts);
|
||||
});
|
||||
|
||||
sp::get_range_metrics_timeouts.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_timed_rate_as_long(proxy, &service::storage_proxy_stats::stats::range_slice_timeouts);
|
||||
sp::get_read_metrics_unavailables.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_long(ctx.sp, &service::storage_proxy_stats::stats::read_unavailables);
|
||||
});
|
||||
|
||||
sp::get_range_metrics_unavailables.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_timed_rate_as_long(proxy, &service::storage_proxy_stats::stats::range_slice_unavailables);
|
||||
sp::get_range_metrics_timeouts.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_long(ctx.sp, &service::storage_proxy_stats::stats::range_slice_timeouts);
|
||||
});
|
||||
|
||||
sp::get_write_metrics_timeouts.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_timed_rate_as_long(proxy, &service::storage_proxy_stats::stats::write_timeouts);
|
||||
sp::get_range_metrics_unavailables.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_long(ctx.sp, &service::storage_proxy_stats::stats::range_slice_unavailables);
|
||||
});
|
||||
|
||||
sp::get_write_metrics_unavailables.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_timed_rate_as_long(proxy, &service::storage_proxy_stats::stats::write_unavailables);
|
||||
sp::get_write_metrics_timeouts.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_long(ctx.sp, &service::storage_proxy_stats::stats::write_timeouts);
|
||||
});
|
||||
|
||||
sp::get_read_metrics_timeouts_rates.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_timed_rate_as_obj(proxy, &service::storage_proxy_stats::stats::read_timeouts);
|
||||
sp::get_write_metrics_unavailables.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_long(ctx.sp, &service::storage_proxy_stats::stats::write_unavailables);
|
||||
});
|
||||
|
||||
sp::get_read_metrics_unavailables_rates.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_timed_rate_as_obj(proxy, &service::storage_proxy_stats::stats::read_unavailables);
|
||||
sp::get_read_metrics_timeouts_rates.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_obj(ctx.sp, &service::storage_proxy_stats::stats::read_timeouts);
|
||||
});
|
||||
|
||||
sp::get_range_metrics_timeouts_rates.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_timed_rate_as_obj(proxy, &service::storage_proxy_stats::stats::range_slice_timeouts);
|
||||
sp::get_read_metrics_unavailables_rates.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_obj(ctx.sp, &service::storage_proxy_stats::stats::read_unavailables);
|
||||
});
|
||||
|
||||
sp::get_range_metrics_unavailables_rates.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_timed_rate_as_obj(proxy, &service::storage_proxy_stats::stats::range_slice_unavailables);
|
||||
sp::get_range_metrics_timeouts_rates.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_obj(ctx.sp, &service::storage_proxy_stats::stats::range_slice_timeouts);
|
||||
});
|
||||
|
||||
sp::get_write_metrics_timeouts_rates.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_timed_rate_as_obj(proxy, &service::storage_proxy_stats::stats::write_timeouts);
|
||||
sp::get_range_metrics_unavailables_rates.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_obj(ctx.sp, &service::storage_proxy_stats::stats::range_slice_unavailables);
|
||||
});
|
||||
|
||||
sp::get_write_metrics_unavailables_rates.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_timed_rate_as_obj(proxy, &service::storage_proxy_stats::stats::write_unavailables);
|
||||
sp::get_write_metrics_timeouts_rates.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_obj(ctx.sp, &service::storage_proxy_stats::stats::write_timeouts);
|
||||
});
|
||||
|
||||
sp::get_range_metrics_latency_histogram_depricated.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_histogram_stats_storage_proxy(proxy, &service::storage_proxy_stats::stats::range);
|
||||
sp::get_write_metrics_unavailables_rates.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timed_rate_as_obj(ctx.sp, &service::storage_proxy_stats::stats::write_unavailables);
|
||||
});
|
||||
|
||||
sp::get_write_metrics_latency_histogram_depricated.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_histogram_stats_storage_proxy(proxy, &service::storage_proxy_stats::stats::write);
|
||||
sp::get_range_metrics_latency_histogram_depricated.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_histogram_stats_storage_proxy(ctx.sp, &service::storage_proxy_stats::stats::range);
|
||||
});
|
||||
|
||||
sp::get_read_metrics_latency_histogram_depricated.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_histogram_stats_storage_proxy(proxy, &service::storage_proxy_stats::stats::read);
|
||||
sp::get_write_metrics_latency_histogram_depricated.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_histogram_stats_storage_proxy(ctx.sp, &service::storage_proxy_stats::stats::write);
|
||||
});
|
||||
|
||||
sp::get_range_metrics_latency_histogram.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_timer_stats_storage_proxy(proxy, &service::storage_proxy_stats::stats::range);
|
||||
sp::get_read_metrics_latency_histogram_depricated.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_histogram_stats_storage_proxy(ctx.sp, &service::storage_proxy_stats::stats::read);
|
||||
});
|
||||
|
||||
sp::get_write_metrics_latency_histogram.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_timer_stats_storage_proxy(proxy, &service::storage_proxy_stats::stats::write);
|
||||
});
|
||||
sp::get_cas_write_metrics_latency_histogram.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_timer_stats(proxy, &proxy::stats::cas_write);
|
||||
sp::get_range_metrics_latency_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timer_stats_storage_proxy(ctx.sp, &service::storage_proxy_stats::stats::range);
|
||||
});
|
||||
|
||||
sp::get_cas_read_metrics_latency_histogram.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_timer_stats(proxy, &proxy::stats::cas_read);
|
||||
sp::get_write_metrics_latency_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timer_stats_storage_proxy(ctx.sp, &service::storage_proxy_stats::stats::write);
|
||||
});
|
||||
sp::get_cas_write_metrics_latency_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timer_stats(ctx.sp, &proxy::stats::cas_write);
|
||||
});
|
||||
|
||||
sp::get_view_write_metrics_latency_histogram.set(r, [](std::unique_ptr<http::request> req) {
|
||||
sp::get_cas_read_metrics_latency_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timer_stats(ctx.sp, &proxy::stats::cas_read);
|
||||
});
|
||||
|
||||
sp::get_view_write_metrics_latency_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
//TBD
|
||||
// FIXME
|
||||
// No View metrics are available, so just return empty moving average
|
||||
@@ -476,100 +486,32 @@ void set_storage_proxy(http_context& ctx, routes& r, sharded<service::storage_pr
|
||||
return make_ready_future<json::json_return_type>(get_empty_moving_average());
|
||||
});
|
||||
|
||||
sp::get_read_metrics_latency_histogram.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_timer_stats_storage_proxy(proxy, &service::storage_proxy_stats::stats::read);
|
||||
sp::get_read_metrics_latency_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timer_stats_storage_proxy(ctx.sp, &service::storage_proxy_stats::stats::read);
|
||||
});
|
||||
|
||||
sp::get_read_estimated_histogram.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_estimated_histogram(proxy, &service::storage_proxy_stats::stats::read);
|
||||
sp::get_read_estimated_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_estimated_histogram(ctx, &service::storage_proxy_stats::stats::estimated_read);
|
||||
});
|
||||
|
||||
sp::get_read_latency.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return total_latency(proxy, &service::storage_proxy_stats::stats::read);
|
||||
sp::get_read_latency.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return total_latency(ctx, &service::storage_proxy_stats::stats::read);
|
||||
});
|
||||
sp::get_write_estimated_histogram.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_estimated_histogram(proxy, &service::storage_proxy_stats::stats::write);
|
||||
sp::get_write_estimated_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_estimated_histogram(ctx, &service::storage_proxy_stats::stats::estimated_write);
|
||||
});
|
||||
|
||||
sp::get_write_latency.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return total_latency(proxy, &service::storage_proxy_stats::stats::write);
|
||||
sp::get_write_latency.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return total_latency(ctx, &service::storage_proxy_stats::stats::write);
|
||||
});
|
||||
|
||||
sp::get_range_estimated_histogram.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return sum_timer_stats_storage_proxy(proxy, &service::storage_proxy_stats::stats::range);
|
||||
sp::get_range_estimated_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return sum_timer_stats_storage_proxy(ctx.sp, &service::storage_proxy_stats::stats::range);
|
||||
});
|
||||
|
||||
sp::get_range_latency.set(r, [&proxy](std::unique_ptr<http::request> req) {
|
||||
return total_latency(proxy, &service::storage_proxy_stats::stats::range);
|
||||
sp::get_range_latency.set(r, [&ctx](std::unique_ptr<request> req) {
|
||||
return total_latency(ctx, &service::storage_proxy_stats::stats::range);
|
||||
});
|
||||
}
|
||||
|
||||
void unset_storage_proxy(http_context& ctx, routes& r) {
|
||||
sp::get_total_hints.unset(r);
|
||||
sp::get_hinted_handoff_enabled.unset(r);
|
||||
sp::set_hinted_handoff_enabled.unset(r);
|
||||
sp::get_hinted_handoff_enabled_by_dc.unset(r);
|
||||
sp::set_hinted_handoff_enabled_by_dc_list.unset(r);
|
||||
sp::get_max_hint_window.unset(r);
|
||||
sp::set_max_hint_window.unset(r);
|
||||
sp::get_max_hints_in_progress.unset(r);
|
||||
sp::set_max_hints_in_progress.unset(r);
|
||||
sp::get_hints_in_progress.unset(r);
|
||||
sp::get_rpc_timeout.unset(r);
|
||||
sp::set_rpc_timeout.unset(r);
|
||||
sp::get_read_rpc_timeout.unset(r);
|
||||
sp::set_read_rpc_timeout.unset(r);
|
||||
sp::get_write_rpc_timeout.unset(r);
|
||||
sp::set_write_rpc_timeout.unset(r);
|
||||
sp::get_counter_write_rpc_timeout.unset(r);
|
||||
sp::set_counter_write_rpc_timeout.unset(r);
|
||||
sp::get_cas_contention_timeout.unset(r);
|
||||
sp::set_cas_contention_timeout.unset(r);
|
||||
sp::get_range_rpc_timeout.unset(r);
|
||||
sp::set_range_rpc_timeout.unset(r);
|
||||
sp::get_truncate_rpc_timeout.unset(r);
|
||||
sp::set_truncate_rpc_timeout.unset(r);
|
||||
sp::reload_trigger_classes.unset(r);
|
||||
sp::get_read_repair_attempted.unset(r);
|
||||
sp::get_read_repair_repaired_blocking.unset(r);
|
||||
sp::get_read_repair_repaired_background.unset(r);
|
||||
sp::get_cas_read_timeouts.unset(r);
|
||||
sp::get_cas_read_unavailables.unset(r);
|
||||
sp::get_cas_write_timeouts.unset(r);
|
||||
sp::get_cas_write_unavailables.unset(r);
|
||||
sp::get_cas_write_metrics_unfinished_commit.unset(r);
|
||||
sp::get_cas_write_metrics_contention.unset(r);
|
||||
sp::get_cas_write_metrics_condition_not_met.unset(r);
|
||||
sp::get_cas_write_metrics_failed_read_round_optimization.unset(r);
|
||||
sp::get_cas_read_metrics_unfinished_commit.unset(r);
|
||||
sp::get_cas_read_metrics_contention.unset(r);
|
||||
sp::get_read_metrics_timeouts.unset(r);
|
||||
sp::get_read_metrics_unavailables.unset(r);
|
||||
sp::get_range_metrics_timeouts.unset(r);
|
||||
sp::get_range_metrics_unavailables.unset(r);
|
||||
sp::get_write_metrics_timeouts.unset(r);
|
||||
sp::get_write_metrics_unavailables.unset(r);
|
||||
sp::get_read_metrics_timeouts_rates.unset(r);
|
||||
sp::get_read_metrics_unavailables_rates.unset(r);
|
||||
sp::get_range_metrics_timeouts_rates.unset(r);
|
||||
sp::get_range_metrics_unavailables_rates.unset(r);
|
||||
sp::get_write_metrics_timeouts_rates.unset(r);
|
||||
sp::get_write_metrics_unavailables_rates.unset(r);
|
||||
sp::get_range_metrics_latency_histogram_depricated.unset(r);
|
||||
sp::get_write_metrics_latency_histogram_depricated.unset(r);
|
||||
sp::get_read_metrics_latency_histogram_depricated.unset(r);
|
||||
sp::get_range_metrics_latency_histogram.unset(r);
|
||||
sp::get_write_metrics_latency_histogram.unset(r);
|
||||
sp::get_cas_write_metrics_latency_histogram.unset(r);
|
||||
sp::get_cas_read_metrics_latency_histogram.unset(r);
|
||||
sp::get_view_write_metrics_latency_histogram.unset(r);
|
||||
sp::get_read_metrics_latency_histogram.unset(r);
|
||||
sp::get_read_estimated_histogram.unset(r);
|
||||
sp::get_read_latency.unset(r);
|
||||
sp::get_write_estimated_histogram.unset(r);
|
||||
sp::get_write_latency.unset(r);
|
||||
sp::get_range_estimated_histogram.unset(r);
|
||||
sp::get_range_latency.unset(r);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -11,11 +11,10 @@
|
||||
#include <seastar/core/sharded.hh>
|
||||
#include "api.hh"
|
||||
|
||||
namespace service { class storage_proxy; }
|
||||
namespace service { class storage_service; }
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_storage_proxy(http_context& ctx, httpd::routes& r, sharded<service::storage_proxy>& proxy);
|
||||
void unset_storage_proxy(http_context& ctx, httpd::routes& r);
|
||||
void set_storage_proxy(http_context& ctx, routes& r, sharded<service::storage_service>& ss);
|
||||
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -8,8 +8,6 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <iostream>
|
||||
|
||||
#include <seastar/core/sharded.hh>
|
||||
#include "api.hh"
|
||||
#include "db/data_listeners.hh"
|
||||
@@ -21,10 +19,10 @@ class snapshot_ctl;
|
||||
namespace view {
|
||||
class view_builder;
|
||||
}
|
||||
class system_keyspace;
|
||||
}
|
||||
namespace netw { class messaging_service; }
|
||||
class repair_service;
|
||||
namespace cdc { class generation_service; }
|
||||
class sstables_loader;
|
||||
|
||||
namespace gms {
|
||||
@@ -35,42 +33,28 @@ class gossiper;
|
||||
|
||||
namespace api {
|
||||
|
||||
// verify that the keyspace is found, otherwise a bad_param_exception exception is thrown
|
||||
// containing the description of the respective keyspace error.
|
||||
sstring validate_keyspace(const http_context& ctx, sstring ks_name);
|
||||
|
||||
// verify that the keyspace parameter is found, otherwise a bad_param_exception exception is thrown
|
||||
// containing the description of the respective keyspace error.
|
||||
sstring validate_keyspace(const http_context& ctx, const std::unique_ptr<http::request>& req);
|
||||
sstring validate_keyspace(http_context& ctx, const parameters& param);
|
||||
|
||||
// splits a request parameter assumed to hold a comma-separated list of table names
|
||||
// verify that the tables are found, otherwise a bad_param_exception exception is thrown
|
||||
// containing the description of the respective no_such_column_family error.
|
||||
// Returns an empty vector if no parameter was found.
|
||||
// If the parameter is found and empty, returns a list of all table names in the keyspace.
|
||||
std::vector<sstring> parse_tables(const sstring& ks_name, http_context& ctx, const std::unordered_map<sstring, sstring>& query_params, sstring param_name);
|
||||
|
||||
// splits a request parameter assumed to hold a comma-separated list of table names
|
||||
// verify that the tables are found, otherwise a bad_param_exception exception is thrown
|
||||
// containing the description of the respective no_such_column_family error.
|
||||
// Returns a vector of all table infos given by the parameter, or
|
||||
// if the parameter is not found or is empty, returns a list of all table infos in the keyspace.
|
||||
std::vector<table_info> parse_table_infos(const sstring& ks_name, http_context& ctx, const std::unordered_map<sstring, sstring>& query_params, sstring param_name);
|
||||
|
||||
void set_storage_service(http_context& ctx, httpd::routes& r, sharded<service::storage_service>& ss, service::raft_group0_client&);
|
||||
void unset_storage_service(http_context& ctx, httpd::routes& r);
|
||||
void set_sstables_loader(http_context& ctx, httpd::routes& r, sharded<sstables_loader>& sst_loader);
|
||||
void unset_sstables_loader(http_context& ctx, httpd::routes& r);
|
||||
void set_view_builder(http_context& ctx, httpd::routes& r, sharded<db::view::view_builder>& vb);
|
||||
void unset_view_builder(http_context& ctx, httpd::routes& r);
|
||||
void set_repair(http_context& ctx, httpd::routes& r, sharded<repair_service>& repair);
|
||||
void unset_repair(http_context& ctx, httpd::routes& r);
|
||||
void set_transport_controller(http_context& ctx, httpd::routes& r, cql_transport::controller& ctl);
|
||||
void unset_transport_controller(http_context& ctx, httpd::routes& r);
|
||||
void set_rpc_controller(http_context& ctx, httpd::routes& r, thrift_controller& ctl);
|
||||
void unset_rpc_controller(http_context& ctx, httpd::routes& r);
|
||||
void set_snapshot(http_context& ctx, httpd::routes& r, sharded<db::snapshot_ctl>& snap_ctl);
|
||||
void unset_snapshot(http_context& ctx, httpd::routes& r);
|
||||
void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_service>& ss, gms::gossiper& g, sharded<cdc::generation_service>& cdc_gs);
|
||||
void set_sstables_loader(http_context& ctx, routes& r, sharded<sstables_loader>& sst_loader);
|
||||
void unset_sstables_loader(http_context& ctx, routes& r);
|
||||
void set_view_builder(http_context& ctx, routes& r, sharded<db::view::view_builder>& vb);
|
||||
void unset_view_builder(http_context& ctx, routes& r);
|
||||
void set_repair(http_context& ctx, routes& r, sharded<repair_service>& repair);
|
||||
void unset_repair(http_context& ctx, routes& r);
|
||||
void set_transport_controller(http_context& ctx, routes& r, cql_transport::controller& ctl);
|
||||
void unset_transport_controller(http_context& ctx, routes& r);
|
||||
void set_rpc_controller(http_context& ctx, routes& r, thrift_controller& ctl);
|
||||
void unset_rpc_controller(http_context& ctx, routes& r);
|
||||
void set_snapshot(http_context& ctx, routes& r, sharded<db::snapshot_ctl>& snap_ctl);
|
||||
void unset_snapshot(http_context& ctx, routes& r);
|
||||
seastar::future<json::json_return_type> run_toppartitions_query(db::toppartitions_query& q, http_context &ctx, bool legacy_request = false);
|
||||
|
||||
} // namespace api
|
||||
}
|
||||
|
||||
@@ -14,7 +14,6 @@
|
||||
#include "gms/gossiper.hh"
|
||||
|
||||
namespace api {
|
||||
using namespace seastar::httpd;
|
||||
|
||||
namespace hs = httpd::stream_manager_json;
|
||||
|
||||
@@ -22,7 +21,7 @@ static void set_summaries(const std::vector<streaming::stream_summary>& from,
|
||||
json::json_list<hs::stream_summary>& to) {
|
||||
if (!from.empty()) {
|
||||
hs::stream_summary res;
|
||||
res.cf_id = fmt::to_string(from.front().cf_id);
|
||||
res.cf_id = boost::lexical_cast<std::string>(from.front().cf_id);
|
||||
// For each stream_session, we pretend we are sending/receiving one
|
||||
// file, to make it compatible with nodetool.
|
||||
res.files = 1;
|
||||
@@ -39,7 +38,7 @@ static hs::progress_info get_progress_info(const streaming::progress_info& info)
|
||||
res.current_bytes = info.current_bytes;
|
||||
res.direction = info.dir;
|
||||
res.file_name = info.file_name;
|
||||
res.peer = fmt::to_string(info.peer);
|
||||
res.peer = boost::lexical_cast<std::string>(info.peer);
|
||||
res.session_index = 0;
|
||||
res.total_bytes = info.total_bytes;
|
||||
return res;
|
||||
@@ -62,7 +61,7 @@ static hs::stream_state get_state(
|
||||
state.plan_id = result_future.plan_id.to_sstring();
|
||||
for (auto info : result_future.get_coordinator().get()->get_all_session_info()) {
|
||||
hs::stream_info si;
|
||||
si.peer = fmt::to_string(info.peer);
|
||||
si.peer = boost::lexical_cast<std::string>(info.peer);
|
||||
si.session_index = 0;
|
||||
si.state = info.state;
|
||||
si.connecting = si.peer;
|
||||
@@ -106,7 +105,7 @@ void set_stream_manager(http_context& ctx, routes& r, sharded<streaming::stream_
|
||||
});
|
||||
|
||||
hs::get_total_incoming_bytes.set(r, [&sm](std::unique_ptr<request> req) {
|
||||
gms::inet_address peer(req->get_path_param("peer"));
|
||||
gms::inet_address peer(req->param["peer"]);
|
||||
return sm.map_reduce0([peer](streaming::stream_manager& sm) {
|
||||
return sm.get_progress_on_all_shards(peer).then([] (auto sbytes) {
|
||||
return sbytes.bytes_received;
|
||||
@@ -127,7 +126,7 @@ void set_stream_manager(http_context& ctx, routes& r, sharded<streaming::stream_
|
||||
});
|
||||
|
||||
hs::get_total_outgoing_bytes.set(r, [&sm](std::unique_ptr<request> req) {
|
||||
gms::inet_address peer(req->get_path_param("peer"));
|
||||
gms::inet_address peer(req->param["peer"]);
|
||||
return sm.map_reduce0([peer] (streaming::stream_manager& sm) {
|
||||
return sm.get_progress_on_all_shards(peer).then([] (auto sbytes) {
|
||||
return sbytes.bytes_sent;
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_stream_manager(http_context& ctx, httpd::routes& r, sharded<streaming::stream_manager>& sm);
|
||||
void unset_stream_manager(http_context& ctx, httpd::routes& r);
|
||||
void set_stream_manager(http_context& ctx, routes& r, sharded<streaming::stream_manager>& sm);
|
||||
void unset_stream_manager(http_context& ctx, routes& r);
|
||||
|
||||
}
|
||||
|
||||
@@ -7,98 +7,20 @@
|
||||
*/
|
||||
|
||||
#include "api/api-doc/system.json.hh"
|
||||
#include "api/api-doc/metrics.json.hh"
|
||||
|
||||
#include "api/api.hh"
|
||||
|
||||
#include <seastar/core/reactor.hh>
|
||||
#include <seastar/core/metrics_api.hh>
|
||||
#include <seastar/core/relabel_config.hh>
|
||||
#include <seastar/http/exception.hh>
|
||||
#include <seastar/util/short_streams.hh>
|
||||
#include <seastar/http/short_streams.hh>
|
||||
#include "utils/rjson.hh"
|
||||
|
||||
#include "log.hh"
|
||||
#include "replica/database.hh"
|
||||
|
||||
extern logging::logger apilog;
|
||||
|
||||
namespace api {
|
||||
using namespace seastar::httpd;
|
||||
|
||||
namespace hs = httpd::system_json;
|
||||
namespace hm = httpd::metrics_json;
|
||||
|
||||
void set_system(http_context& ctx, routes& r) {
|
||||
hm::get_metrics_config.set(r, [](const_req req) {
|
||||
std::vector<hm::metrics_config> res;
|
||||
res.resize(seastar::metrics::get_relabel_configs().size());
|
||||
size_t i = 0;
|
||||
for (auto&& r : seastar::metrics::get_relabel_configs()) {
|
||||
res[i].action = r.action;
|
||||
res[i].target_label = r.target_label;
|
||||
res[i].replacement = r.replacement;
|
||||
res[i].separator = r.separator;
|
||||
res[i].source_labels = r.source_labels;
|
||||
res[i].regex = r.expr.str();
|
||||
i++;
|
||||
}
|
||||
return res;
|
||||
});
|
||||
|
||||
hm::set_metrics_config.set(r, [](std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
rapidjson::Document doc;
|
||||
doc.Parse(req->content.c_str());
|
||||
if (!doc.IsArray()) {
|
||||
throw bad_param_exception("Expected a json array");
|
||||
}
|
||||
std::vector<seastar::metrics::relabel_config> relabels;
|
||||
relabels.resize(doc.Size());
|
||||
for (rapidjson::SizeType i = 0; i < doc.Size(); i++) {
|
||||
const auto& element = doc[i];
|
||||
if (element.HasMember("source_labels")) {
|
||||
std::vector<std::string> source_labels;
|
||||
source_labels.resize(element["source_labels"].Size());
|
||||
|
||||
for (size_t j = 0; j < element["source_labels"].Size(); j++) {
|
||||
source_labels[j] = element["source_labels"][j].GetString();
|
||||
}
|
||||
relabels[i].source_labels = source_labels;
|
||||
}
|
||||
if (element.HasMember("action")) {
|
||||
relabels[i].action = seastar::metrics::relabel_config_action(element["action"].GetString());
|
||||
}
|
||||
if (element.HasMember("replacement")) {
|
||||
relabels[i].replacement = element["replacement"].GetString();
|
||||
}
|
||||
if (element.HasMember("separator")) {
|
||||
relabels[i].separator = element["separator"].GetString();
|
||||
}
|
||||
if (element.HasMember("target_label")) {
|
||||
relabels[i].target_label = element["target_label"].GetString();
|
||||
}
|
||||
if (element.HasMember("regex")) {
|
||||
relabels[i].expr = element["regex"].GetString();
|
||||
}
|
||||
}
|
||||
return do_with(std::move(relabels), false, [](const std::vector<seastar::metrics::relabel_config>& relabels, bool& failed) {
|
||||
return smp::invoke_on_all([&relabels, &failed] {
|
||||
return metrics::set_relabel_configs(relabels).then([&failed](const metrics::metric_relabeling_result& result) {
|
||||
if (result.metrics_relabeled_due_to_collision > 0) {
|
||||
failed = true;
|
||||
}
|
||||
return;
|
||||
});
|
||||
}).then([&failed](){
|
||||
if (failed) {
|
||||
throw bad_param_exception("conflicts found during relabeling");
|
||||
}
|
||||
return make_ready_future<json::json_return_type>(seastar::json::json_void());
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
hs::get_system_uptime.set(r, [](const_req req) {
|
||||
return std::chrono::duration_cast<std::chrono::milliseconds>(engine().uptime()).count();
|
||||
});
|
||||
@@ -119,9 +41,9 @@ void set_system(http_context& ctx, routes& r) {
|
||||
|
||||
hs::get_logger_level.set(r, [](const_req req) {
|
||||
try {
|
||||
return logging::level_name(logging::logger_registry().get_logger_level(req.get_path_param("name")));
|
||||
return logging::level_name(logging::logger_registry().get_logger_level(req.param["name"]));
|
||||
} catch (std::out_of_range& e) {
|
||||
throw bad_param_exception("Unknown logger name " + req.get_path_param("name"));
|
||||
throw bad_param_exception("Unknown logger name " + req.param["name"]);
|
||||
}
|
||||
// just to keep the compiler happy
|
||||
return sstring();
|
||||
@@ -130,19 +52,9 @@ void set_system(http_context& ctx, routes& r) {
|
||||
hs::set_logger_level.set(r, [](const_req req) {
|
||||
try {
|
||||
logging::log_level level = boost::lexical_cast<logging::log_level>(std::string(req.get_query_param("level")));
|
||||
logging::logger_registry().set_logger_level(req.get_path_param("name"), level);
|
||||
logging::logger_registry().set_logger_level(req.param["name"], level);
|
||||
} catch (std::out_of_range& e) {
|
||||
throw bad_param_exception("Unknown logger name " + req.get_path_param("name"));
|
||||
} catch (boost::bad_lexical_cast& e) {
|
||||
throw bad_param_exception("Unknown logging level " + req.get_query_param("level"));
|
||||
}
|
||||
return json::json_void();
|
||||
});
|
||||
|
||||
hs::write_log_message.set(r, [](const_req req) {
|
||||
try {
|
||||
logging::log_level level = boost::lexical_cast<logging::log_level>(std::string(req.get_query_param("level")));
|
||||
apilog.log(level, "/system/log: {}", std::string(req.get_query_param("message")));
|
||||
throw bad_param_exception("Unknown logger name " + req.param["name"]);
|
||||
} catch (boost::bad_lexical_cast& e) {
|
||||
throw bad_param_exception("Unknown logging level " + req.get_query_param("level"));
|
||||
}
|
||||
|
||||
@@ -12,6 +12,6 @@
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_system(http_context& ctx, httpd::routes& r);
|
||||
void set_system(http_context& ctx, routes& r);
|
||||
|
||||
}
|
||||
|
||||
@@ -1,277 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2022-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include <seastar/core/coroutine.hh>
|
||||
#include <seastar/coroutine/exception.hh>
|
||||
|
||||
#include "task_manager.hh"
|
||||
#include "api/api-doc/task_manager.json.hh"
|
||||
#include "db/system_keyspace.hh"
|
||||
#include "column_family.hh"
|
||||
#include "unimplemented.hh"
|
||||
#include "storage_service.hh"
|
||||
|
||||
#include <utility>
|
||||
#include <boost/range/adaptors.hpp>
|
||||
|
||||
namespace api {
|
||||
|
||||
namespace tm = httpd::task_manager_json;
|
||||
using namespace json;
|
||||
using namespace seastar::httpd;
|
||||
|
||||
inline bool filter_tasks(tasks::task_manager::task_ptr task, std::unordered_map<sstring, sstring>& query_params) {
|
||||
return (!query_params.contains("keyspace") || query_params["keyspace"] == task->get_status().keyspace) &&
|
||||
(!query_params.contains("table") || query_params["table"] == task->get_status().table);
|
||||
}
|
||||
|
||||
struct full_task_status {
|
||||
tasks::task_manager::task::status task_status;
|
||||
std::string type;
|
||||
tasks::task_manager::task::progress progress;
|
||||
std::string module;
|
||||
tasks::task_id parent_id;
|
||||
tasks::is_abortable abortable;
|
||||
std::vector<std::string> children_ids;
|
||||
};
|
||||
|
||||
struct task_stats {
|
||||
task_stats(tasks::task_manager::task_ptr task)
|
||||
: task_id(task->id().to_sstring())
|
||||
, state(task->get_status().state)
|
||||
, type(task->type())
|
||||
, scope(task->get_status().scope)
|
||||
, keyspace(task->get_status().keyspace)
|
||||
, table(task->get_status().table)
|
||||
, entity(task->get_status().entity)
|
||||
, sequence_number(task->get_status().sequence_number)
|
||||
{ }
|
||||
|
||||
sstring task_id;
|
||||
tasks::task_manager::task_state state;
|
||||
std::string type;
|
||||
std::string scope;
|
||||
std::string keyspace;
|
||||
std::string table;
|
||||
std::string entity;
|
||||
uint64_t sequence_number;
|
||||
};
|
||||
|
||||
tm::task_status make_status(full_task_status status) {
|
||||
auto start_time = db_clock::to_time_t(status.task_status.start_time);
|
||||
auto end_time = db_clock::to_time_t(status.task_status.end_time);
|
||||
::tm st, et;
|
||||
::gmtime_r(&end_time, &et);
|
||||
::gmtime_r(&start_time, &st);
|
||||
|
||||
tm::task_status res{};
|
||||
res.id = status.task_status.id.to_sstring();
|
||||
res.type = status.type;
|
||||
res.scope = status.task_status.scope;
|
||||
res.state = status.task_status.state;
|
||||
res.is_abortable = bool(status.abortable);
|
||||
res.start_time = st;
|
||||
res.end_time = et;
|
||||
res.error = status.task_status.error;
|
||||
res.parent_id = status.parent_id.to_sstring();
|
||||
res.sequence_number = status.task_status.sequence_number;
|
||||
res.shard = status.task_status.shard;
|
||||
res.keyspace = status.task_status.keyspace;
|
||||
res.table = status.task_status.table;
|
||||
res.entity = status.task_status.entity;
|
||||
res.progress_units = status.task_status.progress_units;
|
||||
res.progress_total = status.progress.total;
|
||||
res.progress_completed = status.progress.completed;
|
||||
res.children_ids = std::move(status.children_ids);
|
||||
return res;
|
||||
}
|
||||
|
||||
future<full_task_status> retrieve_status(const tasks::task_manager::foreign_task_ptr& task) {
|
||||
if (task.get() == nullptr) {
|
||||
co_return coroutine::return_exception(httpd::bad_param_exception("Task not found"));
|
||||
}
|
||||
auto progress = co_await task->get_progress();
|
||||
full_task_status s;
|
||||
s.task_status = task->get_status();
|
||||
s.type = task->type();
|
||||
s.parent_id = task->get_parent_id();
|
||||
s.abortable = task->is_abortable();
|
||||
s.module = task->get_module_name();
|
||||
s.progress.completed = progress.completed;
|
||||
s.progress.total = progress.total;
|
||||
std::vector<std::string> ct{task->get_children().size()};
|
||||
boost::transform(task->get_children(), ct.begin(), [] (const auto& child) {
|
||||
return child->id().to_sstring();
|
||||
});
|
||||
s.children_ids = std::move(ct);
|
||||
co_return s;
|
||||
}
|
||||
|
||||
void set_task_manager(http_context& ctx, routes& r, db::config& cfg) {
|
||||
tm::get_modules.set(r, [&ctx] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
std::vector<std::string> v = boost::copy_range<std::vector<std::string>>(ctx.tm.local().get_modules() | boost::adaptors::map_keys);
|
||||
co_return v;
|
||||
});
|
||||
|
||||
tm::get_tasks.set(r, [&ctx] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
using chunked_stats = utils::chunked_vector<task_stats>;
|
||||
auto internal = tasks::is_internal{req_param<bool>(*req, "internal", false)};
|
||||
std::vector<chunked_stats> res = co_await ctx.tm.map([&req, internal] (tasks::task_manager& tm) {
|
||||
chunked_stats local_res;
|
||||
tasks::task_manager::module_ptr module;
|
||||
try {
|
||||
module = tm.find_module(req->get_path_param("module"));
|
||||
} catch (...) {
|
||||
throw bad_param_exception(fmt::format("{}", std::current_exception()));
|
||||
}
|
||||
const auto& filtered_tasks = module->get_tasks() | boost::adaptors::filtered([¶ms = req->query_parameters, internal] (const auto& task) {
|
||||
return (internal || !task.second->is_internal()) && filter_tasks(task.second, params);
|
||||
});
|
||||
for (auto& [task_id, task] : filtered_tasks) {
|
||||
local_res.push_back(task_stats{task});
|
||||
}
|
||||
return local_res;
|
||||
});
|
||||
|
||||
std::function<future<>(output_stream<char>&&)> f = [r = std::move(res)] (output_stream<char>&& os) -> future<> {
|
||||
auto s = std::move(os);
|
||||
std::exception_ptr ex;
|
||||
try {
|
||||
auto res = std::move(r);
|
||||
co_await s.write("[");
|
||||
std::string delim = "";
|
||||
for (auto& v: res) {
|
||||
for (auto& stats: v) {
|
||||
co_await s.write(std::exchange(delim, ", "));
|
||||
tm::task_stats ts;
|
||||
ts = stats;
|
||||
co_await formatter::write(s, ts);
|
||||
}
|
||||
}
|
||||
co_await s.write("]");
|
||||
co_await s.flush();
|
||||
} catch (...) {
|
||||
ex = std::current_exception();
|
||||
}
|
||||
co_await s.close();
|
||||
if (ex) {
|
||||
co_await coroutine::return_exception_ptr(std::move(ex));
|
||||
}
|
||||
};
|
||||
co_return std::move(f);
|
||||
});
|
||||
|
||||
tm::get_task_status.set(r, [&ctx] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto id = tasks::task_id{utils::UUID{req->get_path_param("task_id")}};
|
||||
tasks::task_manager::foreign_task_ptr task;
|
||||
try {
|
||||
task = co_await tasks::task_manager::invoke_on_task(ctx.tm, id, std::function([] (tasks::task_manager::task_ptr task) -> future<tasks::task_manager::foreign_task_ptr> {
|
||||
if (task->is_complete()) {
|
||||
task->unregister_task();
|
||||
}
|
||||
co_return std::move(task);
|
||||
}));
|
||||
} catch (tasks::task_manager::task_not_found& e) {
|
||||
throw bad_param_exception(e.what());
|
||||
}
|
||||
auto s = co_await retrieve_status(task);
|
||||
co_return make_status(s);
|
||||
});
|
||||
|
||||
tm::abort_task.set(r, [&ctx] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto id = tasks::task_id{utils::UUID{req->get_path_param("task_id")}};
|
||||
try {
|
||||
co_await tasks::task_manager::invoke_on_task(ctx.tm, id, [] (tasks::task_manager::task_ptr task) -> future<> {
|
||||
if (!task->is_abortable()) {
|
||||
co_await coroutine::return_exception(std::runtime_error("Requested task cannot be aborted"));
|
||||
}
|
||||
co_await task->abort();
|
||||
});
|
||||
} catch (tasks::task_manager::task_not_found& e) {
|
||||
throw bad_param_exception(e.what());
|
||||
}
|
||||
co_return json_void();
|
||||
});
|
||||
|
||||
tm::wait_task.set(r, [&ctx] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto id = tasks::task_id{utils::UUID{req->get_path_param("task_id")}};
|
||||
tasks::task_manager::foreign_task_ptr task;
|
||||
try {
|
||||
task = co_await tasks::task_manager::invoke_on_task(ctx.tm, id, std::function([] (tasks::task_manager::task_ptr task) {
|
||||
return task->done().then_wrapped([task] (auto f) {
|
||||
task->unregister_task();
|
||||
// done() is called only because we want the task to be complete before getting its status.
|
||||
// The future should be ignored here as the result does not matter.
|
||||
f.ignore_ready_future();
|
||||
return make_foreign(task);
|
||||
});
|
||||
}));
|
||||
} catch (tasks::task_manager::task_not_found& e) {
|
||||
throw bad_param_exception(e.what());
|
||||
}
|
||||
auto s = co_await retrieve_status(task);
|
||||
co_return make_status(s);
|
||||
});
|
||||
|
||||
tm::get_task_status_recursively.set(r, [&ctx] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto& _ctx = ctx;
|
||||
auto id = tasks::task_id{utils::UUID{req->get_path_param("task_id")}};
|
||||
std::queue<tasks::task_manager::foreign_task_ptr> q;
|
||||
utils::chunked_vector<full_task_status> res;
|
||||
|
||||
tasks::task_manager::foreign_task_ptr task;
|
||||
try {
|
||||
// Get requested task.
|
||||
task = co_await tasks::task_manager::invoke_on_task(_ctx.tm, id, std::function([] (tasks::task_manager::task_ptr task) -> future<tasks::task_manager::foreign_task_ptr> {
|
||||
if (task->is_complete()) {
|
||||
task->unregister_task();
|
||||
}
|
||||
co_return task;
|
||||
}));
|
||||
} catch (tasks::task_manager::task_not_found& e) {
|
||||
throw bad_param_exception(e.what());
|
||||
}
|
||||
|
||||
// Push children's statuses in BFS order.
|
||||
q.push(co_await task.copy()); // Task cannot be moved since we need it to be alive during whole loop execution.
|
||||
while (!q.empty()) {
|
||||
auto& current = q.front();
|
||||
res.push_back(co_await retrieve_status(current));
|
||||
for (auto& child: current->get_children()) {
|
||||
q.push(co_await child.copy());
|
||||
}
|
||||
q.pop();
|
||||
}
|
||||
|
||||
std::function<future<>(output_stream<char>&&)> f = [r = std::move(res)] (output_stream<char>&& os) -> future<> {
|
||||
auto s = std::move(os);
|
||||
auto res = std::move(r);
|
||||
co_await s.write("[");
|
||||
std::string delim = "";
|
||||
for (auto& status: res) {
|
||||
co_await s.write(std::exchange(delim, ", "));
|
||||
co_await formatter::write(s, make_status(status));
|
||||
}
|
||||
co_await s.write("]");
|
||||
co_await s.close();
|
||||
};
|
||||
co_return f;
|
||||
});
|
||||
|
||||
tm::get_and_update_ttl.set(r, [&cfg] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
uint32_t ttl = cfg.task_ttl_seconds();
|
||||
try {
|
||||
co_await cfg.task_ttl_seconds.set_value_on_all_shards(req->query_parameters["ttl"], utils::config_file::config_source::API);
|
||||
} catch (...) {
|
||||
throw bad_param_exception(fmt::format("{}", std::current_exception()));
|
||||
}
|
||||
co_return json::json_return_type(ttl);
|
||||
});
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,18 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2022-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "api.hh"
|
||||
#include "db/config.hh"
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_task_manager(http_context& ctx, httpd::routes& r, db::config& cfg);
|
||||
|
||||
}
|
||||
@@ -1,110 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2022-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#ifndef SCYLLA_BUILD_MODE_RELEASE
|
||||
|
||||
#include <seastar/core/coroutine.hh>
|
||||
|
||||
#include "task_manager_test.hh"
|
||||
#include "api/api-doc/task_manager_test.json.hh"
|
||||
#include "tasks/test_module.hh"
|
||||
|
||||
namespace api {
|
||||
|
||||
namespace tmt = httpd::task_manager_test_json;
|
||||
using namespace json;
|
||||
using namespace seastar::httpd;
|
||||
|
||||
// Registers the test-only HTTP endpoints that drive the task manager:
// installing/removing the "test" module and creating/finishing test tasks.
// Compiled out of release builds by the surrounding SCYLLA_BUILD_MODE_RELEASE guard.
void set_task_manager_test(http_context& ctx, routes& r) {
    // Install a fresh "test" module into the task manager on every shard.
    tmt::register_test_module.set(r, [&ctx] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
        co_await ctx.tm.invoke_on_all([] (tasks::task_manager& tm) {
            auto test_module = make_shared<tasks::test_module>(tm);
            tm.register_module("test", test_module);
        });
        co_return json_void();
    });

    // Tear the "test" module down on every shard, draining its tasks.
    tmt::unregister_test_module.set(r, [&ctx] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
        co_await ctx.tm.invoke_on_all([] (tasks::task_manager& tm) -> future<> {
            auto module = tm.find_module("test");
            co_await module->stop();
        });
        co_return json_void();
    });

    // Create a new test task. All parameters are optional query parameters;
    // when "parent_id" is given, the new task is attached to that parent
    // (which is looked up across all shards to discover its owning shard).
    tmt::register_test_task.set(r, [&ctx] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
        sharded<tasks::task_manager>& managers = ctx.tm;
        auto& params = req->query_parameters;

        auto param = params.find("task_id");
        auto id = param != params.end() ? tasks::task_id{utils::UUID{param->second}} : tasks::task_id::create_null_id();
        param = params.find("shard");
        unsigned shard = param != params.end() ? boost::lexical_cast<unsigned>(param->second) : 0;
        param = params.find("keyspace");
        std::string keyspace = param != params.end() ? param->second : "";
        param = params.find("table");
        std::string table = param != params.end() ? param->second : "";
        param = params.find("entity");
        std::string entity = param != params.end() ? param->second : "";

        tasks::task_info data;
        param = params.find("parent_id");
        if (param != params.end()) {
            data.id = tasks::task_id{utils::UUID{param->second}};
            // The parent may live on any shard; record where so the child follows it.
            auto parent = co_await tasks::task_manager::lookup_task_on_all_shards(ctx.tm, data.id);
            data.shard = parent->get_status().shard;
        }

        auto module = managers.local().find_module("test");
        id = co_await module->make_task<tasks::test_task_impl>(shard, id, keyspace, table, entity, data);
        // Kick the freshly registered task off on its owning shard.
        co_await managers.invoke_on(shard, [id] (tasks::task_manager& tm) {
            auto& all_tasks = tm.get_all_tasks();
            if (auto entry = all_tasks.find(id); entry != all_tasks.end()) {
                entry->second->start();
            }
        });
        co_return id.to_sstring();
    });

    // Unregister a single test task by id; unknown ids map to a 400 response.
    tmt::unregister_test_task.set(r, [&ctx] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
        auto id = tasks::task_id{utils::UUID{req->query_parameters["task_id"]}};
        try {
            co_await tasks::task_manager::invoke_on_task(ctx.tm, id, [] (tasks::task_manager::task_ptr task) -> future<> {
                tasks::test_task tt{task};
                co_await tt.unregister_task();
            });
        } catch (const tasks::task_manager::task_not_found& e) {
            throw bad_param_exception(e.what());
        }
        co_return json_void();
    });

    // Complete a test task, either successfully or — when the "error" query
    // parameter is present — with a failure carrying that error text.
    tmt::finish_test_task.set(r, [&ctx] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
        auto id = tasks::task_id{utils::UUID{req->get_path_param("task_id")}};
        auto err_param = req->query_parameters.find("error");
        bool fail = err_param != req->query_parameters.end();
        std::string error = fail ? err_param->second : "";

        try {
            co_await tasks::task_manager::invoke_on_task(ctx.tm, id, [fail, error = std::move(error)] (tasks::task_manager::task_ptr task) {
                tasks::test_task tt{task};
                if (fail) {
                    tt.finish_failed(std::make_exception_ptr(std::runtime_error(error)));
                } else {
                    tt.finish();
                }
                return make_ready_future<>();
            });
        } catch (const tasks::task_manager::task_not_found& e) {
            throw bad_param_exception(e.what());
        }
        co_return json_void();
    });
}
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
@@ -1,21 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2022-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#ifndef SCYLLA_BUILD_MODE_RELEASE
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "api.hh"
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_task_manager_test(http_context& ctx, httpd::routes& r);
|
||||
|
||||
}
|
||||
|
||||
#endif
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user