Compare commits

185 commits (author and date columns were not captured; SHA1s in order):

2d630e068b 5a8e9698d8 64f1aa8d99 280e6eedb9 f80f15a6af d0eb0c0b90 1427c4d428 034f2cb42d
e043a5c276 5da9bd3a6e 3578027e2e 7d2150a057 afd3c571cc 093c8512db 9c0b8ec736 1794b732b0
c1ac4fb8b0 2e7e59fb50 af29d4bed3 72494bbe05 5784823888 a7633be1a9 e78ded74ce 6615c2a6a9
11500ccd3a 955f3eeb56 08bfd96774 f6c4d558eb 0040ff6de2 c238bc7a81 3b984a4293 156761d77e
8e33e80ad3 c35dd86c87 87cb8a1fa4 26f3340c32 aaba093371 a64c6e6be9 c83d2d0d77 0aa49d0311
cce455b1f5 6772f3806b 6c9d699835 a75e1632c8 c5718bf620 2315fcd6cf 8c5464d2fd 346d2788e3
4f68fede6d 681f9e4f50 c503bc7693 de7024251b 9a0eb2319c 9ef462449b 6271f30716 8b64e80c88
c5bffcaa68 8aa0b60e91 dccf762654 e5344079d9 7bc8515c48 1228a41eaa 58b90ceee0 ef46067606
ffdd0f6392 3ab1c8abff d306c40507 b98d5b30de 85f5e57502 19158f3401 a7e40d6acb eedcfedd5a
b655fe262b cbb3b959e3 3dd282f7f0 574548e50f 688d58f54a ea9b0bb4b0 6a9b026601 adc1523aaa
5444eead08 1e74362ec9 72e52dafba 29746e1e7b 13cd56774f 812018479b 0ee2462811 c8bc3a7053
9f78799e80 5bba3856ca 63e92418dd 9eaa6f233e 6600317b2c 807acb2dd9 5e44bf97f0 4003be40b3
cf059b6ee2 d96c31ee4d 680ce234b0 ad656b2c55 43101b6bff 492a5c8886 152747b8fd 00c08519a7
5d47a39b7b 4f8e8bdc04 ef1dab4565 3f602814ba 83d4e85e00 857ffeefce a845e23702 f9b14df3a3
ae47dfde7d cc15a13365 6e14dcb84c 9ed64cc11c d4c46afc50 f371d17884 0a82a885a4 17febfdb0e
830bf99528 90000d9861 46dae42dcd d6395634ad d886b3def4 bcb06bb043 4606300b25 282d93de99
52d3403cb0 97f6073699 5454e6e168 498fb11c70 a6b4881994 9848df6667 2090a5f8f6 7634ed39eb
fb9b15904a 4e11f05aa7 516a1ae834 be5127388d 6d0679ca72 eb67b427b2 2931324b34 614519c4be
203b924c76 f4f957fa53 39e614a444 d8521d0fa2 f60696b55f 1b15a0926a 32efd3902c 6b2f7f8c39
370a6482e3 981644167b 6f669da227 bdf1173075 106c69ad45 740fcc73b8 cefbb0b999 02f43f5e4c
8850ef7c59 8567723a7b b0b7c73acd eb82d66849 eb12fb3733 60d011c9c0 7c3390bde8 95b55a0e9d
7785d8f396 b805e37d30 a790b8cd20 a10ea80a63 91a5c9d20c f846b897bf 8d7c34bf68 7449586a26
b601b9f078 1ec81cda37 e87a2bc9c0 b84d13d325 b5abf6541d 8cf869cb37 df509761b0 b90e11264e
84b2bff0a6

(file name not captured): 4 lines removed
@@ -1,4 +0,0 @@
.git
build
seastar/build
testlog

.gitattributes (vendored): 2 changed lines
@@ -1,4 +1,2 @@
*.cc diff=cpp
*.hh diff=cpp
*.svg binary
docs/_static/api/js/* binary

.github/CODEOWNERS (vendored): 101 lines removed
@@ -1,101 +0,0 @@
# AUTH
auth/* @elcallio @vladzcloudius

# CACHE
row_cache* @tgrabiec
*mutation* @tgrabiec
test/boost/mvcc* @tgrabiec

# CDC
cdc/* @kbr- @elcallio @piodul @jul-stas
test/cql/cdc_* @kbr- @elcallio @piodul @jul-stas
test/boost/cdc_* @kbr- @elcallio @piodul @jul-stas

# COMMITLOG / BATCHLOG
db/commitlog/* @elcallio @eliransin
db/batch* @elcallio

# COORDINATOR
service/storage_proxy* @gleb-cloudius

# COMPACTION
compaction/* @raphaelsc

# CQL TRANSPORT LAYER
transport/*

# CQL QUERY LANGUAGE
cql3/* @tgrabiec

# COUNTERS
counters* @jul-stas
tests/counter_test* @jul-stas

# DOCS
docs/* @annastuchlik @tzach
docs/alternator @annastuchlik @tzach @nyh @havaker @nuivall

# GOSSIP
gms/* @tgrabiec @asias

# DOCKER
dist/docker/*

# LSA
utils/logalloc* @tgrabiec

# MATERIALIZED VIEWS
db/view/* @nyh @piodul
cql3/statements/*view* @nyh @piodul
test/boost/view_* @nyh @piodul

# PACKAGING
dist/* @syuu1228

# REPAIR
repair/* @tgrabiec @asias

# SCHEMA MANAGEMENT
db/schema_tables* @tgrabiec
db/legacy_schema_migrator* @tgrabiec
service/migration* @tgrabiec
schema* @tgrabiec

# SECONDARY INDEXES
index/* @nyh @piodul
cql3/statements/*index* @nyh @piodul
test/boost/*index* @nyh @piodul

# SSTABLES
sstables/* @tgrabiec @raphaelsc

# STREAMING
streaming/* @tgrabiec @asias
service/storage_service.* @tgrabiec @asias

# ALTERNATOR
alternator/* @havaker @nuivall
test/alternator/* @havaker @nuivall

# HINTED HANDOFF
db/hints/* @piodul @vladzcloudius @eliransin

# REDIS
redis/* @syuu1228
test/redis/* @syuu1228

# READERS
reader_* @denesb
querier* @denesb
test/boost/mutation_reader_test.cc @denesb
test/boost/querier_cache_test.cc @denesb

# PYTEST-BASED CQL TESTS
test/cql-pytest/* @nyh

# RAFT
raft/* @kbr- @gleb-cloudius @kostja
test/raft/* @kbr- @gleb-cloudius @kostja

# HEAT-WEIGHTED LOAD BALANCING
db/heat_load_balance.* @nyh @gleb-cloudius

.github/ISSUE_TEMPLATE.md (vendored): 6 changed lines
@@ -1,9 +1,3 @@
This is Scylla's bug tracker, to be used for reporting bugs only.
If you have a question about Scylla, and not a bug, please ask it in
our mailing-list at scylladb-dev@googlegroups.com or in our slack channel.

- [] I have read the disclaimer above, and I am reporting a suspected malfunction in Scylla.

*Installation details*
Scylla version (or git commit hash):
Cluster size:

.github/clang-include-cleaner.json (vendored): 20 lines removed
@@ -1,20 +0,0 @@
{
    "problemMatcher": [
        {
            "owner": "clang-include-cleaner",
            "severity": "error",
            "pattern": [
                {
                    "regexp": "^([^\\-\\+].*)$",
                    "file": 1
                },
                {
                    "regexp": "^(-\\s+[^\\s]+)\\s+@Line:(\\d+)$",
                    "line": 2,
                    "message": 1,
                    "loop": true
                }
            ]
        }
    ]
}
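
For reference, this matcher uses a two-pattern loop: the first regexp captures the file name from a non-diff line, then the looping second regexp consumes the subsequent "- <header> @Line:<n>" removal suggestions. A minimal sketch in Python; the sample output lines are hypothetical, not taken from a real run:

```python
import re

# The two patterns from the (removed) clang-include-cleaner problem matcher.
file_re = re.compile(r"^([^\-\+].*)$")
change_re = re.compile(r"^(-\s+[^\s]+)\s+@Line:(\d+)$")

sample = [
    "api/api.cc",                                  # hypothetical file line
    '- "seastar/core/future.hh" @Line:42',         # hypothetical change line
]
print(file_re.match(sample[0]).group(1))           # api/api.cc
m = change_re.match(sample[1])
print(m.group(1), m.group(2))                      # - "seastar/core/future.hh" 42
```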

.github/clang-matcher.json (vendored): 18 lines removed
@@ -1,18 +0,0 @@
{
    "problemMatcher": [
        {
            "owner": "clang",
            "pattern": [
                {
                    "regexp": "^([^:]+):(\\d+):(\\d+):\\s+(warning|error):\\s+(.*?)\\s+\\[(.*?)\\]$",
                    "file": 1,
                    "line": 2,
                    "column": 3,
                    "severity": 4,
                    "message": 5,
                    "code": 6
                }
            ]
        }
    ]
}
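
The regexp above maps a standard clang diagnostic onto file/line/column/severity/message/code capture groups. A minimal sketch of the same pattern in Python; the sample diagnostic is a hypothetical example:

```python
import re

pattern = re.compile(
    r"^([^:]+):(\d+):(\d+):\s+(warning|error):\s+(.*?)\s+\[(.*?)\]$")

line = "main.cc:10:5: warning: unused variable 'x' [-Wunused-variable]"
m = pattern.match(line)
assert m is not None
file, lineno, col, severity, message, code = m.groups()
print(file, lineno, col, severity, message, code)
# -> main.cc 10 5 warning unused variable 'x' -Wunused-variable
```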

.github/mergify.yml (vendored): 92 lines removed
@@ -1,92 +0,0 @@
pull_request_rules:
  - name: put PR in draft if conflicts
    conditions:
      - label = conflicts
      - author = mergify[bot]
      - head ~= ^mergify/
    actions:
      edit:
        draft: true
  - name: Delete mergify backport branch
    conditions:
      - base~=branch-
      - or:
        - merged
        - closed
    actions:
      delete_head_branch:
  - name: Automate backport pull request 5.2
    conditions:
      - or:
        - closed
        - merged
      - or:
        - base=master
        - base=next
      - label=backport/5.2 # The PR must have this label to trigger the backport
      - label=promoted-to-master
    actions:
      copy:
        title: "[Backport 5.2] {{ title }}"
        body: |
          {{ body }}

          {% for c in commits %}
          (cherry picked from commit {{ c.sha }})
          {% endfor %}

          Refs #{{number}}
        branches:
          - branch-5.2
        assignees:
          - "{{ author }}"
  - name: Automate backport pull request 5.4
    conditions:
      - or:
        - closed
        - merged
      - or:
        - base=master
        - base=next
      - label=backport/5.4 # The PR must have this label to trigger the backport
      - label=promoted-to-master
    actions:
      copy:
        title: "[Backport 5.4] {{ title }}"
        body: |
          {{ body }}

          {% for c in commits %}
          (cherry picked from commit {{ c.sha }})
          {% endfor %}

          Refs #{{number}}
        branches:
          - branch-5.4
        assignees:
          - "{{ author }}"
  - name: Automate backport pull request 6.0
    conditions:
      - or:
        - closed
        - merged
      - or:
        - base=master
        - base=next
      - label=backport/6.0 # The PR must have this label to trigger the backport
      - label=promoted-to-master
    actions:
      copy:
        title: "[Backport 6.0] {{ title }}"
        body: |
          {{ body }}

          {% for c in commits %}
          (cherry picked from commit {{ c.sha }})
          {% endfor %}

          Refs #{{number}}
        branches:
          - branch-6.0
        assignees:
          - "{{ author }}"

.github/pull_request_template.md (vendored): 1 line removed
@@ -1 +0,0 @@
**Please replace this line with justification for the backport/\* labels added to this PR**

.github/scripts/auto-backport.py (vendored): 186 lines removed
@@ -1,186 +0,0 @@
#!/usr/bin/env python3

import argparse
import os
import re
import sys
import tempfile
import logging

from github import Github, GithubException
from git import Repo, GitCommandError

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
try:
    github_token = os.environ["GITHUB_TOKEN"]
except KeyError:
    print("Please set the 'GITHUB_TOKEN' environment variable")
    sys.exit(1)


def is_pull_request():
    return '--pull-request' in sys.argv[1:]


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--repo', type=str, required=True, help='Github repository name')
    parser.add_argument('--base-branch', type=str, default='refs/heads/master', help='Base branch')
    parser.add_argument('--commits', default=None, type=str, help='Range of promoted commits.')
    parser.add_argument('--pull-request', type=int, help='Pull request number to be backported')
    parser.add_argument('--head-commit', type=str, required=is_pull_request(), help='The HEAD of target branch after the pull request specified by --pull-request is merged')
    return parser.parse_args()


def create_pull_request(repo, new_branch_name, base_branch_name, pr, backport_pr_title, commits, is_draft=False):
    pr_body = f'{pr.body}\n\n'
    for commit in commits:
        pr_body += f'- (cherry picked from commit {commit})\n\n'
    pr_body += f'Parent PR: #{pr.number}'
    try:
        backport_pr = repo.create_pull(
            title=backport_pr_title,
            body=pr_body,
            head=f'scylladbbot:{new_branch_name}',
            base=base_branch_name,
            draft=is_draft
        )
        logging.info(f"Pull request created: {backport_pr.html_url}")
        backport_pr.add_to_assignees(pr.user)
        if is_draft:
            backport_pr.add_to_labels("conflicts")
            pr_comment = f"@{pr.user} - This PR was marked as draft because it has conflicts\n"
            pr_comment += "Please resolve them and mark this PR as ready for review"
            backport_pr.create_issue_comment(pr_comment)
        logging.info(f"Assigned PR to original author: {pr.user}")
        return backport_pr
    except GithubException as e:
        if 'A pull request already exists' in str(e):
            logging.warning(f'A pull request already exists for {pr.user}:{new_branch_name}')
        else:
            logging.error(f'Failed to create PR: {e}')


def get_pr_commits(repo, pr, stable_branch, start_commit=None):
    commits = []
    if pr.merged:
        merge_commit = repo.get_commit(pr.merge_commit_sha)
        if len(merge_commit.parents) > 1:  # Check if this merge commit includes multiple commits
            commits.append(pr.merge_commit_sha)
        else:
            if start_commit:
                promoted_commits = repo.compare(start_commit, stable_branch).commits
            else:
                promoted_commits = repo.get_commits(sha=stable_branch)
            for commit in pr.get_commits():
                for promoted_commit in promoted_commits:
                    commit_title = commit.commit.message.splitlines()[0]
                    # In Scylla-pkg and scylla-dtest, for example,
                    # we don't create a merge commit for a PR with multiple commits,
                    # according to the GitHub API, the last commit will be the merge commit,
                    # which is not what we need when backporting (we need all the commits).
                    # So here, we are validating the correct SHA for each commit so we can cherry-pick
                    if promoted_commit.commit.message.startswith(commit_title):
                        commits.append(promoted_commit.sha)

    elif pr.state == 'closed':
        events = pr.get_issue_events()
        for event in events:
            if event.event == 'closed':
                commits.append(event.commit_id)
    return commits


def create_pr_comment_and_remove_label(pr, comment_body):
    labels = pr.get_labels()
    pattern = re.compile(r"backport/\d+\.\d+$")
    for label in labels:
        if pattern.match(label.name):
            print(f"Removing label: {label.name}")
            comment_body += f'- {label.name}\n'
            pr.remove_from_labels(label)
    pr.create_issue_comment(comment_body)


def backport(repo, pr, version, commits, backport_base_branch):
    new_branch_name = f'backport/{pr.number}/to-{version}'
    backport_pr_title = f'[Backport {version}] {pr.title}'
    repo_url = f'https://scylladbbot:{github_token}@github.com/{repo.full_name}.git'
    fork_repo = f'https://scylladbbot:{github_token}@github.com/scylladbbot/{repo.name}.git'
    with (tempfile.TemporaryDirectory() as local_repo_path):
        try:
            repo_local = Repo.clone_from(repo_url, local_repo_path, branch=backport_base_branch)
            repo_local.git.checkout(b=new_branch_name)
            is_draft = False
            for commit in commits:
                try:
                    repo_local.git.cherry_pick(commit, '-m1', '-x')
                except GitCommandError as e:
                    logging.warning(f'Cherry-pick conflict on commit {commit}: {e}')
                    is_draft = True
                    repo_local.git.add(A=True)
                    repo_local.git.cherry_pick('--continue')
            if not repo.private and not repo.has_in_collaborators(pr.user.login):
                repo.add_to_collaborators(pr.user.login, permission="push")
                comment = f':warning: @{pr.user.login} you have been added as collaborator to scylladbbot fork '
                comment += f'Please check your inbox and approve the invitation, once it is done, please add the backport labels again'
                create_pr_comment_and_remove_label(pr, comment)
                return
            repo_local.git.push(fork_repo, new_branch_name, force=True)
            create_pull_request(repo, new_branch_name, backport_base_branch, pr, backport_pr_title, commits,
                                is_draft=is_draft)

        except GitCommandError as e:
            logging.warning(f"GitCommandError: {e}")


def main():
    args = parse_args()
    base_branch = args.base_branch.split('/')[2]
    promoted_label = 'promoted-to-master'
    repo_name = args.repo
    if 'scylla-enterprise' in args.repo:
        promoted_label = 'promoted-to-enterprise'
    stable_branch = base_branch
    backport_branch = 'branch-'

    backport_label_pattern = re.compile(r'backport/\d+\.\d+$')

    g = Github(github_token)
    repo = g.get_repo(repo_name)
    closed_prs = []
    start_commit = None

    if args.commits:
        start_commit, end_commit = args.commits.split('..')
        commits = repo.compare(start_commit, end_commit).commits
        for commit in commits:
            match = re.search(rf"Closes .*#([0-9]+)", commit.commit.message, re.IGNORECASE)
            if match:
                pr_number = int(match.group(1))
                pr = repo.get_pull(pr_number)
                closed_prs.append(pr)
    if args.pull_request:
        start_commit = args.head_commit
        pr = repo.get_pull(args.pull_request)
        closed_prs = [pr]

    for pr in closed_prs:
        labels = [label.name for label in pr.labels]
        backport_labels = [label for label in labels if backport_label_pattern.match(label)]
        if promoted_label not in labels:
            print(f'no {promoted_label} label: {pr.number}')
            continue
        if not backport_labels:
            print(f'no backport label: {pr.number}')
            continue
        commits = get_pr_commits(repo, pr, stable_branch, start_commit)
        logging.info(f"Found PR #{pr.number} with commit {commits} and the following labels: {backport_labels}")
        for backport_label in backport_labels:
            version = backport_label.replace('backport/', '')
            backport_base_branch = backport_label.replace('backport/', backport_branch)
            backport(repo, pr, version, commits, backport_base_branch)


if __name__ == "__main__":
    main()
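
The core naming convention of this script: a `backport/X.Y` label selects the target branch `branch-X.Y` and a per-PR working branch. A minimal sketch of that mapping, mirroring `main()` and `backport()` above; the label and PR number below are hypothetical illustrations:

```python
import re

backport_label_pattern = re.compile(r'backport/\d+\.\d+$')

label = 'backport/6.0'                                        # hypothetical label
assert backport_label_pattern.match(label)
version = label.replace('backport/', '')                      # '6.0'
backport_base_branch = label.replace('backport/', 'branch-')  # 'branch-6.0'
new_branch_name = f'backport/1234/to-{version}'               # hypothetical PR #1234
print(version, backport_base_branch, new_branch_name)
```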

.github/scripts/label_promoted_commits.py (vendored): 87 lines removed
@@ -1,87 +0,0 @@
import argparse
import re
import sys
import os
from github import Github
from github.GithubException import UnknownObjectException

try:
    github_token = os.environ["GITHUB_TOKEN"]
except KeyError:
    print("Please set the 'GITHUB_TOKEN' environment variable")
    sys.exit(1)


def parser():
    parser = argparse.ArgumentParser()
    parser.add_argument('--repository', type=str, required=True,
                        help='Github repository name (e.g., scylladb/scylladb)')
    parser.add_argument('--commits', type=str, required=True, help='Range of promoted commits.')
    parser.add_argument('--label', type=str, default='promoted-to-master', help='Label to use')
    parser.add_argument('--ref', type=str, required=True, help='PR target branch')
    return parser.parse_args()


def add_comment_and_close_pr(pr, comment):
    if pr.state == 'open':
        pr.create_issue_comment(comment)
        pr.edit(state="closed")


def mark_backport_done(repo, ref_pr_number, branch):
    pr = repo.get_pull(int(ref_pr_number))
    label_to_remove = f'backport/{branch}'
    label_to_add = f'{label_to_remove}-done'
    current_labels = [label.name for label in pr.get_labels()]
    if label_to_remove in current_labels:
        pr.remove_from_labels(label_to_remove)
    if label_to_add not in current_labels:
        pr.add_to_labels(label_to_add)


def main():
    # This script is triggered by a push event to either the master branch or a branch named branch-x.y (where x and y represent version numbers). Based on the pushed branch, the script performs the following actions:
    # - When ref branch is `master`, it will add the `promoted-to-master` label, which we need later for the auto backport process
    # - When ref branch is `branch-x.y` (which means we backported a patch), it will replace in the original PR the `backport/x.y` label with `backport/x.y-done` and will close the backport PR (Since GitHub close only the one referring to default branch)
    args = parser()
    pr_pattern = re.compile(r'Closes .*#([0-9]+)')
    target_branch = re.search(r'branch-(\d+\.\d+)', args.ref)
    g = Github(github_token)
    repo = g.get_repo(args.repository, lazy=False)
    start_commit, end_commit = args.commits.split('..')
    commits = repo.compare(start_commit, end_commit).commits
    processed_prs = set()
    # Print commit information
    for commit in commits:
        print(f'Commit sha is: {commit.sha}')
        match = pr_pattern.search(commit.commit.message)
        if match:
            pr_number = int(match.group(1))
            if pr_number in processed_prs:
                continue
            if target_branch:
                pr = repo.get_pull(pr_number)
                branch_name = target_branch[1]
                refs_pr = re.findall(r'Parent PR: (?:#|https.*?)(\d+)', pr.body)
                if refs_pr:
                    print(f'branch-{target_branch.group(1)}, pr number is: {pr_number}')
                    # 1. change the backport label of the parent PR to note that
                    # we've merged the corresponding backport PR
                    # 2. close the backport PR and leave a comment on it to note
                    # that it has been merged with a certain git commit.
                    ref_pr_number = refs_pr[0]
                    mark_backport_done(repo, ref_pr_number, branch_name)
                    comment = f'Closed via {commit.sha}'
                    add_comment_and_close_pr(pr, comment)
            else:
                try:
                    pr = repo.get_pull(pr_number)
                    pr.add_to_labels('promoted-to-master')
                    print(f'master branch, pr number is: {pr_number}')
                except UnknownObjectException:
                    print(f'{pr_number} is not a PR but an issue, no need to add label')
            processed_prs.add(pr_number)


if __name__ == "__main__":
    main()
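
The script keys on two message patterns: `Closes ...#N` in promoted commit messages, and `Parent PR: #N` (written by auto-backport.py) in backport PR bodies. A minimal sketch; both sample strings are hypothetical:

```python
import re

pr_pattern = re.compile(r'Closes .*#([0-9]+)')
parent_pattern = re.compile(r'Parent PR: (?:#|https.*?)(\d+)')

commit_message = "table: fix row_cache eviction\n\nCloses scylladb/scylladb#12345"
backport_body = "Backport of the fix.\n\nParent PR: #12345"

print(pr_pattern.search(commit_message).group(1))   # 12345
print(parent_pattern.findall(backport_body))        # ['12345']
```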

.github/scripts/sync_labels.py (vendored): 95 lines removed
@@ -1,95 +0,0 @@
#!/usr/bin/env python3
import argparse
import os
import sys
from github import Github
import re

try:
    github_token = os.environ["GITHUB_TOKEN"]
except KeyError:
    print("Please set the 'GITHUB_TOKEN' environment variable")
    sys.exit(1)


def parser():
    parse = argparse.ArgumentParser()
    parse.add_argument('--repo', type=str, required=True, help='Github repository name (e.g., scylladb/scylladb)')
    parse.add_argument('--number', type=int, required=True, help='Pull request or issue number to sync labels from')
    parse.add_argument('--label', type=str, default=None, help='Label to add/remove from an issue or PR')
    parse.add_argument('--is_issue', action='store_true', help='Determined if label change is in Issue or not')
    parse.add_argument('--action', type=str, choices=['opened', 'labeled', 'unlabeled'], required=True, help='Sync labels action')
    return parse.parse_args()


def copy_labels_from_linked_issues(repo, pr_number):
    pr = repo.get_pull(pr_number)
    if pr.body:
        linked_issue_numbers = set(re.findall(r'Fixes:? (?:#|https.*?/issues/)(\d+)', pr.body))
        for issue_number in linked_issue_numbers:
            try:
                issue = repo.get_issue(int(issue_number))
                for label in issue.labels:
                    pr.add_to_labels(label.name)
                print(f"Labels from issue #{issue_number} copied to PR #{pr_number}")
            except Exception as e:
                print(f"Error processing issue #{issue_number}: {e}")


def get_linked_pr_from_issue_number(repo, number):
    linked_prs = []
    for pr in repo.get_pulls(state='all', base='master'):
        if pr.body and f'{number}' in pr.body:
            linked_prs.append(pr.number)
            break
        else:
            continue
    return linked_prs


def get_linked_issues_based_on_pr_body(repo, number):
    pr = repo.get_pull(number)
    repo_name = repo.full_name
    pattern = rf"(?:fix(?:|es|ed)|resolve(?:|d|s))\s*:?\s*(?:(?:(?:{repo_name})?#)|https://github\.com/{repo_name}/issues/)(\d+)"
    issue_number_from_pr_body = []
    if pr.body is None:
        return issue_number_from_pr_body
    matches = re.findall(pattern, pr.body, re.IGNORECASE)
    if matches:
        for match in matches:
            issue_number_from_pr_body.append(match)
            print(f"Found issue number: {match}")
    return issue_number_from_pr_body


def sync_labels(repo, number, label, action, is_issue=False):
    if is_issue:
        linked_prs_or_issues = get_linked_pr_from_issue_number(repo, number)
    else:
        linked_prs_or_issues = get_linked_issues_based_on_pr_body(repo, number)
    for pr_or_issue_number in linked_prs_or_issues:
        if is_issue:
            target = repo.get_issue(pr_or_issue_number)
        else:
            target = repo.get_issue(int(pr_or_issue_number))
        if action == 'labeled':
            target.add_to_labels(label)
            print(f"Label '{label}' successfully added.")
        elif action == 'unlabeled':
            target.remove_from_labels(label)
            print(f"Label '{label}' successfully removed.")
        elif action == 'opened':
            copy_labels_from_linked_issues(repo, number)
        else:
            print("Invalid action. Use 'labeled', 'unlabeled' or 'opened'.")


def main():
    args = parser()
    github = Github(github_token)
    repo = github.get_repo(args.repo)
    sync_labels(repo, args.number, args.label, args.action, args.is_issue)


if __name__ == "__main__":
    main()
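
The issue-reference pattern in `get_linked_issues_based_on_pr_body()` accepts `fix/fixes/fixed` and `resolve/resolves/resolved` followed by `#N`, `owner/repo#N`, or a full issue URL. A minimal sketch with the pattern instantiated for a hypothetical repository name and PR body:

```python
import re

repo_name = "scylladb/scylladb"  # hypothetical repo.full_name
pattern = (rf"(?:fix(?:|es|ed)|resolve(?:|d|s))\s*:?\s*"
           rf"(?:(?:(?:{repo_name})?#)|https://github\.com/{repo_name}/issues/)(\d+)")

body = "Fixes #111\nResolved: https://github.com/scylladb/scylladb/issues/222"
print(re.findall(pattern, body, re.IGNORECASE))  # ['111', '222']
```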

.github/workflows/add-label-when-promoted.yaml (vendored): 71 lines removed
@@ -1,71 +0,0 @@
name: Check if commits are promoted

on:
  push:
    branches:
      - master
      - branch-*.*
      - enterprise
  pull_request_target:
    types: [labeled]
    branches: [master, next, enterprise]

jobs:
  check-commit:
    runs-on: ubuntu-latest
    permissions:
      pull-requests: write
      issues: write
    steps:
      - name: Dump GitHub context
        env:
          GITHUB_CONTEXT: ${{ toJson(github) }}
        run: echo "$GITHUB_CONTEXT"
      - name: Set Default Branch
        id: set_branch
        run: |
          if [[ "${{ github.repository }}" == *enterprise* ]]; then
            echo "DEFAULT_BRANCH=enterprise" >> $GITHUB_ENV
          else
            echo "DEFAULT_BRANCH=master" >> $GITHUB_ENV
          fi
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          repository: ${{ github.repository }}
          ref: ${{ env.DEFAULT_BRANCH }}
          token: ${{ secrets.AUTO_BACKPORT_TOKEN }}
          fetch-depth: 0  # Fetch all history for all tags and branches
      - name: Set up Git identity
        run: |
          git config --global user.name "GitHub Action"
          git config --global user.email "action@github.com"
          git config --global merge.conflictstyle diff3
      - name: Install dependencies
        run: sudo apt-get install -y python3-github python3-git
      - name: Run python script
        if: github.event_name == 'push'
        env:
          GITHUB_TOKEN: ${{ secrets.AUTO_BACKPORT_TOKEN }}
        run: python .github/scripts/label_promoted_commits.py --commits ${{ github.event.before }}..${{ github.sha }} --repository ${{ github.repository }} --ref ${{ github.ref }}
      - name: Run auto-backport.py when promotion completed
        if: ${{ github.event_name == 'push' && github.ref == format('refs/heads/{0}', env.DEFAULT_BRANCH) }}
        env:
          GITHUB_TOKEN: ${{ secrets.AUTO_BACKPORT_TOKEN }}
        run: python .github/scripts/auto-backport.py --repo ${{ github.repository }} --base-branch ${{ github.ref }} --commits ${{ github.event.before }}..${{ github.sha }}
      - name: Check if label starts with 'backport/' and contains digits
        id: check_label
        run: |
          label_name="${{ github.event.label.name }}"
          if [[ "$label_name" =~ ^backport/[0-9]+\.[0-9]+$ ]]; then
            echo "Label matches backport/X.X pattern."
            echo "backport_label=true" >> $GITHUB_OUTPUT
          else
            echo "Label does not match the required pattern."
            echo "backport_label=false" >> $GITHUB_OUTPUT
          fi
      - name: Run auto-backport.py when label was added
        if: ${{ github.event_name == 'pull_request_target' && steps.check_label.outputs.backport_label == 'true' && github.event.pull_request.state == 'closed' }}
        env:
          GITHUB_TOKEN: ${{ secrets.AUTO_BACKPORT_TOKEN }}
        run: python .github/scripts/auto-backport.py --repo ${{ github.repository }} --base-branch ${{ github.ref }} --pull-request ${{ github.event.pull_request.number }} --head-commit ${{ github.event.pull_request.base.sha }}

(file name not captured): 26 lines removed
@@ -1,26 +0,0 @@
name: Fixes validation for backport PR

on:
  pull_request:
    types: [opened, reopened, edited]
    branches: [branch-*]

jobs:
  check-fixes-prefix:
    runs-on: ubuntu-latest
    steps:
      - name: Check PR body for "Fixes" prefix patterns
        uses: actions/github-script@v7
        with:
          script: |
            const body = context.payload.pull_request.body;
            const repo = context.payload.repository.full_name;

            // Regular expression pattern to check for "Fixes" prefix
            // Adjusted to dynamically insert the repository full name
            const pattern = `Fixes:? (?:#|${repo.replace('/', '\\/')}#|https://github\\.com/${repo.replace('/', '\\/')}/issues/)(\\d+)`;
            const regex = new RegExp(pattern);

            if (!regex.test(body)) {
              core.setFailed("PR body does not contain a valid 'Fixes' reference.");
            }
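
A Python rendering of the same check, for comparison with the patterns used by the scripts above; the repository name and PR bodies are hypothetical examples:

```python
import re

repo = "scylladb/scylladb"  # hypothetical repository full name
pattern = rf"Fixes:? (?:#|{repo}#|https://github\.com/{repo}/issues/)(\d+)"

ok_body = "Backport of the fix.\n\nFixes scylladb/scylladb#98765"
bad_body = "Backport of the fix, no issue reference."
print(bool(re.search(pattern, ok_body)))   # True
print(bool(re.search(pattern, bad_body)))  # False
```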

.github/workflows/build-scylla.yaml (vendored): 35 lines removed
@@ -1,35 +0,0 @@
name: Build Scylla

on:
  workflow_call:
    inputs:
      build_mode:
        description: 'the build mode'
        type: string
        required: true
    outputs:
      md5sum:
        description: 'the md5sum for scylla executable'
        value: ${{ jobs.build.outputs.md5sum }}

jobs:
  build:
    runs-on: ubuntu-latest
    # be consistent with tools/toolchain/image
    container: scylladb/scylla-toolchain:fedora-40-20240621
    outputs:
      md5sum: ${{ steps.checksum.outputs.md5sum }}
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: recursive
      - name: Generate the building system
        run: |
          git config --global --add safe.directory $GITHUB_WORKSPACE
          ./configure.py --mode ${{ inputs.build_mode }} --with scylla
      - run: |
          ninja build/${{ inputs.build_mode }}/scylla
      - id: checksum
        run: |
          checksum=$(md5sum build/${{ inputs.build_mode }}/scylla | cut -c -32)
          echo "md5sum=$checksum" >> $GITHUB_OUTPUT

.github/workflows/clang-nightly.yaml (vendored): 65 lines removed
@@ -1,65 +0,0 @@
name: clang-nightly

on:
  schedule:
    # only at 5AM Saturday
    - cron: '0 5 * * SAT'

env:
  # use the development branch explicitly
  CLANG_VERSION: 19
  BUILD_DIR: build

permissions: {}

# cancel the in-progress run upon a repush
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  clang-dev:
    name: Build with clang nightly
    runs-on: ubuntu-latest
    container: fedora:40
    strategy:
      matrix:
        build_type:
          - Debug
          - RelWithDebInfo
          - Dev
    steps:
      - run: |
          sudo dnf -y install git
      - uses: actions/checkout@v4
        with:
          submodules: true
      - name: Install build dependencies
        run: |
          # use the copr repo for llvm snapshot builds, see
          # https://copr.fedorainfracloud.org/coprs/g/fedora-llvm-team/llvm-snapshots/
          sudo dnf -y install 'dnf-command(copr)'
          sudo dnf copr enable -y @fedora-llvm-team/llvm-snapshots
          # do not install java dependencies, which is not only not used here
          sed -i.orig \
            -e '/tools\/.*\/install-dependencies.sh/d' \
            -e 's/(minio_download_jobs)/(true)/' \
            ./install-dependencies.sh
          sudo ./install-dependencies.sh
          sudo dnf -y install lld
      - name: Generate the building system
        run: |
          cmake \
            -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} \
            -DCMAKE_C_COMPILER=clang-$CLANG_VERSION \
            -DCMAKE_CXX_COMPILER=clang++-$CLANG_VERSION \
            -G Ninja \
            -B $BUILD_DIR \
            -S .
      # see https://github.com/actions/toolkit/blob/main/docs/problem-matchers.md
      - run: |
          echo "::add-matcher::.github/clang-matcher.json"
      - run: |
          cmake --build $BUILD_DIR --target scylla
      - run: |
          echo "::remove-matcher owner=clang::"

.github/workflows/clang-tidy.yaml (vendored): 67 lines removed
@@ -1,67 +0,0 @@
name: clang-tidy

on:
  pull_request:
    branches:
      - master
    paths-ignore:
      - '**/*.rst'
      - '**/*.md'
      - 'docs/**'
      - '.github/**'
  workflow_dispatch:
  schedule:
    # only at 5AM Saturday
    - cron: '0 5 * * SAT'

env:
  BUILD_TYPE: RelWithDebInfo
  BUILD_DIR: build
  CLANG_TIDY_CHECKS: '-*,bugprone-use-after-move'

permissions: {}

# cancel the in-progress run upon a repush
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  read-toolchain:
    uses: ./.github/workflows/read-toolchain.yaml
  clang-tidy:
    name: Run clang-tidy
    needs:
      - read-toolchain
    runs-on: ubuntu-latest
    container: ${{ needs.read-toolchain.outputs.image }}
    steps:
      - env:
          IMAGE: ${{ needs.read-toolchain.image }}
        run: |
          echo ${{ needs.read-toolchain.image }}
      - uses: actions/checkout@v4
        with:
          submodules: true
      - run: |
          sudo dnf -y install clang-tools-extra
      - name: Generate the building system
        run: |
          cmake \
            -DCMAKE_BUILD_TYPE=$BUILD_TYPE \
            -DCMAKE_C_COMPILER=clang \
            -DScylla_USE_LINKER=ld.lld \
            -DCMAKE_CXX_COMPILER=clang++ \
            -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
            -DCMAKE_CXX_CLANG_TIDY="clang-tidy;--checks=$CLANG_TIDY_CHECKS" \
            -G Ninja \
            -B $BUILD_DIR \
            -S .
      # see https://github.com/actions/toolkit/blob/main/docs/problem-matchers.md
      - run: |
          echo "::add-matcher::.github/clang-matcher.json"
      - name: Build with clang-tidy enabled
        run: |
          cmake --build $BUILD_DIR --target scylla
      - run: |
          echo "::remove-matcher owner=clang::"

.github/workflows/codespell.yaml (vendored): 17 lines removed
@@ -1,17 +0,0 @@
name: codespell
on:
  pull_request:
    branches:
      - master
permissions: {}
jobs:
  codespell:
    name: Check for spelling errors
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: codespell-project/actions-codespell@master
        with:
          only_warn: 1
          ignore_words_list: "ans,datas,fo,ser,ue,crate,nd,reenable,strat,stap,te,raison"
          skip: "./.git,./build,./tools,*.js,*.lock,./test,./licenses,./redis/lolwut.cc,*.svg"

.github/workflows/docs-pages.yaml (vendored): 43 lines removed
@@ -1,43 +0,0 @@
name: "Docs / Publish"
# For more information,
# see https://sphinx-theme.scylladb.com/stable/deployment/production.html#available-workflows

env:
  FLAG: ${{ github.repository == 'scylladb/scylla-enterprise' && 'enterprise' || 'opensource' }}
  DEFAULT_BRANCH: ${{ github.repository == 'scylladb/scylla-enterprise' && 'enterprise' || 'master' }}

on:
  push:
    branches:
      - 'master'
      - 'enterprise'
      - 'branch-**'
    paths:
      - "docs/**"
  workflow_dispatch:

jobs:
  release:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          ref: ${{ env.DEFAULT_BRANCH }}
          persist-credentials: false
          fetch-depth: 0
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.10"
      - name: Set up env
        run: make -C docs FLAG="${{ env.FLAG }}" setupenv
      - name: Build docs
        run: make -C docs FLAG="${{ env.FLAG }}" multiversion
      - name: Build redirects
        run: make -C docs FLAG="${{ env.FLAG }}" redirects
      - name: Deploy docs to GitHub Pages
        run: ./docs/_utils/deploy.sh
        if: (github.ref_name == 'master' && env.FLAG == 'opensource') || (github.ref_name == 'enterprise' && env.FLAG == 'enterprise')
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/docs-pr.yaml (vendored): 32 lines removed
@@ -1,32 +0,0 @@
name: "Docs / Build PR"
# For more information,
# see https://sphinx-theme.scylladb.com/stable/deployment/production.html#available-workflows

env:
  FLAG: ${{ github.repository == 'scylladb/scylla-enterprise' && 'enterprise' || 'opensource' }}

on:
  pull_request:
    branches:
      - master
      - enterprise
    paths:
      - "docs/**"

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          persist-credentials: false
          fetch-depth: 0
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.10"
      - name: Set up env
        run: make -C docs FLAG="${{ env.FLAG }}" setupenv
      - name: Build docs
        run: make -C docs FLAG="${{ env.FLAG }}" test

.github/workflows/iwyu.yaml (vendored): 80 lines removed
@@ -1,80 +0,0 @@
name: iwyu

on:
  pull_request:
    branches:
      - master

env:
  BUILD_TYPE: RelWithDebInfo
  BUILD_DIR: build
  CLEANER_OUTPUT_PATH: build/clang-include-cleaner.log
  CLEANER_DIRS: test/unit exceptions alternator api auth cdc compaction

permissions: {}

# cancel the in-progress run upon a repush
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  read-toolchain:
    uses: ./.github/workflows/read-toolchain.yaml
  clang-include-cleaner:
    name: "Analyze #includes in source files"
    needs:
      - read-toolchain
    runs-on: ubuntu-latest
    container: ${{ needs.read-toolchain.outputs.image }}
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: true
      - run: |
          sudo dnf -y install clang-tools-extra
      - name: Generate compilation database
        run: |
          cmake \
            -DCMAKE_BUILD_TYPE=$BUILD_TYPE \
            -DCMAKE_C_COMPILER=clang \
            -DCMAKE_CXX_COMPILER=clang++ \
            -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
            -G Ninja \
            -B $BUILD_DIR \
            -S .
      - name: Build headers
        run: |
          swagger_targets=''
          for f in api/api-doc/*.json; do
            if test "${f#*.}" = json; then
              name=$(basename "$f" .json)
              if test $name != swagger20_header; then
                swagger_targets+=" scylla_swagger_gen_$name"
              fi
            fi
          done
          cmake \
            --build build \
            --target seastar_http_request_parser \
            --target idl-sources \
            --target $swagger_targets
      - run: |
          echo "::add-matcher::.github/clang-include-cleaner.json"
      - name: clang-include-cleaner
        run: |
          for d in $CLEANER_DIRS; do
            find $d -name '*.cc' -o -name '*.hh' \
              -exec echo {} \; \
              -exec clang-include-cleaner \
                --ignore-headers=seastarx.hh \
                --print=changes \
                -p $BUILD_DIR \
                {} \; | tee --append $CLEANER_OUTPUT_PATH
          done
      - run: |
          echo "::remove-matcher owner=clang-include-cleaner::"
      - uses: actions/upload-artifact@v4
        with:
          name: Logs (clang-include-cleaner)
          path: "./${{ env.CLEANER_OUTPUT_PATH }}"

.github/workflows/pr-require-backport-label.yaml (vendored): 22 lines removed
@@ -1,22 +0,0 @@
name: PR require backport label
on:
  pull_request:
    types: [opened, labeled, unlabeled, synchronize]
    branches:
      - master
      - next
jobs:
  label:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: write
    steps:
      - uses: mheap/github-action-required-labels@v5
        with:
          mode: minimum
          count: 1
          labels: "backport/none\nbackport/\\d.\\d"
          use_regex: true
          add_comment: false

.github/workflows/read-toolchain.yaml (vendored): 23 lines removed
@@ -1,23 +0,0 @@
name: Read Toolchain

on:
  workflow_call:
    outputs:
      image:
        description: "the toolchain docker image"
        value: ${{ jobs.read-toolchain.outputs.image }}

jobs:
  read-toolchain:
    runs-on: ubuntu-latest
    outputs:
      image: ${{ steps.read.outputs.image }}
    steps:
      - uses: actions/checkout@v4
        with:
          sparse-checkout: tools/toolchain/image
          sparse-checkout-cone-mode: false
      - id: read
        run: |
          image=$(cat tools/toolchain/image)
          echo "image=$image" >> $GITHUB_OUTPUT

.github/workflows/reproducible-build.yaml (vendored): 34 lines removed
@@ -1,34 +0,0 @@
name: Check Reproducible Build

on:
  schedule:
    # 5AM every friday
    - cron: '0 5 * * FRI'

permissions: {}

env:
  BUILD_MODE: release
jobs:
  build-a:
    uses: ./.github/workflows/build-scylla.yaml
    with:
      build_mode: release
  build-b:
    uses: ./.github/workflows/build-scylla.yaml
    with:
      build_mode: release
  compare-checksum:
    runs-on: ubuntu-latest
    needs:
      - build-a
      - build-b
    steps:
      - env:
          CHECKSUM_A: ${{needs.build-a.outputs.md5sum}}
          CHECKSUM_B: ${{needs.build-b.outputs.md5sum}}
        run: |
          if [ $CHECKSUM_A != $CHECKSUM_B ]; then \
            echo "::error::mismatched checksums: $CHECKSUM_A != $CHECKSUM_B"; \
            exit 1; \
          fi
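
The workflow's idea: build the same source twice independently and require byte-identical binaries, detected by comparing md5 checksums. A minimal sketch of that comparison, assuming two hypothetical build output paths:

```python
import hashlib

def md5_of(path: str) -> str:
    """Compute the md5 checksum of a file, reading it in chunks."""
    h = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

a = md5_of("build-a/scylla")  # hypothetical path to the first build's binary
b = md5_of("build-b/scylla")  # hypothetical path to the second build's binary
if a != b:
    raise SystemExit(f"mismatched checksums: {a} != {b}")
```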

.github/workflows/seastar.yaml (vendored): 50 lines removed
@@ -1,50 +0,0 @@
name: Build with the latest Seastar

on:
  schedule:
    # 5AM everyday
    - cron: '0 5 * * *'

permissions: {}

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

env:
  BUILD_DIR: build

jobs:
  build-with-the-latest-seastar:
    runs-on: ubuntu-latest
    # be consistent with tools/toolchain/image
    container: scylladb/scylla-toolchain:fedora-40-20240621
    strategy:
      matrix:
        build_type:
          - Debug
          - RelWithDebInfo
          - Dev
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: true
      - run: |
          rm -rf seastar
      - uses: actions/checkout@v4
        with:
          repository: scylladb/seastar
          submodules: true
          path: seastar
      - name: Generate the building system
        run: |
          git config --global --add safe.directory $GITHUB_WORKSPACE
          cmake \
            -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} \
            -DCMAKE_C_COMPILER=clang \
            -DCMAKE_CXX_COMPILER=clang++ \
            -G Ninja \
            -B $BUILD_DIR \
            -S .
      - run: |
          cmake --build $BUILD_DIR --target scylla

.github/workflows/sync-labels.yaml (vendored): 49 lines removed
@@ -1,49 +0,0 @@
name: Sync labels

on:
  pull_request_target:
    types: [opened, labeled, unlabeled]
    branches: [master, next]
  issues:
    types: [labeled, unlabeled]

jobs:
  label-sync:
    if: ${{ github.repository == 'scylladb/scylladb' }}
    name: Synchronize labels between PR and the issue(s) fixed by it
    runs-on: ubuntu-latest
    permissions:
      pull-requests: write
      issues: write
    steps:
      - name: Dump GitHub context
        env:
          GITHUB_CONTEXT: ${{ toJson(github) }}
        run: echo "$GITHUB_CONTEXT"
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          sparse-checkout: |
            .github/scripts/sync_labels.py
          sparse-checkout-cone-mode: false

      - name: Install dependencies
        run: sudo apt-get install -y python3-github

      - name: Pull request opened event
        if: ${{ github.event.action == 'opened' }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: python .github/scripts/sync_labels.py --repo ${{ github.repository }} --number ${{ github.event.number }} --action ${{ github.event.action }}

      - name: Pull request labeled or unlabeled event
        if: github.event_name == 'pull_request_target' && startsWith(github.event.label.name, 'backport/')
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: python .github/scripts/sync_labels.py --repo ${{ github.repository }} --number ${{ github.event.number }} --action ${{ github.event.action }} --label ${{ github.event.label.name }}

      - name: Issue labeled or unlabeled event
        if: github.event_name == 'issues' && startsWith(github.event.label.name, 'backport/')
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: python .github/scripts/sync_labels.py --repo ${{ github.repository }} --number ${{ github.event.issue.number }} --action ${{ github.event.action }} --is_issue --label ${{ github.event.label.name }}

.gitignore (vendored): 14 changed lines
@@ -3,7 +3,6 @@
.settings
build
build.ninja
build.ninja.new
cscope.*
/debian/
dist/ami/files/*.rpm
@@ -19,16 +18,3 @@ CMakeLists.txt.user
*.egg-info
__pycache__
.gdbinit
/resources
.pytest_cache
/expressions.tokens
tags
!db/tags/
testlog
test/*/*.reject
.vscode
compile_commands.json
.ccls-cache/
.mypy_cache
.envrc
clang_build

.gitmodules (vendored): 18 changed lines
@@ -6,18 +6,6 @@
	path = swagger-ui
	url = ../scylla-swagger-ui
	ignore = dirty
[submodule "abseil"]
	path = abseil
	url = ../abseil-cpp
[submodule "scylla-jmx"]
	path = tools/jmx
	url = ../scylla-jmx
[submodule "scylla-tools"]
	path = tools/java
	url = ../scylla-tools-java
[submodule "scylla-python3"]
	path = tools/python3
	url = ../scylla-python3
[submodule "tools/cqlsh"]
	path = tools/cqlsh
	url = ../scylla-cqlsh
[submodule "dist/ami/files/scylla-ami"]
	path = dist/ami/files/scylla-ami
	url = ../scylla-ami

.mailmap: 3 lines removed
@@ -1,3 +0,0 @@
Avi Kivity <avi@scylladb.com> Avi Kivity' via ScyllaDB development <scylladb-dev@googlegroups.com>
Raphael S. Carvalho <raphaelsc@scylladb.com> Raphael S. Carvalho' via ScyllaDB development <scylladb-dev@googlegroups.com>
Pavel Emelyanov <xemul@scylladb.com> Pavel Emelyanov' via ScyllaDB development <scylladb-dev@googlegroups.com>
377
CMakeLists.txt
377
CMakeLists.txt
@@ -1,281 +1,140 @@
|
||||
cmake_minimum_required(VERSION 3.27)
|
||||
##
|
||||
## For best results, first compile the project using the Ninja build-system.
|
||||
##
|
||||
|
||||
cmake_minimum_required(VERSION 3.7)
|
||||
project(scylla)
|
||||
|
||||
include(CTest)
|
||||
if (NOT DEFINED FOR_IDE AND NOT DEFINED ENV{FOR_IDE} AND NOT DEFINED ENV{CLION_IDE})
|
||||
message(FATAL_ERROR "This CMakeLists.txt file is only valid for use in IDEs, please define FOR_IDE to acknowledge this.")
|
||||
endif()
|
||||
|
||||
list(APPEND CMAKE_MODULE_PATH
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/cmake
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/seastar/cmake)
|
||||
# Default value. A more accurate list is populated through `pkg-config` below if `seastar.pc` is available.
|
||||
set(SEASTAR_INCLUDE_DIRS "seastar")
|
||||
|
||||
# Set the possible values of build type for cmake-gui
|
||||
set(scylla_build_types
|
||||
"Debug" "RelWithDebInfo" "Dev" "Sanitize" "Coverage")
|
||||
if(DEFINED CMAKE_BUILD_TYPE)
|
||||
set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS
|
||||
${scylla_build_types})
|
||||
if(NOT CMAKE_BUILD_TYPE)
|
||||
set(CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE
|
||||
STRING "Choose the type of build." FORCE)
|
||||
message(WARNING "CMAKE_BUILD_TYPE not specified, Using 'RelWithDebInfo'")
|
||||
elseif(NOT CMAKE_BUILD_TYPE IN_LIST scylla_build_types)
|
||||
message(FATAL_ERROR "Unknown CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}. "
|
||||
"Following types are supported: ${scylla_build_types}")
|
||||
endif()
|
||||
endif(DEFINED CMAKE_BUILD_TYPE)
|
||||
# These paths are always available, since they're included in the repository. Additional DPDK headers are placed while
|
||||
# Seastar is built, and are captured in `SEASTAR_INCLUDE_DIRS` through parsing the Seastar pkg-config file (below).
|
||||
set(SEASTAR_DPDK_INCLUDE_DIRS
|
||||
seastar/dpdk/lib/librte_eal/common/include
|
||||
seastar/dpdk/lib/librte_eal/common/include/generic
|
||||
seastar/dpdk/lib/librte_eal/common/include/x86
|
||||
seastar/dpdk/lib/librte_ether)
|
||||
|
||||
include(mode.common)
|
||||
if(CMAKE_CONFIGURATION_TYPES)
|
||||
foreach(config ${CMAKE_CONFIGURATION_TYPES})
|
||||
include(mode.${config})
|
||||
list(APPEND scylla_build_modes ${scylla_build_mode_${config}})
|
||||
find_package(PkgConfig REQUIRED)
|
||||
|
||||
set(ENV{PKG_CONFIG_PATH} "${CMAKE_SOURCE_DIR}/seastar/build/release:$ENV{PKG_CONFIG_PATH}")
|
||||
pkg_check_modules(SEASTAR seastar)
|
||||
|
||||
find_package(Boost COMPONENTS filesystem program_options system thread)
|
||||
|
||||
##
|
||||
## Populate the names of all source and header files in the indicated paths in a designated variable.
|
||||
##
|
||||
## When RECURSIVE is specified, directories are traversed recursively.
|
||||
##
|
||||
## Use: scan_scylla_source_directories(VAR my_result_var [RECURSIVE] PATHS [path1 path2 ...])
|
||||
##
|
||||
function (scan_scylla_source_directories)
|
||||
set(options RECURSIVE)
|
||||
set(oneValueArgs VAR)
|
||||
set(multiValueArgs PATHS)
|
||||
cmake_parse_arguments(args "${options}" "${oneValueArgs}" "${multiValueArgs}" "${ARGN}")
|
||||
|
||||
set(globs "")
|
||||
|
||||
foreach (dir ${args_PATHS})
|
||||
list(APPEND globs "${dir}/*.cc" "${dir}/*.hh")
|
||||
endforeach()
|
||||
add_custom_target(mode_list
|
||||
COMMAND ${CMAKE_COMMAND} -E echo "$<JOIN:${scylla_build_modes}, >"
|
||||
COMMENT "List configured modes"
|
||||
BYPRODUCTS mode-list.phony.stamp
|
||||
COMMAND_EXPAND_LISTS)
|
||||
else()
|
||||
include(mode.${CMAKE_BUILD_TYPE})
|
||||
add_custom_target(mode_list
|
||||
${CMAKE_COMMAND} -E echo "${scylla_build_mode}"
|
||||
COMMENT "List configured modes")
|
||||
endif()
|
||||
|
||||
include(limit_jobs)
|
||||
# Configure Seastar compile options to align with Scylla
|
||||
set(CMAKE_CXX_STANDARD "23" CACHE INTERNAL "")
|
||||
set(CMAKE_CXX_EXTENSIONS ON CACHE INTERNAL "")
|
||||
set(CMAKE_CXX_SCAN_FOR_MODULES OFF CACHE INTERNAL "")
|
||||
set(CMAKE_CXX_VISIBILITY_PRESET hidden)
|
||||
if (args_RECURSIVE)
|
||||
set(glob_kind GLOB_RECURSE)
|
||||
else()
|
||||
set(glob_kind GLOB)
|
||||
endif()
|
||||
|
||||
set(Seastar_TESTING ON CACHE BOOL "" FORCE)
|
||||
set(Seastar_API_LEVEL 7 CACHE STRING "" FORCE)
|
||||
set(Seastar_DEPRECATED_OSTREAM_FORMATTERS OFF CACHE BOOL "" FORCE)
|
||||
set(Seastar_APPS ON CACHE BOOL "" FORCE)
|
||||
set(Seastar_EXCLUDE_APPS_FROM_ALL ON CACHE BOOL "" FORCE)
|
||||
set(Seastar_EXCLUDE_TESTS_FROM_ALL ON CACHE BOOL "" FORCE)
|
||||
set(Seastar_UNUSED_RESULT_ERROR ON CACHE BOOL "" FORCE)
|
||||
add_subdirectory(seastar)
|
||||
set(ABSL_PROPAGATE_CXX_STD ON CACHE BOOL "" FORCE)
|
||||
file(${glob_kind} var
|
||||
${globs})
|
||||
|
||||
find_package(Sanitizers QUIET)
|
||||
set(sanitizer_cxx_flags
|
||||
$<$<IN_LIST:$<CONFIG>,Debug;Sanitize>:$<TARGET_PROPERTY:Sanitizers::address,INTERFACE_COMPILE_OPTIONS>;$<TARGET_PROPERTY:Sanitizers::undefined_behavior,INTERFACE_COMPILE_OPTIONS>>)
|
||||
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
|
||||
set(ABSL_GCC_FLAGS ${sanitizer_cxx_flags})
|
||||
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
|
||||
set(ABSL_LLVM_FLAGS ${sanitizer_cxx_flags})
|
||||
endif()
|
||||
set(ABSL_DEFAULT_LINKOPTS
|
||||
$<$<IN_LIST:$<CONFIG>,Debug;Sanitize>:$<TARGET_PROPERTY:Sanitizers::address,INTERFACE_LINK_LIBRARIES>;$<TARGET_PROPERTY:Sanitizers::undefined_behavior,INTERFACE_LINK_LIBRARIES>>)
|
||||
add_subdirectory(abseil)
|
||||
add_library(absl-headers INTERFACE)
|
||||
target_include_directories(absl-headers SYSTEM INTERFACE
|
||||
"${PROJECT_SOURCE_DIR}/abseil")
|
||||
add_library(absl::headers ALIAS absl-headers)
|
||||
set(${args_VAR} ${var} PARENT_SCOPE)
|
||||
endfunction()
|
||||
|
||||
# Exclude absl::strerror from the default "all" target since it's not
|
||||
# used in Scylla build and, moreover, makes use of deprecated glibc APIs,
|
||||
# such as sys_nerr, which are not exposed from "stdio.h" since glibc 2.32,
|
||||
# which happens to be the case for recent Fedora distribution versions.
|
||||
#
|
||||
# Need to use the internal "absl_strerror" target name instead of namespaced
|
||||
# variant because `set_target_properties` does not understand the latter form,
|
||||
# unfortunately.
|
||||
set_target_properties(absl_strerror PROPERTIES EXCLUDE_FROM_ALL TRUE)
## Although Seastar is an external project, it is common enough to explore the sources while doing
## Scylla development that we'll treat the Seastar sources as part of this project for easier navigation.
scan_scylla_source_directories(
  VAR SEASTAR_SOURCE_FILES
  RECURSIVE

# System library dependencies
find_package(Boost REQUIRED
  COMPONENTS filesystem program_options system thread regex unit_test_framework)
target_link_libraries(Boost::regex
  INTERFACE
    ICU::i18n
    ICU::uc)
find_package(Lua REQUIRED)
find_package(ZLIB REQUIRED)
find_package(ICU COMPONENTS uc i18n REQUIRED)
find_package(fmt 9.0.0 REQUIRED)
find_package(libdeflate REQUIRED)
find_package(libxcrypt REQUIRED)
find_package(Snappy REQUIRED)
find_package(RapidJSON REQUIRED)
find_package(xxHash REQUIRED)
find_package(zstd REQUIRED)
  PATHS
    seastar/core
    seastar/http
    seastar/json
    seastar/net
    seastar/rpc
    seastar/tests
    seastar/util)

set(scylla_gen_build_dir "${CMAKE_BINARY_DIR}/gen")
file(MAKE_DIRECTORY "${scylla_gen_build_dir}")
scan_scylla_source_directories(
  VAR SCYLLA_ROOT_SOURCE_FILES
  PATHS .)

include(add_version_library)
generate_scylla_version()
scan_scylla_source_directories(
  VAR SCYLLA_SUB_SOURCE_FILES
  RECURSIVE

add_library(scylla-zstd STATIC
  zstd.cc)
target_link_libraries(scylla-zstd
  PRIVATE
    db
    Seastar::seastar
    zstd::libzstd)
  PATHS
    api
    auth
    cql3
    db
    dht
    exceptions
    gms
    index
    io
    locator
    message
    repair
    service
    sstables
    streaming
    tests
    thrift
    tracing
    transport
    utils)

add_library(scylla-main STATIC)
target_sources(scylla-main
  PRIVATE
    absl-flat_hash_map.cc
    bytes.cc
    client_data.cc
    clocks-impl.cc
    collection_mutation.cc
    compress.cc
    converting_mutation_partition_applier.cc
    counters.cc
    direct_failure_detector/failure_detector.cc
    duration.cc
    exceptions/exceptions.cc
    frozen_schema.cc
    generic_server.cc
    debug.cc
    init.cc
    keys.cc
    multishard_mutation_query.cc
    mutation_query.cc
    partition_slice_builder.cc
    querier.cc
    query.cc
    query_ranges_to_vnodes.cc
    query-result-set.cc
    tombstone_gc_options.cc
    tombstone_gc.cc
    reader_concurrency_semaphore.cc
    row_cache.cc
    schema_mutations.cc
    serializer.cc
    sstables_loader.cc
    table_helper.cc
    tasks/task_manager.cc
    timeout_config.cc
    unimplemented.cc
    validation.cc
    vint-serialization.cc)
target_link_libraries(scylla-main
  PRIVATE
    "$<LINK_LIBRARY:WHOLE_ARCHIVE,scylla-zstd>"
    db
    absl::headers
    absl::btree
    absl::hash
    absl::raw_hash_set
    Seastar::seastar
    Snappy::snappy
    systemd
    ZLIB::ZLIB)
scan_scylla_source_directories(
  VAR SCYLLA_GEN_SOURCE_FILES
  RECURSIVE
  PATHS build/release/gen)

option(Scylla_CHECK_HEADERS
  "Add check-headers target for checking the self-containedness of headers")
if(Scylla_CHECK_HEADERS)
  add_custom_target(check-headers)
  # Compatibility target used by CI, which builds "check-headers" only for
  # the "Dev" mode.
  # Our CI currently builds "dev-headers" using ninja without specifying a build
  # mode; "dev" is actually a prefix encoded in the target name for the
  # underlying "headers" target, while we don't have this convention in CMake
  # targets. In contrast, "check-headers" is built for all configurations
  # defined by "CMAKE_DEFAULT_CONFIGS"; however, we only need to build
  # "check-headers" for the "Dev" configuration. Therefore, before updating
  # the CI to build "check-headers:Dev", let's add a new target that
  # specifically builds "check-headers" only for the Dev configuration. The
  # new target will do nothing for other configurations.
  add_custom_target(dev-headers
    COMMAND ${CMAKE_COMMAND}
      "$<IF:$<CONFIG:Dev>,--build;${CMAKE_BINARY_DIR};--config;$<CONFIG>;--target;check-headers,-E;echo;skipping;dev-headers;in;$<CONFIG>>"
    COMMAND_EXPAND_LISTS)
endif()
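# For illustration, depending on the active configuration the generator
# expression above makes "dev-headers" run the equivalent of one of these
# shell commands (a sketch; "build" stands for the binary directory):
#
#   cmake --build build --config Dev --target check-headers    # in the Dev config
#   cmake -E echo skipping dev-headers in Release               # in any other config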

include(check_headers)
check_headers(check-headers scylla-main
  GLOB ${CMAKE_CURRENT_SOURCE_DIR}/*.hh)

add_subdirectory(api)
add_subdirectory(alternator)
add_subdirectory(db)
add_subdirectory(auth)
add_subdirectory(cdc)
add_subdirectory(compaction)
add_subdirectory(cql3)
add_subdirectory(data_dictionary)
add_subdirectory(dht)
add_subdirectory(gms)
add_subdirectory(idl)
add_subdirectory(index)
add_subdirectory(lang)
add_subdirectory(locator)
add_subdirectory(message)
add_subdirectory(mutation)
add_subdirectory(mutation_writer)
add_subdirectory(node_ops)
add_subdirectory(readers)
add_subdirectory(redis)
add_subdirectory(replica)
add_subdirectory(raft)
add_subdirectory(repair)
add_subdirectory(rust)
add_subdirectory(schema)
add_subdirectory(service)
add_subdirectory(sstables)
add_subdirectory(streaming)
add_subdirectory(test)
add_subdirectory(tools)
add_subdirectory(tracing)
add_subdirectory(transport)
add_subdirectory(types)
add_subdirectory(utils)
add_version_library(scylla_version
  release.cc)
set(SCYLLA_SOURCE_FILES
  ${SCYLLA_ROOT_SOURCE_FILES}
  ${SCYLLA_GEN_SOURCE_FILES}
  ${SCYLLA_SUB_SOURCE_FILES})

add_executable(scylla
  main.cc)
target_link_libraries(scylla PRIVATE
  scylla-main
  api
  auth
  alternator
  db
  cdc
  compaction
  cql3
  data_dictionary
  dht
  gms
  idl
  index
  lang
  locator
  message
  mutation
  mutation_writer
  raft
  readers
  redis
  repair
  replica
  schema
  scylla_version
  service
  sstables
  streaming
  test-perf
  tools
  tracing
  transport
  types
  utils)
  ${SEASTAR_SOURCE_FILES}
  ${SCYLLA_SOURCE_FILES})

target_link_libraries(scylla PRIVATE
  seastar
  absl::headers
  Boost::program_options)
# Note that since CLion does not understand GCC6 concepts, we always disable them (even if users configure otherwise).
# CLion seems to have trouble with `-U` (macro undefinition), so we do it this way instead.
list(REMOVE_ITEM SEASTAR_CFLAGS "-DHAVE_GCC6_CONCEPTS")

target_include_directories(scylla PRIVATE
  "${CMAKE_CURRENT_SOURCE_DIR}"
  "${scylla_gen_build_dir}")
# If the Seastar pkg-config information is available, append to the default flags.
#
# For ease of browsing the source code, we always pretend that DPDK is enabled.
target_compile_options(scylla PUBLIC
  -std=gnu++14
  -DHAVE_DPDK
  -DHAVE_HWLOC
  "${SEASTAR_CFLAGS}")

add_subdirectory(dist)
# The order matters here: prefer the "static" DPDK directories to any dynamic paths from pkg-config. Some files are only
# available dynamically, though.
target_include_directories(scylla PUBLIC
  .
  ${SEASTAR_DPDK_INCLUDE_DIRS}
  ${SEASTAR_INCLUDE_DIRS}
  ${Boost_INCLUDE_DIRS}
  build/release/gen)

CONTRIBUTING.md
@@ -1,22 +1,11 @@
# Contributing to Scylla
# Asking questions or requesting help

## Asking questions or requesting help
Use the [ScyllaDB user mailing list](https://groups.google.com/forum/#!forum/scylladb-users) for general questions and help.

Use the [ScyllaDB Community Forum](https://forum.scylladb.com) or the [Slack workspace](http://slack.scylladb.com) for general questions and help.
# Reporting an issue

Join the [Scylla Developers mailing list](https://groups.google.com/g/scylladb-dev) for deeper technical discussions and to discuss your ideas for contributions.
Please use the [Issue Tracker](https://github.com/scylladb/scylla/issues/) to report issues. Fill in as much information as you can in the issue template, especially for performance problems.

## Reporting an issue
# Contributing Code to Scylla

Please use the [issue tracker](https://github.com/scylladb/scylla/issues/) to report issues or to suggest features. Fill in as much information as you can in the issue template, especially for performance problems.

## Contributing code to Scylla

Before you can contribute code to Scylla for the first time, you should sign the [Contributor License Agreement](https://www.scylladb.com/open-source/contributor-agreement/) and send the signed form to cla@scylladb.com. You can then submit your changes as patches to the [scylladb-dev mailing list](https://groups.google.com/forum/#!forum/scylladb-dev) or as a pull request to the [Scylla project on GitHub](https://github.com/scylladb/scylla).
If you need help formatting or sending patches, [check out these instructions](https://github.com/scylladb/scylla/wiki/Formatting-and-sending-patches).

The Scylla C++ source code uses the [Seastar coding style](https://github.com/scylladb/seastar/blob/master/coding-style.md), so please adhere to that in your patches. Note that Scylla code is written with `using namespace seastar`, so you should not explicitly add the `seastar::` prefix to Seastar symbols. You will usually not need to add `using namespace seastar` to new source files, because most Scylla header files have `#include "seastarx.hh"`, which does this.

Header files in Scylla must be self-contained, i.e., each can be included without having to include specific other headers first. To verify that your change did not break this property, run `ninja dev-headers`. If you added or removed header files, you must `touch configure.py` first - this will cause `configure.py` to be automatically re-run to generate a fresh list of header files.
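For example, after adding a new header you can verify self-containedness with:

```bash
$ touch configure.py
$ ninja dev-headers
```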

For more criteria on what reviewers consider good code, see the [review checklist](https://github.com/scylladb/scylla/blob/master/docs/dev/review-checklist.md).
To contribute code to Scylla, you need to sign the [Contributor License Agreement](http://www.scylladb.com/opensource/cla/) and send your changes as [patches](https://github.com/scylladb/scylla/wiki/Formatting-and-sending-patches) to the [mailing list](https://groups.google.com/forum/#!forum/scylladb-dev). We don't accept pull requests on GitHub.

HACKING.md (213 lines)
@@ -18,36 +18,13 @@ $ git submodule update --init --recursive

### Dependencies

Scylla is fairly fussy about its build environment, requiring a very recent
version of the C++20 compiler and numerous tools and libraries to build.
Scylla depends on the system package manager for its development dependencies.

Run `./install-dependencies.sh` (as root) to use your Linux distribution's
package manager to install the appropriate packages on your build machine.
However, this will only work on very recent distributions. For example,
currently Fedora users must upgrade to Fedora 32, otherwise the C++ compiler
will be too old and will not support the new C++20 standard that Scylla uses.

Alternatively, to avoid having to upgrade your build machine or install
various packages on it, we provide another option - the **frozen toolchain**.
This is a script, `./tools/toolchain/dbuild`, that can execute build or run
commands inside a Docker image that contains exactly the right build tools and
libraries. The `dbuild` technique is useful for beginners, but is also the way
in which ScyllaDB produces official releases, so it is highly recommended.

To use `dbuild`, you simply prefix any build or run command with it. Building
and running Scylla becomes as easy as:

```bash
$ ./tools/toolchain/dbuild ./configure.py
$ ./tools/toolchain/dbuild ninja build/release/scylla
$ ./tools/toolchain/dbuild ./build/release/scylla --developer-mode 1
```
Running `./install-dependencies.sh` (as root) installs the appropriate packages based on your Linux distribution.

### Build system

**Note**: Compiling Scylla requires, conservatively, 2 GB of memory per native
thread, and up to 3 GB per native thread while linking. GCC >= 10 is
required.
**Note**: Compiling Scylla requires, conservatively, 2 GB of memory per native thread, and up to 3 GB per native thread while linking.

Scylla is built with [Ninja](https://ninja-build.org/), a low-level rule-based system. A Python script, `configure.py`, generates a Ninja file (`build.ninja`) based on configuration options.
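For example, a minimal configure-and-build cycle (a sketch; choose the `-j` value according to the memory note above, roughly your free RAM divided by 3 GB):

```bash
$ ./configure.py --mode=release
$ ninja -j4 build/release/scylla   # 4 jobs is an arbitrary example value
```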

@@ -66,9 +43,11 @@ The full suite of options for project configuration is available via
$ ./configure.py --help
```

The most important option is:
The most important options are:

- `--enable-dpdk`: [DPDK](http://dpdk.org/) is a set of libraries and drivers for fast packet processing. During development, it's not necessary to enable support even if it is supported by your platform.
- `--mode={release,debug,all}`: Debug mode enables [AddressSanitizer](https://github.com/google/sanitizers/wiki/AddressSanitizer) and allows for debugging with tools like GDB. Debugging builds are generally slower and generate much larger object files than release builds.

- `--{enable,disable}-dpdk`: [DPDK](http://dpdk.org/) is a set of libraries and drivers for fast packet processing. During development, it's not necessary to enable support even if it is supported by your platform.

Source files and build targets are tracked manually in `configure.py`, so the script needs to be updated when new files or targets are added or removed.

@@ -76,30 +55,6 @@ To save time -- for instance, to avoid compiling all unit tests -- you can also

```bash
$ ninja-build build/release/tests/schema_change_test
$ ninja-build build/release/service/storage_proxy.o
```

You can also specify a single mode. For example,

```bash
$ ninja-build release
```

will build everything in release mode. The valid modes are:

* Debug: Enables [AddressSanitizer](https://github.com/google/sanitizers/wiki/AddressSanitizer)
  and other sanity checks. It has no optimizations, which allows for debugging with tools like
  GDB. Debugging builds are generally slower and generate much larger object files than release builds.
* Release: Fewer checks and more optimizations. It still has debug info.
* Dev: No optimizations or debug info. The objective is to compile and link as fast as possible.
  This is useful for the first iterations of a patch.

Note that, by default, unit test binaries are stripped, so they can't be used with gdb or seastar-addr2line.
To include debug information in the unit test binary, build the test binary with a `_g` suffix. For example,

```bash
$ ninja-build build/release/tests/schema_change_test_g
```

### Unit testing
@@ -128,53 +83,9 @@ The `-c1 -m1G` arguments limit this Seastar-based test to a single system thread

### Preparing patches

All changes to Scylla are submitted as patches to the public [mailing list](mailto:scylladb-dev@googlegroups.com). Once a patch is approved by one of the maintainers of the project, it is committed to the maintainers' copy of the repository at https://github.com/scylladb/scylla.
All changes to Scylla are submitted as patches to the public mailing list. Once a patch is approved by one of the maintainers of the project, it is committed to the maintainers' copy of the repository at https://github.com/scylladb/scylla.

Detailed instructions for formatting patches for the mailing list and advice on preparing good patches are available at the [ScyllaDB website](http://docs.scylladb.com/contribute/). There are also some guidelines that can help you make the patch review process smoother:

1. Before generating patches, make sure your Git configuration points to `.gitorderfile`. You can do it by running

   ```bash
   $ git config diff.orderfile .gitorderfile
   ```

2. If you are sending more than a single patch, push your changes into a new branch of your fork of Scylla on GitHub and add a URL pointing to this branch to your cover letter.

3. If you are sending a new revision of an earlier patchset, add a brief summary of changes in this version, for example:
   ```
   In v3:
       - declared move constructor and move assignment operator as noexcept
       - used std::variant instead of a union
       ...
   ```

4. Add information about the tests run with this fix. It can look like
   ```
   "Tests: unit ({mode}), dtest ({smp})"
   ```

   Typically this is "Tests: unit (dev)", although running debug tests is encouraged.

5. When answering review comments, prefer inline quotes, as they make it easier to track the conversation across multiple e-mails.

6. The Linux kernel's [Submitting Patches](https://www.kernel.org/doc/html/v4.19/process/submitting-patches.html) document offers excellent advice on how to prepare patches and patchsets for review. Since the Scylla development process is derived from the kernel's, almost all of the advice there is directly applicable.

### Finding a person to review and merge your patches

You can use the `scripts/find-maintainer` script to find a subsystem maintainer and/or reviewer for your patches. The script accepts a filename in the git source tree as an argument and outputs a list of subsystems the file belongs to and their respective maintainers and reviewers. For example, if you changed the `cql3/statements/create_view_statement.hh` file, run the script as follows:

```bash
$ ./scripts/find-maintainer cql3/statements/create_view_statement.hh
```

and you will get output like this:

```
CQL QUERY LANGUAGE
  Tomasz Grabiec <tgrabiec@scylladb.com>   [maintainer]
MATERIALIZED VIEWS
  Nadav Har'El <nyh@scylladb.com>          [reviewer]
```
Detailed instructions for formatting patches for the mailing list and advice on preparing good patches are available at the [ScyllaDB website](http://docs.scylladb.com/contribute/).

### Running Scylla

@@ -195,11 +106,11 @@ $ # Edit configuration options as appropriate
$ SCYLLA_HOME=$HOME/scylla build/release/scylla
```

The `scylla.yaml` file in the repository by default writes all database data to `/var/lib/scylla`, which likely requires root access. Change the `data_file_directories`, `commitlog_directory` and `schema_commitlog_directory` fields as appropriate.
The `scylla.yaml` file in the repository by default writes all database data to `/var/lib/scylla`, which likely requires root access. Change the `data_file_directories` and `commitlog_directory` fields as appropriate.

Scylla has a number of requirements for the file system and operating system to operate ideally and at peak performance. However, during development, these requirements can be relaxed with the `--developer-mode` flag.

Additionally, when running on under-powered platforms like portable laptops, the `--overprovisioned` flag is useful.

On a development machine, one might run Scylla as

@@ -207,29 +118,6 @@ On a development machine, one might run Scylla as
$ SCYLLA_HOME=$HOME/scylla build/release/scylla --overprovisioned --developer-mode=yes
```

To interact with Scylla, it is recommended to build our versions of
cqlsh and nodetool. They are available at
https://github.com/scylladb/scylla-tools-java and can be built with

```bash
$ sudo ./install-dependencies.sh
$ ant jar
```

cqlsh should work out of the box, but nodetool depends on a running
scylla-jmx (https://github.com/scylladb/scylla-jmx). It can be built
with

```bash
$ mvn package
```

and must be started with

```bash
$ ./scripts/scylla-jmx
```

### Branches and tags

Multiple release branches are maintained on the Git repository at https://github.com/scylladb/scylla. Release 1.5, for instance, is tracked on the `branch-1.5` branch.
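For example, to build on top of the 1.5 release branch (standard git usage; the branch name follows the convention above):

```bash
$ git fetch origin
$ git checkout -t origin/branch-1.5
```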

@@ -320,7 +208,7 @@ In this example, `10.0.0.2` will be sent up to 16 jobs and the local machine will

When a compilation is in progress, the status of jobs on all remote machines can be visualized in the terminal with `distccmon-text` or graphically as a GTK application with `distccmon-gnome`.
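For example (the refresh-interval argument of `distccmon-text` is optional):

```bash
$ distccmon-text 2     # print job status every 2 seconds
$ distccmon-gnome &    # or watch the same information in a GUI
```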

One thing to keep in mind is that linking object files happens on the coordinating machine, which can be a bottleneck. See the next sections on speeding up this process.
One thing to keep in mind is that linking object files happens on the coordinating machine, which can be a bottleneck. See the next section on speeding up this process.

### Using the `gold` linker

@@ -330,24 +218,6 @@ Linking Scylla can be slow. The gold linker can replace GNU ld and often speeds
$ sudo alternatives --config ld
```

### Using split dwarf

With debug info enabled, most of the link time is spent copying and
relocating it. It is possible to leave most of the debug info out of
the link by writing it to a side .dwo file. This is done by passing
`-gsplit-dwarf` to gcc.

Unfortunately, just `-gsplit-dwarf` would slow down `gdb` startup. To
avoid that, the gold linker can be told to create an index with
`--gdb-index`.

More info at https://gcc.gnu.org/wiki/DebugFission.

Both options can be enabled by passing `--split-dwarf` to configure.py.

Note that distcc is *not* compatible with it, but icecream
(https://github.com/icecc/icecream) is.
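For example, a release build configured with split dwarf enabled:

```bash
$ ./configure.py --mode=release --split-dwarf
```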

### Testing changes in Seastar with Scylla

Sometimes Scylla development is closely tied to a feature being developed in Seastar. It can be useful to compile Scylla with a particular check-out of Seastar.
@@ -361,62 +231,3 @@ $ git remote add local /home/tsmith/src/seastar
$ git remote update
$ git checkout -t local/my_local_seastar_branch
```

### Generating code coverage report

Install dependencies:

    $ dnf install llvm  # for llvm-profdata and llvm-cov
    $ dnf install lcov  # for genhtml

Instruct `configure.py` to generate build files for `coverage` mode:

    $ ./configure.py --mode=coverage

Build the tests you want to run, then run them via `test.py` (important!):

    $ ./test.py --mode=coverage [...]

Alternatively, you can run individual tests via `./scripts/coverage.py --run`.

Open the link printed at the end. Be horrified. Go and write more tests.

For more details, see `./scripts/coverage.py --help`.

### Resolving stack backtraces

Scylla may print stack backtraces to the log for several reasons.
For example:
- When aborting (e.g. due to an assertion failure, internal error, or segfault)
- When detecting Seastar reactor stalls (where a Seastar task runs for a long time without yielding the CPU to other tasks on that shard)

The backtraces contain code pointers, so they are not very helpful without resolving them into code locations.
To resolve the backtraces, one needs the Scylla relocatable package that contains the scylla binary (with debug information),
as well as the dynamic libraries it is linked against.

Builds from our automated build system are uploaded to the cloud
and can be searched on http://backtrace.scylladb.com/

Make sure you have the scylla server's exact `build-id` to locate
its respective relocatable package, required for decoding the backtraces it prints.

The build-id is printed to the system log when scylla starts.
It can also be found by executing `scylla --build-id`, or
by using the `file` utility, for example:
```
$ scylla --build-id
4cba12e6eb290a406bfa4930918db23941fd4be3

$ file scylla
scylla: ELF 64-bit LSB executable, x86-64, version 1 (SYSV), dynamically linked, interpreter /lib64/ld-linux-x86-64.so.2, for GNU/Linux 3.2.0, BuildID[sha1]=4cba12e6eb290a406bfa4930918db23941fd4be3, with debug_info, not stripped
```

To find the build-id of a coredump, use the `eu-unstrip` utility as follows:
```
$ eu-unstrip -n --core <coredump> | awk '/scylla$/ { s=$2; sub(/@.*$/, "", s); print s; exit(0); }'
4cba12e6eb290a406bfa4930918db23941fd4be3
```

### Core dump debugging

See [debugging.md](docs/dev/debugging.md).

IDL.md (new file, 103 lines)
@@ -0,0 +1,103 @@
# IDL definition
The schema we use is similar to a C++ schema.
Use a class or struct similar to the object you need the serializer for.
Use a namespace when applicable.

## Keywords
* class/struct - a class or a struct, like C++;
  a class/struct can have a final or stub marker
* namespace - has the same C++ meaning
* enum class - has the same C++ meaning
* final - a modifier for a class; when a class is marked as final it will not contain a size parameter. Note that a final class cannot be extended by a future version, so use with care
* stub class - when a class is marked as stub, no code will be generated for this class and it is only there as documentation
* version attributes - marking a field with [[version id]] marks that the field is available from a specific version
* template - a template class definition, like C++

## Syntax

### Namespace
```
namespace ns_name { namespace-body }
```
* ns_name: either a previously unused identifier, in which case this is an original-namespace-definition, or the name of a namespace, in which case this is an extension-namespace-definition
* namespace-body: a possibly empty sequence of declarations of any kind (including class and struct definitions as well as nested namespaces)

### class/struct
`
class-key class-name final(optional) stub(optional) { member-specification } ;(optional)
`
* class-key: one of class or struct.
* class-name: the name of the class that's being defined, optionally followed by the keyword final, optionally followed by the keyword stub
* final: when a class is marked as final, it can not be extended and there is no need to serialize its size; use with care.
* stub: when a class is marked as stub, no code will be generated for it and it is added for documentation only.
* member-specification: a list of access specifiers and public member accessors; see class member below.
* to be compatible with C++, a class definition can be followed by a semicolon.
### enum
`enum-key identifier enum-base { enumerator-list(optional) }`
* enum-key: only enum class is supported
* identifier: the name of the enumeration that's being declared.
* enum-base: a colon (:), followed by a type-specifier-seq that names an integral type (see the C++ standard for the full list of all possible integral types).
* enumerator-list: a comma-separated list of enumerator definitions, each of which is either simply an identifier, which becomes the name of the enumerator, or an identifier with an initializer: identifier = integral value.
Note that though C++ allows a constexpr as an initializer value, it makes the documentation less readable, hence it is not permitted.

### class member
`type member-access attributes(optional) default-value(optional);`
* type: any valid C++ type, following the C++ notation. Note that there should be a serializer for the type, but declaration order is not mandatory
* member-access: the way the member can be accessed. If the member is public, it can be the name itself; if not, it can be a getter function, which should be followed by braces. Note that getters can (and probably should) be const methods.
* attributes: attributes are defined by square brackets. Currently they are used to mark the version in which a specific member was added: [ [ version version-number ] ] marks that the specific member was added in the given version number.

### template
`template < parameter-list > class-declaration`
* parameter-list - a non-empty comma-separated list of the template parameters.
* class-declaration - (see the class section) the class name declared becomes a template name.

## IDL example
Forward-slash (`//`) comments are ignored until the end of the line.
```
namespace utils {
// An example of a stub class
class UUID stub {
    int64_t most_sig_bits;
    int64_t least_sig_bits;
}
}

namespace gms {
// an enum example
enum class application_state:int {STATUS = 0,
        LOAD,
        SCHEMA,
        DC};

// example of a final class
class versioned_value final {
    // getter and setter as public members
    int version;
    sstring value;
}

class heart_beat_state {
    // getter as a function
    int32_t get_generation();
    // default value example
    int32_t get_heart_beat_version() = 1;
}

class endpoint_state {
    heart_beat_state get_heart_beat_state();
    std::map<application_state, versioned_value> get_application_state_map();
}

class gossip_digest {
    inet_address get_endpoint();
    int32_t get_generation();
    // mark that a field was added in a specific version
    int32_t get_max_version() [ [version 0.14.2] ];
}

class gossip_digest_ack {
    std::vector<gossip_digest> digests();
    std::map<inet_address, gms::endpoint_state> get_endpoint_state_map();
}
}
```
@@ -1,11 +1,2 @@
This project includes code developed by the Apache Software Foundation (http://www.apache.org/),
especially Apache Cassandra.

It includes files from https://github.com/antonblanchard/crc32-vpmsum (author Anton Blanchard <anton@au.ibm.com>, IBM).
These files are located in utils/arch/powerpc/crc32-vpmsum. Their license may be found in licenses/LICENSE-crc32-vpmsum.TXT.

It includes modified code from https://gitbox.apache.org/repos/asf?p=cassandra-dtest.git (owned by The Apache Software Foundation)

It includes modified tests from https://github.com/etcd-io/etcd.git (owned by The etcd Authors)

It includes files from https://github.com/bytecodealliance/wasmtime-cpp (owned by Bytecode Alliance), licensed with Apache License 2.0.

README-DPDK.md (new file, 29 lines)
@@ -0,0 +1,29 @@
Seastar and DPDK
================

Seastar uses the Data Plane Development Kit to drive NIC hardware directly. This
provides an enormous performance boost.

To enable DPDK, specify `--enable-dpdk` to `./configure.py`, and `--dpdk-pmd` as a
run-time parameter. This will use the DPDK package provided as a git submodule with the
Seastar sources.
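Putting the two flags together, a sketch of a DPDK-enabled build and run (the binary path assumes a Scylla release build; your NIC may require additional setup):

```bash
$ ./configure.py --enable-dpdk
$ ninja build/release/scylla
$ sudo ./build/release/scylla --dpdk-pmd
```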

To use your own self-compiled DPDK package, follow this procedure:

1. Set up the host to compile DPDK:
   - Ubuntu:
     `sudo apt-get install -y build-essential linux-image-extra-$(uname -r)`
2. Prepare a DPDK SDK:
   - Download the latest DPDK release: `wget http://dpdk.org/browse/dpdk/snapshot/dpdk-1.8.0.tar.gz`
   - Untar it.
   - Edit config/common_linuxapp: set CONFIG_RTE_MBUF_REFCNT and CONFIG_RTE_LIBRTE_KNI to 'n'.
   - For DPDK 1.7.x, edit config/common_linuxapp:
     - Set CONFIG_RTE_LIBRTE_PMD_BOND to 'n'.
     - Set CONFIG_RTE_MBUF_SCATTER_GATHER to 'n'.
     - Set CONFIG_RTE_LIBRTE_IP_FRAG to 'n'.
   - Run the tools/setup.sh script as root.
   - Compile a linuxapp target (option 9).
   - Install the IGB_UIO module (option 11).
   - Bind some physical port to IGB_UIO (option 17).
   - Configure hugepage mappings (option 14/15).
3. Run configure.py: `./configure.py --dpdk-target <path to the untarred dpdk-1.8.0 above>/x86_64-native-linuxapp-gcc`.

README.md (140 lines)
@@ -1,112 +1,78 @@
# Scylla

[](http://slack.scylladb.com)
[](https://twitter.com/intent/follow?screen_name=ScyllaDB)

## What is Scylla?

Scylla is the real-time big data database that is API-compatible with Apache Cassandra and Amazon DynamoDB.
Scylla embraces a shared-nothing approach that increases throughput and storage capacity to realize order-of-magnitude performance improvements and reduce hardware costs.

For more information, please see the [ScyllaDB web site].

[ScyllaDB web site]: https://www.scylladb.com

## Build Prerequisites

Scylla is fairly fussy about its build environment, requiring very recent
versions of the C++20 compiler and of many libraries to build. The document
[HACKING.md](HACKING.md) includes detailed information on building and
developing Scylla, but to get Scylla building quickly on (almost) any build
machine, Scylla offers a [frozen toolchain](tools/toolchain/README.md).
This is a pre-configured Docker image which includes recent versions of all
the required compilers, libraries and build tools. Using the frozen toolchain
allows you to avoid changing anything on your build machine to meet Scylla's
requirements - you just need to meet the frozen toolchain's prerequisites
(mostly, Docker or Podman being available).

## Building Scylla

Building Scylla with the frozen toolchain `dbuild` is as easy as:

```bash
$ git submodule update --init --force --recursive
$ ./tools/toolchain/dbuild ./configure.py
$ ./tools/toolchain/dbuild ninja build/release/scylla
```

For further information, please see:

* [Developer documentation] for more information on building Scylla.
* [Build documentation] on how to build Scylla binaries, tests, and packages.
* [Docker image build documentation] for information on how to build Docker images.

[developer documentation]: HACKING.md
[build documentation]: docs/dev/building.md
[docker image build documentation]: dist/docker/debian/README.md

## Quick-start

```bash
$ git submodule update --init --recursive
$ sudo ./install-dependencies.sh
$ ./configure.py --mode=release
$ ninja-build -j4  # Assuming 4 system threads.
$ ./build/release/scylla
$ # Rejoice!
```

Please see [HACKING.md](HACKING.md) for detailed information on building and developing Scylla.

## Running Scylla

To start the Scylla server, run:

```bash
$ ./tools/toolchain/dbuild ./build/release/scylla --workdir tmp --smp 1 --developer-mode 1
```

This will start a Scylla node with one CPU core allocated to it and data files stored in the `tmp` directory.
The `--developer-mode` flag is needed to disable the various checks Scylla performs at startup to ensure the machine is configured for maximum performance (not relevant on development workstations).
Please note that you need to run Scylla with `dbuild` if you built it with the frozen toolchain.

For more run options, run:

```bash
$ ./tools/toolchain/dbuild ./build/release/scylla --help
```

* Run Scylla:
```
./build/release/scylla
```
* Run Scylla with one CPU and `./tmp` as the data directory:
```
./build/release/scylla --datadir tmp --commitlog-directory tmp --smp 1
```
* For more run options:
```
./build/release/scylla --help
```

## Testing

[](https://github.com/scylladb/scylladb/actions/workflows/seastar.yaml) [](https://github.com/scylladb/scylladb/actions/workflows/reproducible-build.yaml) [](https://github.com/scylladb/scylladb/actions/workflows/clang-nightly.yaml)

See [test.py manual](docs/dev/testing.md).

## Building Fedora RPM

As a prerequisite, you need to install [Mock](https://fedoraproject.org/wiki/Mock) on your machine:

## Scylla APIs and compatibility
By default, Scylla is compatible with Apache Cassandra and its API - CQL.
There is also support for the API of Amazon DynamoDB™,
which needs to be enabled and configured in order to be used. For more
information on how to enable the DynamoDB™ API in Scylla,
and the current compatibility of this feature, as well as Scylla-specific extensions, see
[Alternator](docs/alternator/alternator.md) and
[Getting started with Alternator](docs/alternator/getting-started.md).
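As a rough sketch of what enabling the API looks like, one starts Scylla with the Alternator options set (the option names and values below are assumptions; see the documents above for the authoritative configuration):

```bash
# Option names assumed for illustration; consult the Alternator docs.
$ ./build/release/scylla --developer-mode 1 \
    --alternator-port 8000 --alternator-write-isolation only_rmw_uses_lwt
```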

```
# Install mock:
sudo yum install mock

# Add user to the "mock" group:
usermod -a -G mock $USER && newgrp mock
```

## Documentation

Documentation can be found [here](docs/dev/README.md).
Seastar documentation can be found [here](http://docs.seastar.io/master/index.html).
User documentation can be found [here](https://docs.scylladb.com/).

Then, to build an RPM, run:

```
./dist/redhat/build_rpm.sh
```

## Training

Training material and online courses can be found at [Scylla University](https://university.scylladb.com/).
The courses are free, self-paced and include hands-on examples. They cover a variety of topics including Scylla data modeling,
administration, architecture, basic NoSQL concepts, using drivers for application development, Scylla setup, failover, compactions,
multi-datacenters and how Scylla integrates with third-party applications.

The built RPM is stored in the ``/var/lib/mock/<configuration>/result`` directory.
For example, on Fedora 21 mock reports the following:

```
INFO: Done(scylla-server-0.00-1.fc21.src.rpm) Config(default) 20 minutes 7 seconds
INFO: Results and/or logs in: /var/lib/mock/fedora-21-x86_64/result
```

## Building Fedora-based Docker image

Build a Docker image with:

```
cd dist/docker
docker build -t <image-name> .
```

Run the image with:

```
docker run -p $(hostname -i):9042:9042 -i -t <image-name>
```

## Contributing to Scylla

If you want to report a bug or submit a pull request or a patch, please read the [contribution guidelines].

If you are a developer working on Scylla, please read the [developer guidelines].

[contribution guidelines]: CONTRIBUTING.md
[developer guidelines]: HACKING.md

## Contact

* The [community forum] and [Slack channel] are for users to discuss configuration, management, and operations of ScyllaDB open source.
* The [developers mailing list] is for developers and people interested in following the development of ScyllaDB to discuss technical topics.

[community forum]: https://forum.scylladb.com/
[Slack channel]: http://slack.scylladb.com/
[developers mailing list]: https://groups.google.com/forum/#!forum/scylladb-dev
[Guidelines for contributing](CONTRIBUTING.md)
@@ -1,119 +1,24 @@
#!/bin/sh

USAGE=$(cat <<-END
Usage: $(basename "$0") [-h|--help] [-o|--output-dir PATH] [--date-stamp DATE] -- generate Scylla version and build information files.

Options:
  -h|--help             show this help message.
  -o|--output-dir PATH  specify destination path at which the version files are to be created.
  -d|--date-stamp DATE  manually set date for release parameter
  -v|--verbose          also print out the version number

By default, the script will attempt to parse 'version' file
in the current directory, which should contain a string of
'\$version-\$release' form.

Otherwise, it will call 'git log' on the source tree (the
directory, which contains the script) to obtain current
commit hash and use it for building the version and release
strings.

The script assumes that it's called from the Scylla source
tree.

The files created are:
  SCYLLA-VERSION-FILE
  SCYLLA-RELEASE-FILE
  SCYLLA-PRODUCT-FILE

By default, these files are created in the 'build'
subdirectory under the directory containing the script.
The destination directory can be overridden by
using '-o PATH' option.
END
)

DATE=""
PRINT_VERSION=false

while [ $# -gt 0 ]; do
    opt="$1"
    case $opt in
        -h|--help)
            echo "$USAGE"
            exit 0
            ;;
        -o|--output-dir)
            OUTPUT_DIR="$2"
            shift
            shift
            ;;
        --date-stamp)
            DATE="$2"
            shift
            shift
            ;;
        -v|--verbose)
            PRINT_VERSION=true
            shift
            ;;
        *)
            echo "Unexpected argument found: $1"
            echo
            echo "$USAGE"
            exit 1
            ;;
    esac
done

SCRIPT_DIR="$(dirname "$0")"

if [ -z "$OUTPUT_DIR" ]; then
    OUTPUT_DIR="$SCRIPT_DIR/build"
fi

if [ -z "$DATE" ]; then
    DATE=$(date --utc +%Y%m%d)
fi

# Default scylla product/version tags
PRODUCT=scylla
VERSION=6.1.6
VERSION=2.1.6

if test -f version
then
    SCYLLA_VERSION=$(cat version | awk -F'-' '{print $1}')
    SCYLLA_RELEASE=$(cat version | awk -F'-' '{print $2}')
else
    DATE=$(date +%Y%m%d)
    GIT_COMMIT=$(git log --pretty=format:'%h' -n 1)
    SCYLLA_VERSION=$VERSION
    if [ -z "$SCYLLA_RELEASE" ]; then
        GIT_COMMIT=$(git -C "$SCRIPT_DIR" log --pretty=format:'%h' -n 1 --abbrev=12)
        # For custom package builds, replace "0" with "counter.yourname",
        # where counter starts at 1 and increments for successive versions.
        # This ensures that the package manager will select your custom
        # package over the standard release.
        # Do not use any special characters like - or _ in the name above!
        # These characters either have special meaning or are illegal in
        # version strings.
        SCYLLA_BUILD=0
        SCYLLA_RELEASE=$SCYLLA_BUILD.$DATE.$GIT_COMMIT
    elif [ -f "$OUTPUT_DIR/SCYLLA-RELEASE-FILE" ]; then
        echo "setting SCYLLA_RELEASE only makes sense in clean builds" 1>&2
        exit 1
    fi
    # For custom package builds, replace "0" with "counter.your_name",
    # where counter starts at 1 and increments for successive versions.
    # This ensures that the package manager will select your custom
    # package over the standard release.
    SCYLLA_BUILD=0
    SCYLLA_RELEASE=$SCYLLA_BUILD.$DATE.$GIT_COMMIT
fi

if [ -f "$OUTPUT_DIR/SCYLLA-RELEASE-FILE" ]; then
    GIT_COMMIT_FILE=$(cat "$OUTPUT_DIR/SCYLLA-RELEASE-FILE" | rev | cut -d . -f 1 | rev)
    if [ "$GIT_COMMIT" = "$GIT_COMMIT_FILE" ]; then
        exit 0
    fi
fi

if $PRINT_VERSION; then
    echo "$SCYLLA_VERSION-$SCYLLA_RELEASE"
fi
mkdir -p "$OUTPUT_DIR"
echo "$SCYLLA_VERSION" > "$OUTPUT_DIR/SCYLLA-VERSION-FILE"
echo "$SCYLLA_RELEASE" > "$OUTPUT_DIR/SCYLLA-RELEASE-FILE"
echo "$PRODUCT" > "$OUTPUT_DIR/SCYLLA-PRODUCT-FILE"
echo "$SCYLLA_VERSION-$SCYLLA_RELEASE"
mkdir -p build
echo "$SCYLLA_VERSION" > build/SCYLLA-VERSION-FILE
echo "$SCYLLA_RELEASE" > build/SCYLLA-RELEASE-FILE

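A sketch of invoking the script above (its file name is not shown in this diff; `./SCYLLA-VERSION-GEN` is assumed here):

```bash
$ ./SCYLLA-VERSION-GEN --output-dir build --verbose   # script name is an assumption
$ cat build/SCYLLA-VERSION-FILE build/SCYLLA-RELEASE-FILE
```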
Submodule abseil deleted from d7aaad83b4
@@ -1,13 +0,0 @@
/*
 * Copyright (C) 2020-present ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 */

#include "absl-flat_hash_map.hh"

size_t sstring_hash::operator()(std::string_view v) const noexcept {
    return absl::Hash<std::string_view>{}(v);
}
@@ -1,34 +0,0 @@
/*
 * Copyright (C) 2020-present ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 */

#pragma once

#include <absl/container/flat_hash_map.h>
#include <seastar/core/sstring.hh>

using namespace seastar;

// Transparent hash: lets flat_hash_map<sstring, V> look up keys by
// std::string_view without materializing an sstring first.
struct sstring_hash {
    using is_transparent = void;
    size_t operator()(std::string_view v) const noexcept;
};

// Transparent equality, matching sstring_hash above.
struct sstring_eq {
    using is_transparent = void;
    bool operator()(std::string_view a, std::string_view b) const noexcept {
        return a == b;
    }
};

template <typename K, typename V, typename... Ts>
struct flat_hash_map : public absl::flat_hash_map<K, V, Ts...> {
};

// For sstring keys, pick the transparent hash/equality defined above.
template <typename V>
struct flat_hash_map<sstring, V>
    : public absl::flat_hash_map<sstring, V, sstring_hash, sstring_eq> {};
@@ -1,34 +0,0 @@
include(generate_cql_grammar)
generate_cql_grammar(
  GRAMMAR expressions.g
  SOURCES cql_grammar_srcs)

add_library(alternator STATIC)
target_sources(alternator
  PRIVATE
    controller.cc
    server.cc
    executor.cc
    stats.cc
    serialization.cc
    expressions.cc
    conditions.cc
    auth.cc
    streams.cc
    ttl.cc
    ${cql_grammar_srcs})
target_include_directories(alternator
  PUBLIC
    ${CMAKE_SOURCE_DIR}
    ${CMAKE_BINARY_DIR}
  PRIVATE
    ${RAPIDJSON_INCLUDE_DIRS})
target_link_libraries(alternator
  cql3
  idl
  Seastar::seastar
  xxHash::xxhash
  absl::headers)

check_headers(check-headers alternator
  GLOB_RECURSE ${CMAKE_CURRENT_SOURCE_DIR}/*.hh)
@@ -1,70 +0,0 @@
/*
 * Copyright 2019-present ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 */

#include "alternator/error.hh"
#include "auth/common.hh"
#include "log.hh"
#include <string>
#include <string_view>
#include "bytes.hh"
#include "alternator/auth.hh"
#include <fmt/format.h>
#include "auth/password_authenticator.hh"
#include "service/storage_proxy.hh"
#include "alternator/executor.hh"
#include "cql3/selection/selection.hh"
#include "cql3/result_set.hh"
#include "types/types.hh"
#include <seastar/core/coroutine.hh>

namespace alternator {

static logging::logger alogger("alternator-auth");

future<std::string> get_key_from_roles(service::storage_proxy& proxy, auth::service& as, std::string username) {
    schema_ptr schema = proxy.data_dictionary().find_schema(auth::get_auth_ks_name(as.query_processor()), "roles");
    partition_key pk = partition_key::from_single_value(*schema, utf8_type->decompose(username));
    dht::partition_range_vector partition_ranges{dht::partition_range(dht::decorate_key(*schema, pk))};
    std::vector<query::clustering_range> bounds{query::clustering_range::make_open_ended_both_sides()};
    const column_definition* salted_hash_col = schema->get_column_definition(bytes("salted_hash"));
    const column_definition* can_login_col = schema->get_column_definition(bytes("can_login"));
    if (!salted_hash_col || !can_login_col) {
        co_await coroutine::return_exception(api_error::unrecognized_client(format("Credentials cannot be fetched for: {}", username)));
    }
    auto selection = cql3::selection::selection::for_columns(schema, {salted_hash_col, can_login_col});
    auto partition_slice = query::partition_slice(std::move(bounds), {}, query::column_id_vector{salted_hash_col->id, can_login_col->id}, selection->get_query_options());
    auto command = ::make_lw_shared<query::read_command>(schema->id(), schema->version(), partition_slice,
            proxy.get_max_result_size(partition_slice), query::tombstone_limit(proxy.get_tombstone_limit()));
    auto cl = auth::password_authenticator::consistency_for_user(username);

    service::client_state client_state{service::client_state::internal_tag()};
    service::storage_proxy::coordinator_query_result qr = co_await proxy.query(schema, std::move(command), std::move(partition_ranges), cl,
            service::storage_proxy::coordinator_query_options(executor::default_timeout(), empty_service_permit(), client_state));

    cql3::selection::result_set_builder builder(*selection, gc_clock::now());
    query::result_view::consume(*qr.query_result, partition_slice, cql3::selection::result_set_builder::visitor(builder, *schema, *selection));

    auto result_set = builder.build();
    if (result_set->empty()) {
        co_await coroutine::return_exception(api_error::unrecognized_client(format("User not found: {}", username)));
    }
    const auto& result = result_set->rows().front();
    bool can_login = result[1] && value_cast<bool>(boolean_type->deserialize(*result[1]));
    if (!can_login) {
        // This is a valid role name, but has "login=False" so should not be
        // usable for authentication (see #19735).
        co_await coroutine::return_exception(api_error::unrecognized_client(format("Role {} has login=false so cannot be used for login", username)));
    }
    const managed_bytes_opt& salted_hash = result.front();
    if (!salted_hash) {
        co_await coroutine::return_exception(api_error::unrecognized_client(format("No password found for user: {}", username)));
    }
    co_return value_cast<sstring>(utf8_type->deserialize(*salted_hash));
}

}
@@ -1,25 +0,0 @@
/*
 * Copyright 2019-present ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 */

#pragma once

#include <string>
#include "utils/loading_cache.hh"
#include "auth/service.hh"

namespace service {
class storage_proxy;
}

namespace alternator {

using key_cache = utils::loading_cache<std::string, std::string, 1>;

future<std::string> get_key_from_roles(service::storage_proxy& proxy, auth::service& as, std::string username);

}
@@ -1,758 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include <string_view>
|
||||
#include "alternator/conditions.hh"
|
||||
#include "alternator/error.hh"
|
||||
#include <unordered_map>
|
||||
#include "utils/rjson.hh"
|
||||
#include "serialization.hh"
|
||||
#include "utils/base64.hh"
|
||||
#include "utils/rjson.hh"
|
||||
#include <stdexcept>
|
||||
#include <boost/algorithm/cxx11/all_of.hpp>
|
||||
#include <boost/algorithm/cxx11/any_of.hpp>
|
||||
#include "utils/overloaded_functor.hh"
|
||||
|
||||
#include "expressions.hh"
|
||||
|
||||
namespace alternator {
|
||||
|
||||
static logging::logger clogger("alternator-conditions");
|
||||
|
||||
comparison_operator_type get_comparison_operator(const rjson::value& comparison_operator) {
|
||||
static std::unordered_map<std::string, comparison_operator_type> ops = {
|
||||
{"EQ", comparison_operator_type::EQ},
|
||||
{"NE", comparison_operator_type::NE},
|
||||
{"LE", comparison_operator_type::LE},
|
||||
{"LT", comparison_operator_type::LT},
|
||||
{"GE", comparison_operator_type::GE},
|
||||
{"GT", comparison_operator_type::GT},
|
||||
{"IN", comparison_operator_type::IN},
|
||||
{"NULL", comparison_operator_type::IS_NULL},
|
||||
{"NOT_NULL", comparison_operator_type::NOT_NULL},
|
||||
{"BETWEEN", comparison_operator_type::BETWEEN},
|
||||
{"BEGINS_WITH", comparison_operator_type::BEGINS_WITH},
|
||||
{"CONTAINS", comparison_operator_type::CONTAINS},
|
||||
{"NOT_CONTAINS", comparison_operator_type::NOT_CONTAINS},
|
||||
};
|
||||
if (!comparison_operator.IsString()) {
|
||||
throw api_error::validation(format("Invalid comparison operator definition {}", rjson::print(comparison_operator)));
|
||||
}
|
||||
std::string op = comparison_operator.GetString();
|
||||
auto it = ops.find(op);
|
||||
if (it == ops.end()) {
|
||||
throw api_error::validation(format("Unsupported comparison operator {}", op));
|
||||
}
|
||||
return it->second;
|
||||
}
|
||||
|
||||
namespace {
|
||||
|
||||
struct size_check {
|
||||
// True iff size passes this check.
|
||||
virtual bool operator()(rapidjson::SizeType size) const = 0;
|
||||
// Check description, such that format("expected array {}", check.what()) is human-readable.
|
||||
virtual sstring what() const = 0;
|
||||
};
|
||||
|
||||
class exact_size : public size_check {
|
||||
rapidjson::SizeType _expected;
|
||||
public:
|
||||
explicit exact_size(rapidjson::SizeType expected) : _expected(expected) {}
|
||||
bool operator()(rapidjson::SizeType size) const override { return size == _expected; }
|
||||
sstring what() const override { return format("of size {}", _expected); }
|
||||
};
|
||||
|
||||
struct empty : public size_check {
|
||||
bool operator()(rapidjson::SizeType size) const override { return size < 1; }
|
||||
sstring what() const override { return "to be empty"; }
|
||||
};
|
||||
|
||||
struct nonempty : public size_check {
|
||||
bool operator()(rapidjson::SizeType size) const override { return size > 0; }
|
||||
sstring what() const override { return "to be non-empty"; }
|
||||
};
|
||||
|
||||
} // anonymous namespace
|
||||
|
||||
// Check that array has the expected number of elements
|
||||
static void verify_operand_count(const rjson::value* array, const size_check& expected, const rjson::value& op) {
|
||||
if (!array && expected(0)) {
|
||||
// If expected() allows an empty AttributeValueList, it is also fine
|
||||
// that it is missing.
|
||||
return;
|
||||
}
|
||||
if (!array || !array->IsArray()) {
|
||||
throw api_error::validation("With ComparisonOperator, AttributeValueList must be given and an array");
|
||||
}
|
||||
if (!expected(array->Size())) {
|
||||
throw api_error::validation(
|
||||
format("{} operator requires AttributeValueList {}, instead found list size {}",
|
||||
op, expected.what(), array->Size()));
|
||||
}
|
||||
}
|
||||
|
||||
struct rjson_engaged_ptr_comp {
|
||||
bool operator()(const rjson::value* p1, const rjson::value* p2) const {
|
||||
return rjson::single_value_comp()(*p1, *p2);
|
||||
}
|
||||
};
|
||||
|
||||
// It's not enough to compare underlying JSON objects when comparing sets,
|
||||
// as internally they're stored in an array, and the order of elements is
|
||||
// not important in set equality. See issue #5021
|
||||
static bool check_EQ_for_sets(const rjson::value& set1, const rjson::value& set2) {
|
||||
if (!set1.IsArray() || !set2.IsArray() || set1.Size() != set2.Size()) {
|
||||
return false;
|
||||
}
|
||||
std::set<const rjson::value*, rjson_engaged_ptr_comp> set1_raw;
|
||||
for (auto it = set1.Begin(); it != set1.End(); ++it) {
|
||||
set1_raw.insert(&*it);
|
||||
}
|
||||
for (const auto& a : set2.GetArray()) {
|
||||
if (!set1_raw.contains(&a)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
// Moreover, the JSON being compared can be a nested document with outer
// layers of lists and maps and some inner set - and we need to get to that
// inner set to compare it correctly with check_EQ_for_sets() (issue #8514).
static bool check_EQ(const rjson::value* v1, const rjson::value& v2);
static bool check_EQ_for_lists(const rjson::value& list1, const rjson::value& list2) {
    if (!list1.IsArray() || !list2.IsArray() || list1.Size() != list2.Size()) {
        return false;
    }
    auto it1 = list1.Begin();
    auto it2 = list2.Begin();
    while (it1 != list1.End()) {
        // Note: Alternator limits an item's depth (rjson::parse() limits
        // it to around 37 levels), so this recursion is safe.
        if (!check_EQ(&*it1, *it2)) {
            return false;
        }
        ++it1;
        ++it2;
    }
    return true;
}

static bool check_EQ_for_maps(const rjson::value& list1, const rjson::value& list2) {
    if (!list1.IsObject() || !list2.IsObject() || list1.MemberCount() != list2.MemberCount()) {
        return false;
    }
    for (auto it1 = list1.MemberBegin(); it1 != list1.MemberEnd(); ++it1) {
        auto it2 = list2.FindMember(it1->name);
        if (it2 == list2.MemberEnd() || !check_EQ(&it1->value, it2->value)) {
            return false;
        }
    }
    return true;
}

// Check if two JSON-encoded values match with the EQ relation
static bool check_EQ(const rjson::value* v1, const rjson::value& v2) {
    if (v1 && v1->IsObject() && v1->MemberCount() == 1 && v2.IsObject() && v2.MemberCount() == 1) {
        auto it1 = v1->MemberBegin();
        auto it2 = v2.MemberBegin();
        if (it1->name != it2->name) {
            return false;
        }
        if (it1->name == "SS" || it1->name == "NS" || it1->name == "BS") {
            return check_EQ_for_sets(it1->value, it2->value);
        } else if (it1->name == "L") {
            return check_EQ_for_lists(it1->value, it2->value);
        } else if (it1->name == "M") {
            return check_EQ_for_maps(it1->value, it2->value);
        } else {
            // Other, non-nested types (number, string, etc.) can be compared
            // literally, comparing their JSON representation.
            return it1->value == it2->value;
        }
    } else {
        // If v1 and/or v2 are missing (IsNull()) the result should be false.
        // In the unlikely case that the object is malformed (issue #8070),
        // let's also return false.
        return false;
    }
}

// Check if two JSON-encoded values match with the NE relation
static bool check_NE(const rjson::value* v1, const rjson::value& v2) {
    return !check_EQ(v1, v2);
}

// Check if two JSON-encoded values match with the BEGINS_WITH relation
bool check_BEGINS_WITH(const rjson::value* v1, const rjson::value& v2,
                       bool v1_from_query, bool v2_from_query) {
    bool bad = false;
    if (!v1 || !v1->IsObject() || v1->MemberCount() != 1) {
        if (v1_from_query) {
            throw api_error::validation("begins_with() encountered malformed argument");
        } else {
            bad = true;
        }
    } else if (v1->MemberBegin()->name != "S" && v1->MemberBegin()->name != "B") {
        if (v1_from_query) {
            throw api_error::validation(format("begins_with() supports only string or binary type, got: {}", *v1));
        } else {
            bad = true;
        }
    }
    if (!v2.IsObject() || v2.MemberCount() != 1) {
        if (v2_from_query) {
            throw api_error::validation("begins_with() encountered malformed argument");
        } else {
            bad = true;
        }
    } else if (v2.MemberBegin()->name != "S" && v2.MemberBegin()->name != "B") {
        if (v2_from_query) {
            throw api_error::validation(format("begins_with() supports only string or binary type, got: {}", v2));
        } else {
            bad = true;
        }
    }
    if (bad) {
        return false;
    }
    auto it1 = v1->MemberBegin();
    auto it2 = v2.MemberBegin();
    if (it1->name != it2->name) {
        return false;
    }
    if (it2->name == "S") {
        return rjson::to_string_view(it1->value).starts_with(rjson::to_string_view(it2->value));
    } else /* it2->name == "B" */ {
        try {
            return base64_begins_with(rjson::to_string_view(it1->value), rjson::to_string_view(it2->value));
        } catch (std::invalid_argument&) {
            // Determine if any of the malformed values is from the query,
            // and raise an exception if so.
            unwrap_bytes(it1->value, v1_from_query);
            unwrap_bytes(it2->value, v2_from_query);
            return false;
        }
    }
}
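
// Illustrative example (added for clarity, not in the original source):
// check_BEGINS_WITH(&v1, v2, ...) with
//     v1 = {"S": "alternator"}   and   v2 = {"S": "alt"}
// returns true, while a type mismatch such as v1 = {"S": "alt"} and
// v2 = {"B": "YWx0"} returns false without attempting a comparison.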

static bool is_set_of(const rjson::value& type1, const rjson::value& type2) {
    return (type2 == "S" && type1 == "SS") || (type2 == "N" && type1 == "NS") || (type2 == "B" && type1 == "BS");
}

// Check if two JSON-encoded values match with the CONTAINS relation
bool check_CONTAINS(const rjson::value* v1, const rjson::value& v2, bool v1_from_query, bool v2_from_query) {
    if (!v1) {
        return false;
    }
    const auto& kv1 = *v1->MemberBegin();
    const auto& kv2 = *v2.MemberBegin();
    if (kv1.name == "S" && kv2.name == "S") {
        return rjson::to_string_view(kv1.value).find(rjson::to_string_view(kv2.value)) != std::string_view::npos;
    } else if (kv1.name == "B" && kv2.name == "B") {
        auto d_kv1 = unwrap_bytes(kv1.value, v1_from_query);
        auto d_kv2 = unwrap_bytes(kv2.value, v2_from_query);
        if (!d_kv1 || !d_kv2) {
            return false;
        }
        return d_kv1->find(*d_kv2) != bytes::npos;
    } else if (is_set_of(kv1.name, kv2.name)) {
        for (auto i = kv1.value.Begin(); i != kv1.value.End(); ++i) {
            if (*i == kv2.value) {
                return true;
            }
        }
    } else if (kv1.name == "L") {
        for (auto i = kv1.value.Begin(); i != kv1.value.End(); ++i) {
            if (!i->IsObject() || i->MemberCount() != 1) {
                clogger.error("check_CONTAINS received a list whose element is malformed");
                return false;
            }
            const auto& el = *i->MemberBegin();
            if (el.name == kv2.name && el.value == kv2.value) {
                return true;
            }
        }
    }
    return false;
}
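
// Illustrative examples (added for clarity, not in the original source):
//   - {"S": "hello world"} CONTAINS {"S": "lo w"}  -> true (substring)
//   - {"SS": ["a", "b"]}   CONTAINS {"S": "b"}     -> true (set member)
//   - {"L": [{"N": "7"}]}  CONTAINS {"N": "7"}     -> true (list element)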

// Check if two JSON-encoded values match with the NOT_CONTAINS relation
static bool check_NOT_CONTAINS(const rjson::value* v1, const rjson::value& v2, bool v1_from_query, bool v2_from_query) {
    if (!v1) {
        return false;
    }
    return !check_CONTAINS(v1, v2, v1_from_query, v2_from_query);
}

// Check if a JSON-encoded value equals any element of an array, which must have at least one element.
static bool check_IN(const rjson::value* val, const rjson::value& array) {
    if (!array[0].IsObject() || array[0].MemberCount() != 1) {
        throw api_error::validation(
                format("IN operator encountered malformed AttributeValue: {}", array[0]));
    }
    const auto& type = array[0].MemberBegin()->name;
    if (type != "S" && type != "N" && type != "B") {
        throw api_error::validation(
                "IN operator requires AttributeValueList elements to be of type String, Number, or Binary ");
    }
    if (!val) {
        return false;
    }
    bool have_match = false;
    for (const auto& elem : array.GetArray()) {
        if (!elem.IsObject() || elem.MemberCount() != 1 || elem.MemberBegin()->name != type) {
            throw api_error::validation(
                    "IN operator requires all AttributeValueList elements to have the same type ");
        }
        if (!have_match && *val == elem) {
            // Can't return yet, must check types of all array elements. <sigh>
            have_match = true;
        }
    }
    return have_match;
}
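
// Illustrative example (added for clarity, not in the original source):
// with AttributeValueList = [{"N": "1"}, {"N": "2"}, {"N": "3"}], a value
// of {"N": "2"} satisfies IN. Note that even after a match is found, the
// remaining elements are still scanned, so a mixed-type list such as
// [{"N": "1"}, {"S": "x"}] is always rejected with a ValidationException.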

// Another variant of check_IN, this one for ConditionExpression. It needs to
// check whether the first element in the given vector is equal to any of the
// others.
static bool check_IN(const std::vector<rjson::value>& array) {
    const rjson::value* first = &array[0];
    for (unsigned i = 1; i < array.size(); i++) {
        if (check_EQ(first, array[i])) {
            return true;
        }
    }
    return false;
}

static bool check_NULL(const rjson::value* val) {
    return val == nullptr;
}

static bool check_NOT_NULL(const rjson::value* val) {
    return val != nullptr;
}

// Only types S, N or B (string, number or bytes) may be compared by the
// various comparison operators - lt, le, gt, ge, and between.
// Note that in particular, if the value is missing (v->IsNull()), this
// check returns false.
static bool check_comparable_type(const rjson::value& v) {
    if (!v.IsObject() || v.MemberCount() != 1) {
        return false;
    }
    const rjson::value& type = v.MemberBegin()->name;
    return type == "S" || type == "N" || type == "B";
}

// Check if two JSON-encoded values match with cmp.
template <typename Comparator>
bool check_compare(const rjson::value* v1, const rjson::value& v2, const Comparator& cmp,
                   bool v1_from_query, bool v2_from_query) {
    bool bad = false;
    if (!v1 || !check_comparable_type(*v1)) {
        if (v1_from_query) {
            throw api_error::validation(format("{} allows only the types String, Number, or Binary", cmp.diagnostic));
        }
        bad = true;
    }
    if (!check_comparable_type(v2)) {
        if (v2_from_query) {
            throw api_error::validation(format("{} allows only the types String, Number, or Binary", cmp.diagnostic));
        }
        bad = true;
    }
    if (bad) {
        return false;
    }
    const auto& kv1 = *v1->MemberBegin();
    const auto& kv2 = *v2.MemberBegin();
    if (kv1.name != kv2.name) {
        return false;
    }
    if (kv1.name == "N") {
        return cmp(unwrap_number(*v1, cmp.diagnostic), unwrap_number(v2, cmp.diagnostic));
    }
    if (kv1.name == "S") {
        return cmp(std::string_view(kv1.value.GetString(), kv1.value.GetStringLength()),
                   std::string_view(kv2.value.GetString(), kv2.value.GetStringLength()));
    }
    if (kv1.name == "B") {
        auto d_kv1 = unwrap_bytes(kv1.value, v1_from_query);
        auto d_kv2 = unwrap_bytes(kv2.value, v2_from_query);
        if (!d_kv1 || !d_kv2) {
            return false;
        }
        return cmp(*d_kv1, *d_kv2);
    }
    // Cannot reach here, as check_comparable_type() verifies the type is one
    // of the above options.
    return false;
}

struct cmp_lt {
    template <typename T> bool operator()(const T& lhs, const T& rhs) const { return lhs < rhs; }
    // We cannot use the normal comparison operators like "<" on the bytes
    // type, because they treat individual bytes as signed but we need to
    // compare them as *unsigned*. So we need a specialization for bytes.
    bool operator()(const bytes& lhs, const bytes& rhs) const { return compare_unsigned(lhs, rhs) < 0; }
    static constexpr const char* diagnostic = "LT operator";
};

struct cmp_le {
    template <typename T> bool operator()(const T& lhs, const T& rhs) const { return lhs <= rhs; }
    bool operator()(const bytes& lhs, const bytes& rhs) const { return compare_unsigned(lhs, rhs) <= 0; }
    static constexpr const char* diagnostic = "LE operator";
};

struct cmp_ge {
    template <typename T> bool operator()(const T& lhs, const T& rhs) const { return lhs >= rhs; }
    bool operator()(const bytes& lhs, const bytes& rhs) const { return compare_unsigned(lhs, rhs) >= 0; }
    static constexpr const char* diagnostic = "GE operator";
};

struct cmp_gt {
    template <typename T> bool operator()(const T& lhs, const T& rhs) const { return lhs > rhs; }
    bool operator()(const bytes& lhs, const bytes& rhs) const { return compare_unsigned(lhs, rhs) > 0; }
    static constexpr const char* diagnostic = "GT operator";
};
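
// Why the unsigned specialization matters - a sketch (illustrative, not in
// the original source): with signed chars the byte 0x80 compares as -128,
// so "\x80" < "\x01" would be true; compare_unsigned() treats 0x80 as 128,
// giving the byte order DynamoDB expects for Binary values, e.g.:
//   bytes a = {'\x01'}, b = {'\x80'};
//   cmp_lt{}(a, b);   // true - 0x01 < 0x80 when compared unsigned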

// True if v is between lb and ub, inclusive. Throws or returns false
// (depending on bounds_from_query parameter) if lb > ub.
template <typename T>
static bool check_BETWEEN(const T& v, const T& lb, const T& ub, bool bounds_from_query) {
    if (cmp_lt()(ub, lb)) {
        if (bounds_from_query) {
            throw api_error::validation(
                    format("BETWEEN operator requires lower_bound <= upper_bound, but {} > {}", lb, ub));
        } else {
            return false;
        }
    }
    return cmp_ge()(v, lb) && cmp_le()(v, ub);
}
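
// Worked example (illustrative, not in the original source): for numbers,
// check_BETWEEN(5, 1, 10, true) is true; check_BETWEEN(5, 10, 1, true)
// throws a ValidationException because the bounds are inverted, while the
// same call with bounds_from_query=false quietly returns false.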

static bool check_BETWEEN(const rjson::value* v, const rjson::value& lb, const rjson::value& ub,
                          bool v_from_query, bool lb_from_query, bool ub_from_query) {
    if ((v && v_from_query && !check_comparable_type(*v)) ||
        (lb_from_query && !check_comparable_type(lb)) ||
        (ub_from_query && !check_comparable_type(ub))) {
        throw api_error::validation("between allows only the types String, Number, or Binary");
    }
    if (!v || !v->IsObject() || v->MemberCount() != 1 ||
        !lb.IsObject() || lb.MemberCount() != 1 ||
        !ub.IsObject() || ub.MemberCount() != 1) {
        return false;
    }

    const auto& kv_v = *v->MemberBegin();
    const auto& kv_lb = *lb.MemberBegin();
    const auto& kv_ub = *ub.MemberBegin();
    bool bounds_from_query = lb_from_query && ub_from_query;
    if (kv_lb.name != kv_ub.name) {
        if (bounds_from_query) {
            throw api_error::validation(
                    format("BETWEEN operator requires the same type for lower and upper bound; instead got {} and {}",
                            kv_lb.name, kv_ub.name));
        } else {
            return false;
        }
    }
    if (kv_v.name != kv_lb.name) { // Cannot compare different types, so v is NOT between lb and ub.
        return false;
    }
    if (kv_v.name == "N") {
        const char* diag = "BETWEEN operator";
        return check_BETWEEN(unwrap_number(*v, diag), unwrap_number(lb, diag), unwrap_number(ub, diag), bounds_from_query);
    }
    if (kv_v.name == "S") {
        return check_BETWEEN(std::string_view(kv_v.value.GetString(), kv_v.value.GetStringLength()),
                             std::string_view(kv_lb.value.GetString(), kv_lb.value.GetStringLength()),
                             std::string_view(kv_ub.value.GetString(), kv_ub.value.GetStringLength()),
                             bounds_from_query);
    }
    if (kv_v.name == "B") {
        auto d_kv_v = unwrap_bytes(kv_v.value, v_from_query);
        auto d_kv_lb = unwrap_bytes(kv_lb.value, lb_from_query);
        auto d_kv_ub = unwrap_bytes(kv_ub.value, ub_from_query);
        if (!d_kv_v || !d_kv_lb || !d_kv_ub) {
            return false;
        }
        return check_BETWEEN(*d_kv_v, *d_kv_lb, *d_kv_ub, bounds_from_query);
    }
    if (v_from_query) {
        throw api_error::validation(
                format("BETWEEN operator requires AttributeValueList elements to be of type String, Number, or Binary; instead got {}",
                        kv_lb.name));
    } else {
        return false;
    }
}

// Verify one Expected condition on one attribute (whose content is "got")
// for the verify_expected() below.
// This function returns true or false depending on whether the condition
// succeeded - it does not throw ConditionalCheckFailedException.
// However, it may throw ValidationException on input validation errors.
static bool verify_expected_one(const rjson::value& condition, const rjson::value* got) {
    const rjson::value* comparison_operator = rjson::find(condition, "ComparisonOperator");
    const rjson::value* attribute_value_list = rjson::find(condition, "AttributeValueList");
    const rjson::value* value = rjson::find(condition, "Value");
    const rjson::value* exists = rjson::find(condition, "Exists");
    // There are three types of conditions that Expected supports:
    // a value, not-exists, and a comparison of some kind. Each allows
    // and requires a different combination of parameters in the request.
    if (value) {
        if (exists && (!exists->IsBool() || exists->GetBool() != true)) {
            throw api_error::validation("Cannot combine Value with Exists!=true");
        }
        if (comparison_operator) {
            throw api_error::validation("Cannot combine Value with ComparisonOperator");
        }
        return check_EQ(got, *value);
    } else if (exists) {
        if (comparison_operator) {
            throw api_error::validation("Cannot combine Exists with ComparisonOperator");
        }
        if (!exists->IsBool() || exists->GetBool() != false) {
            throw api_error::validation("Exists!=false requires Value");
        }
        // Remember Exists=false, so we're checking that the attribute does *not* exist:
        return !got;
    } else {
        if (!comparison_operator) {
            throw api_error::validation("Missing ComparisonOperator, Value or Exists");
        }
        comparison_operator_type op = get_comparison_operator(*comparison_operator);
        switch (op) {
        case comparison_operator_type::EQ:
            verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
            return check_EQ(got, (*attribute_value_list)[0]);
        case comparison_operator_type::NE:
            verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
            return check_NE(got, (*attribute_value_list)[0]);
        case comparison_operator_type::LT:
            verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
            return check_compare(got, (*attribute_value_list)[0], cmp_lt{}, false, true);
        case comparison_operator_type::LE:
            verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
            return check_compare(got, (*attribute_value_list)[0], cmp_le{}, false, true);
        case comparison_operator_type::GT:
            verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
            return check_compare(got, (*attribute_value_list)[0], cmp_gt{}, false, true);
        case comparison_operator_type::GE:
            verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
            return check_compare(got, (*attribute_value_list)[0], cmp_ge{}, false, true);
        case comparison_operator_type::BEGINS_WITH:
            verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
            return check_BEGINS_WITH(got, (*attribute_value_list)[0], false, true);
        case comparison_operator_type::IN:
            verify_operand_count(attribute_value_list, nonempty(), *comparison_operator);
            return check_IN(got, *attribute_value_list);
        case comparison_operator_type::IS_NULL:
            verify_operand_count(attribute_value_list, empty(), *comparison_operator);
            return check_NULL(got);
        case comparison_operator_type::NOT_NULL:
            verify_operand_count(attribute_value_list, empty(), *comparison_operator);
            return check_NOT_NULL(got);
        case comparison_operator_type::BETWEEN:
            verify_operand_count(attribute_value_list, exact_size(2), *comparison_operator);
            return check_BETWEEN(got, (*attribute_value_list)[0], (*attribute_value_list)[1],
                    false, true, true);
        case comparison_operator_type::CONTAINS: {
            verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
            // Expected's "CONTAINS" has this artificial limitation.
            // ConditionExpression's "contains()" does not...
            const rjson::value& arg = (*attribute_value_list)[0];
            const auto& argtype = (*arg.MemberBegin()).name;
            if (argtype != "S" && argtype != "N" && argtype != "B") {
                throw api_error::validation(
                        format("CONTAINS operator requires a single AttributeValue of type String, Number, or Binary, "
                               "got {} instead", argtype));
            }
            return check_CONTAINS(got, arg, false, true);
        }
        case comparison_operator_type::NOT_CONTAINS: {
            verify_operand_count(attribute_value_list, exact_size(1), *comparison_operator);
            // Expected's "NOT_CONTAINS" has this artificial limitation.
            // ConditionExpression's "NOT contains()" does not...
            const rjson::value& arg = (*attribute_value_list)[0];
            const auto& argtype = (*arg.MemberBegin()).name;
            if (argtype != "S" && argtype != "N" && argtype != "B") {
                throw api_error::validation(
                        format("NOT_CONTAINS operator requires a single AttributeValue of type String, Number, or Binary, "
                               "got {} instead", argtype));
            }
            return check_NOT_CONTAINS(got, arg, false, true);
        }
        }
        throw std::logic_error(format("Internal error: corrupted operator enum: {}", int(op)));
    }
}

conditional_operator_type get_conditional_operator(const rjson::value& req) {
    const rjson::value* conditional_operator = rjson::find(req, "ConditionalOperator");
    if (!conditional_operator) {
        return conditional_operator_type::MISSING;
    }
    if (!conditional_operator->IsString()) {
        throw api_error::validation("'ConditionalOperator' parameter, if given, must be a string");
    }
    auto s = rjson::to_string_view(*conditional_operator);
    if (s == "AND") {
        return conditional_operator_type::AND;
    } else if (s == "OR") {
        return conditional_operator_type::OR;
    } else {
        throw api_error::validation(
                format("'ConditionalOperator' parameter must be AND, OR or missing. Found {}.", s));
    }
}

// Check if the existing values of the item (previous_item) match the
// conditions given by the Expected and ConditionalOperator parameters
// (if they exist) in the request (an UpdateItem, PutItem or DeleteItem).
// This function can throw a ValidationException API error if there
// are errors in the format of the condition itself.
bool verify_expected(const rjson::value& req, const rjson::value* previous_item) {
    const rjson::value* expected = rjson::find(req, "Expected");
    auto conditional_operator = get_conditional_operator(req);
    if (conditional_operator != conditional_operator_type::MISSING &&
        (!expected || (expected->IsObject() && expected->GetObject().ObjectEmpty()))) {
        throw api_error::validation("'ConditionalOperator' parameter cannot be specified for missing or empty Expression");
    }
    if (!expected) {
        return true;
    }
    if (!expected->IsObject()) {
        throw api_error::validation("'Expected' parameter, if given, must be an object");
    }
    bool require_all = conditional_operator != conditional_operator_type::OR;
    return verify_condition(*expected, require_all, previous_item);
}

bool verify_condition(const rjson::value& condition, bool require_all, const rjson::value* previous_item) {
    for (auto it = condition.MemberBegin(); it != condition.MemberEnd(); ++it) {
        const rjson::value* got = nullptr;
        if (previous_item) {
            got = rjson::find(*previous_item, rjson::to_string_view(it->name));
        }
        bool success = verify_expected_one(it->value, got);
        if (success && !require_all) {
            // When !require_all, one success is enough!
            return true;
        } else if (!success && require_all) {
            // When require_all, one failure is enough!
            return false;
        }
    }
    // If we got here and require_all, none of the checks failed, so succeed.
    // If we got here and !require_all, all of the checks failed, so fail.
    return require_all;
}
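
// Usage sketch (illustrative values, not from this file): for a DeleteItem
// request whose Expected parameter parsed to
//     {"age": {"ComparisonOperator": "GT", "AttributeValueList": [{"N": "21"}]}}
// a caller would evaluate it against the stored item roughly as
//     bool ok = verify_condition(expected, true /* require_all */, previous_item);
// With require_all=true the member conditions are ANDed; with false, ORed.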

static bool calculate_primitive_condition(const parsed::primitive_condition& cond,
                                          const rjson::value* previous_item) {
    std::vector<rjson::value> calculated_values;
    calculated_values.reserve(cond._values.size());
    for (const parsed::value& v : cond._values) {
        calculated_values.push_back(calculate_value(v,
                cond._op == parsed::primitive_condition::type::VALUE ?
                        calculate_value_caller::ConditionExpressionAlone :
                        calculate_value_caller::ConditionExpression,
                previous_item));
    }
    switch (cond._op) {
    case parsed::primitive_condition::type::BETWEEN:
        if (calculated_values.size() != 3) {
            // Shouldn't happen unless we have a bug in the parser
            throw std::logic_error(format("Wrong number of values {} in BETWEEN primitive_condition", cond._values.size()));
        }
        return check_BETWEEN(&calculated_values[0], calculated_values[1], calculated_values[2],
                cond._values[0].is_constant(), cond._values[1].is_constant(), cond._values[2].is_constant());
    case parsed::primitive_condition::type::IN:
        return check_IN(calculated_values);
    case parsed::primitive_condition::type::VALUE:
        if (calculated_values.size() != 1) {
            // Shouldn't happen unless we have a bug in the parser
            throw std::logic_error(format("Wrong number of values {} in primitive_condition", cond._values.size()));
        }
        // Unwrap the boolean wrapped as the value (if it is a boolean)
        if (calculated_values[0].IsObject() && calculated_values[0].MemberCount() == 1) {
            auto it = calculated_values[0].MemberBegin();
            if (it->name == "BOOL" && it->value.IsBool()) {
                return it->value.GetBool();
            }
        }
        throw api_error::validation(
                format("ConditionExpression: condition results in a non-boolean value: {}",
                        calculated_values[0]));
    default:
        // All the rest of the operators have exactly two parameters (and unless
        // we have a bug in the parser, that's what we have in the parsed object):
        if (calculated_values.size() != 2) {
            throw std::logic_error(format("Wrong number of values {} in primitive_condition object", cond._values.size()));
        }
    }
    switch (cond._op) {
    case parsed::primitive_condition::type::EQ:
        return check_EQ(&calculated_values[0], calculated_values[1]);
    case parsed::primitive_condition::type::NE:
        return check_NE(&calculated_values[0], calculated_values[1]);
    case parsed::primitive_condition::type::GT:
        return check_compare(&calculated_values[0], calculated_values[1], cmp_gt{},
                cond._values[0].is_constant(), cond._values[1].is_constant());
    case parsed::primitive_condition::type::GE:
        return check_compare(&calculated_values[0], calculated_values[1], cmp_ge{},
                cond._values[0].is_constant(), cond._values[1].is_constant());
    case parsed::primitive_condition::type::LT:
        return check_compare(&calculated_values[0], calculated_values[1], cmp_lt{},
                cond._values[0].is_constant(), cond._values[1].is_constant());
    case parsed::primitive_condition::type::LE:
        return check_compare(&calculated_values[0], calculated_values[1], cmp_le{},
                cond._values[0].is_constant(), cond._values[1].is_constant());
    default:
        // Shouldn't happen unless we have a bug in the parser
        throw std::logic_error(format("Unknown type {} in primitive_condition object", (int)(cond._op)));
    }
}

// Check if the existing values of the item (previous_item) match the
// conditions given by the given parsed ConditionExpression.
bool verify_condition_expression(
        const parsed::condition_expression& condition_expression,
        const rjson::value* previous_item) {
    if (condition_expression.empty()) {
        return true;
    }
    bool ret = std::visit(overloaded_functor {
        [&] (const parsed::primitive_condition& cond) -> bool {
            return calculate_primitive_condition(cond, previous_item);
        },
        [&] (const parsed::condition_expression::condition_list& list) -> bool {
            auto verify_condition = [&] (const parsed::condition_expression& e) {
                return verify_condition_expression(e, previous_item);
            };
            switch (list.op) {
            case '&':
                return boost::algorithm::all_of(list.conditions, verify_condition);
            case '|':
                return boost::algorithm::any_of(list.conditions, verify_condition);
            default:
                // Shouldn't happen unless we have a bug in the parser
                throw std::logic_error("bad operator in condition_list");
            }
        }
    }, condition_expression._expression);
    return condition_expression._negated ? !ret : ret;
}

}
@@ -1,46 +0,0 @@

/*
 * Copyright 2019-present ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 */

/*
 * This file contains definitions and functions related to placing conditions
 * on Alternator queries (equivalent of CQL's restrictions).
 *
 * With conditions, it's possible to add criteria to selection requests (Scan, Query)
 * and use them for narrowing down the result set, by means of filtering or indexing.
 *
 * Ref: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Condition.html
 */

#pragma once

#include "expressions_types.hh"

namespace alternator {

enum class comparison_operator_type {
    EQ, NE, LE, LT, GE, GT, IN, BETWEEN, CONTAINS, NOT_CONTAINS, IS_NULL, NOT_NULL, BEGINS_WITH
};

comparison_operator_type get_comparison_operator(const rjson::value& comparison_operator);

enum class conditional_operator_type {
    AND, OR, MISSING
};
conditional_operator_type get_conditional_operator(const rjson::value& req);

bool verify_expected(const rjson::value& req, const rjson::value* previous_item);
bool verify_condition(const rjson::value& condition, bool require_all, const rjson::value* previous_item);

bool check_CONTAINS(const rjson::value* v1, const rjson::value& v2, bool v1_from_query, bool v2_from_query);
bool check_BEGINS_WITH(const rjson::value* v1, const rjson::value& v2, bool v1_from_query, bool v2_from_query);

bool verify_condition_expression(
        const parsed::condition_expression& condition_expression,
        const rjson::value* previous_item);

}
@@ -1,168 +0,0 @@

/*
 * Copyright (C) 2021-present ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 */

#include <seastar/net/dns.hh>
#include "controller.hh"
#include "server.hh"
#include "executor.hh"
#include "rmw_operation.hh"
#include "db/config.hh"
#include "cdc/generation_service.hh"
#include "service/memory_limiter.hh"
#include "auth/service.hh"
#include "service/qos/service_level_controller.hh"

using namespace seastar;

namespace alternator {

static logging::logger logger("alternator_controller");

controller::controller(
        sharded<gms::gossiper>& gossiper,
        sharded<service::storage_proxy>& proxy,
        sharded<service::migration_manager>& mm,
        sharded<db::system_distributed_keyspace>& sys_dist_ks,
        sharded<cdc::generation_service>& cdc_gen_svc,
        sharded<service::memory_limiter>& memory_limiter,
        sharded<auth::service>& auth_service,
        sharded<qos::service_level_controller>& sl_controller,
        const db::config& config,
        seastar::scheduling_group sg)
    : protocol_server(sg)
    , _gossiper(gossiper)
    , _proxy(proxy)
    , _mm(mm)
    , _sys_dist_ks(sys_dist_ks)
    , _cdc_gen_svc(cdc_gen_svc)
    , _memory_limiter(memory_limiter)
    , _auth_service(auth_service)
    , _sl_controller(sl_controller)
    , _config(config)
{
}

sstring controller::name() const {
    return "alternator";
}

sstring controller::protocol() const {
    return "dynamodb";
}

sstring controller::protocol_version() const {
    return version;
}

std::vector<socket_address> controller::listen_addresses() const {
    return _listen_addresses;
}

future<> controller::start_server() {
    seastar::thread_attributes attr;
    attr.sched_group = _sched_group;
    return seastar::async(std::move(attr), [this] {
        _listen_addresses.clear();

        auto preferred = _config.listen_interface_prefer_ipv6() ? std::make_optional(net::inet_address::family::INET6) : std::nullopt;
        auto family = _config.enable_ipv6_dns_lookup() || preferred ? std::nullopt : std::make_optional(net::inet_address::family::INET);

        // Create an smp_service_group to be used for limiting the
        // concurrency when forwarding Alternator requests between
        // shards - if necessary for LWT.
        smp_service_group_config c;
        c.max_nonlocal_requests = 5000;
        _ssg = create_smp_service_group(c).get();

        rmw_operation::set_default_write_isolation(_config.alternator_write_isolation());

        net::inet_address addr = utils::resolve(_config.alternator_address, family).get();

        auto get_cdc_metadata = [] (cdc::generation_service& svc) { return std::ref(svc.get_cdc_metadata()); };
        auto get_timeout_in_ms = [] (const db::config& cfg) -> utils::updateable_value<uint32_t> {
            return cfg.alternator_timeout_in_ms;
        };
        _executor.start(std::ref(_gossiper), std::ref(_proxy), std::ref(_mm), std::ref(_sys_dist_ks),
                sharded_parameter(get_cdc_metadata, std::ref(_cdc_gen_svc)), _ssg.value(),
                sharded_parameter(get_timeout_in_ms, std::ref(_config))).get();
        _server.start(std::ref(_executor), std::ref(_proxy), std::ref(_gossiper), std::ref(_auth_service), std::ref(_sl_controller)).get();
        // Note: from this point on, if start_server() throws for any reason,
        // it must first call stop_server() to stop the executor and server
        // services we just started - or Scylla will cause an assertion
        // failure when the controller object is destroyed in the exception
        // unwinding.
        std::optional<uint16_t> alternator_port;
        if (_config.alternator_port()) {
            alternator_port = _config.alternator_port();
            _listen_addresses.push_back({addr, *alternator_port});
        }
        std::optional<uint16_t> alternator_https_port;
        std::optional<tls::credentials_builder> creds;
        if (_config.alternator_https_port()) {
            alternator_https_port = _config.alternator_https_port();
            _listen_addresses.push_back({addr, *alternator_https_port});
            creds.emplace();
            auto opts = _config.alternator_encryption_options();
            if (opts.empty()) {
                // Earlier versions mistakenly configured Alternator's
                // HTTPS parameters via the "server_encryption_option"
                // configuration parameter. We *temporarily* continue
                // to allow this, for backward compatibility.
                opts = _config.server_encryption_options();
                if (!opts.empty()) {
                    logger.warn("Setting server_encryption_options to configure "
                            "Alternator's HTTPS encryption is deprecated. Please "
                            "switch to setting alternator_encryption_options instead.");
                }
            }
            opts.erase("require_client_auth");
            opts.erase("truststore");
            try {
                utils::configure_tls_creds_builder(creds.value(), std::move(opts)).get();
            } catch (...) {
                logger.error("Failed to set up Alternator TLS credentials: {}", std::current_exception());
                stop_server().get();
                std::throw_with_nested(std::runtime_error("Failed to set up Alternator TLS credentials"));
            }
        }
        bool alternator_enforce_authorization = _config.alternator_enforce_authorization();
        _server.invoke_on_all(
                [this, addr, alternator_port, alternator_https_port, creds = std::move(creds), alternator_enforce_authorization] (server& server) mutable {
            return server.init(addr, alternator_port, alternator_https_port, creds, alternator_enforce_authorization,
                    &_memory_limiter.local().get_semaphore(),
                    _config.max_concurrent_requests_per_shard);
        }).handle_exception([this, addr, alternator_port, alternator_https_port] (std::exception_ptr ep) {
            logger.error("Failed to set up Alternator HTTP server on {} port {}, TLS port {}: {}",
                    addr, alternator_port ? std::to_string(*alternator_port) : "OFF", alternator_https_port ? std::to_string(*alternator_https_port) : "OFF", ep);
            return stop_server().then([ep = std::move(ep)] { return make_exception_future<>(ep); });
        }).then([addr, alternator_port, alternator_https_port] {
            logger.info("Alternator server listening on {}, HTTP port {}, HTTPS port {}",
                    addr, alternator_port ? std::to_string(*alternator_port) : "OFF", alternator_https_port ? std::to_string(*alternator_https_port) : "OFF");
        }).get();
    });
}

future<> controller::stop_server() {
    return seastar::async([this] {
        if (!_ssg) {
            return;
        }
        _server.stop().get();
        _executor.stop().get();
        _listen_addresses.clear();
        destroy_smp_service_group(_ssg.value()).get();
    });
}

future<> controller::request_stop_server() {
    return with_scheduling_group(_sched_group, [this] {
        return stop_server();
    });
}

}
@@ -1,95 +0,0 @@

/*
 * Copyright (C) 2021-present ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 */

#pragma once

#include <seastar/core/sharded.hh>
#include <seastar/core/smp.hh>

#include "protocol_server.hh"

namespace service {
class storage_proxy;
class migration_manager;
class memory_limiter;
}

namespace db {
class system_distributed_keyspace;
class config;
}

namespace cdc {
class generation_service;
}

namespace gms {

class gossiper;

}

namespace auth {
class service;
}

namespace qos {
class service_level_controller;
}

namespace alternator {

// This is the official DynamoDB API version.
// It represents the last major reorganization of that API; none of the
// features added since has incremented this version string.
constexpr const char* version = "2012-08-10";

using namespace seastar;

class executor;
class server;

class controller : public protocol_server {
    sharded<gms::gossiper>& _gossiper;
    sharded<service::storage_proxy>& _proxy;
    sharded<service::migration_manager>& _mm;
    sharded<db::system_distributed_keyspace>& _sys_dist_ks;
    sharded<cdc::generation_service>& _cdc_gen_svc;
    sharded<service::memory_limiter>& _memory_limiter;
    sharded<auth::service>& _auth_service;
    sharded<qos::service_level_controller>& _sl_controller;
    const db::config& _config;

    std::vector<socket_address> _listen_addresses;
    sharded<executor> _executor;
    sharded<server> _server;
    std::optional<smp_service_group> _ssg;

public:
    controller(
        sharded<gms::gossiper>& gossiper,
        sharded<service::storage_proxy>& proxy,
        sharded<service::migration_manager>& mm,
        sharded<db::system_distributed_keyspace>& sys_dist_ks,
        sharded<cdc::generation_service>& cdc_gen_svc,
        sharded<service::memory_limiter>& memory_limiter,
        sharded<auth::service>& auth_service,
        sharded<qos::service_level_controller>& sl_controller,
        const db::config& config,
        seastar::scheduling_group sg);

    virtual sstring name() const override;
    virtual sstring protocol() const override;
    virtual sstring protocol_version() const override;
    virtual std::vector<socket_address> listen_addresses() const override;
    virtual future<> start_server() override;
    virtual future<> stop_server() override;
    virtual future<> request_stop_server() override;
};

}
@@ -1,105 +0,0 @@

/*
 * Copyright 2019-present ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 */

#pragma once

#include <seastar/http/httpd.hh>
#include "seastarx.hh"
#include "utils/rjson.hh"

namespace alternator {

// api_error contains a DynamoDB error message to be returned to the user.
// It can be returned by value (see executor::request_return_type) or thrown.
// DynamoDB's error messages are described in detail in
// https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html
// An error message has an HTTP code (almost always 400), a type, e.g.,
// "ResourceNotFoundException", and a human-readable message.
// Eventually alternator::api_handler will convert a returned or thrown
// api_error into a JSON object, and that is returned to the user.
class api_error final : public std::exception {
public:
    using status_type = http::reply::status_type;
    status_type _http_code;
    std::string _type;
    std::string _msg;
    // Additional data attached to the error; a null value if not set. It is
    // wrapped in the copyable_value class because exception classes must have
    // a copy constructor, otherwise the code won't compile (even though the
    // copy may be optimized away).
    rjson::copyable_value _extra_fields;
    api_error(std::string type, std::string msg, status_type http_code = status_type::bad_request,
            rjson::value extra_fields = rjson::null_value())
        : _http_code(std::move(http_code))
        , _type(std::move(type))
        , _msg(std::move(msg))
        , _extra_fields(std::move(extra_fields))
    { }

    // Factory functions for some common types of DynamoDB API errors
    static api_error validation(std::string msg) {
        return api_error("ValidationException", std::move(msg));
    }
    static api_error resource_not_found(std::string msg) {
        return api_error("ResourceNotFoundException", std::move(msg));
    }
    static api_error resource_in_use(std::string msg) {
        return api_error("ResourceInUseException", std::move(msg));
    }
    static api_error invalid_signature(std::string msg) {
        return api_error("InvalidSignatureException", std::move(msg));
    }
    static api_error missing_authentication_token(std::string msg) {
        return api_error("MissingAuthenticationTokenException", std::move(msg));
    }
    static api_error unrecognized_client(std::string msg) {
        return api_error("UnrecognizedClientException", std::move(msg));
    }
    static api_error unknown_operation(std::string msg) {
        return api_error("UnknownOperationException", std::move(msg));
    }
    static api_error access_denied(std::string msg) {
        return api_error("AccessDeniedException", std::move(msg));
    }
    static api_error conditional_check_failed(std::string msg, rjson::value&& item) {
        if (!item.IsNull()) {
            auto tmp = rjson::empty_object();
            rjson::add(tmp, "Item", std::move(item));
            item = std::move(tmp);
        }
        return api_error("ConditionalCheckFailedException", std::move(msg), status_type::bad_request, std::move(item));
    }
    static api_error expired_iterator(std::string msg) {
        return api_error("ExpiredIteratorException", std::move(msg));
    }
    static api_error trimmed_data_access_exception(std::string msg) {
        return api_error("TrimmedDataAccessException", std::move(msg));
    }
    static api_error request_limit_exceeded(std::string msg) {
        return api_error("RequestLimitExceeded", std::move(msg));
    }
    static api_error serialization(std::string msg) {
        return api_error("SerializationException", std::move(msg));
    }
    static api_error table_not_found(std::string msg) {
        return api_error("TableNotFoundException", std::move(msg));
    }
    static api_error internal(std::string msg) {
        return api_error("InternalServerError", std::move(msg), http::reply::status_type::internal_server_error);
    }
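
    // Usage sketch (illustrative, not from the original header): request
    // handlers typically construct and throw these factory-made errors, e.g.
    //     throw api_error::validation(format("Unknown attribute {}", name));
    // and the API handler later serializes the exception into the DynamoDB
    // error-response JSON.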

    // Provide the "std::exception" interface, to make it easier to print this
    // exception in log messages. Note that this function is *not* used to
    // format the error to send it back to the client - server.cc has
    // generate_error_reply() to format an api_error as the DynamoDB protocol
    // requires.
    virtual const char* what() const noexcept override;
    mutable std::string _what_string;
};

}
File diff suppressed because it is too large
@@ -1,265 +0,0 @@

/*
 * Copyright 2019-present ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 */

#pragma once

#include <seastar/core/future.hh>
#include "seastarx.hh"
#include <seastar/json/json_elements.hh>
#include <seastar/core/sharded.hh>

#include "service/migration_manager.hh"
#include "service/client_state.hh"
#include "service_permit.hh"
#include "db/timeout_clock.hh"

#include "alternator/error.hh"
#include "stats.hh"
#include "utils/rjson.hh"
#include "utils/updateable_value.hh"

namespace db {
class system_distributed_keyspace;
}

namespace query {
class partition_slice;
class result;
}

namespace cql3::selection {
class selection;
}

namespace service {
class storage_proxy;
}

namespace cdc {
class metadata;
}

namespace gms {

class gossiper;

}

namespace alternator {

class rmw_operation;

struct make_jsonable : public json::jsonable {
    rjson::value _value;
public:
    explicit make_jsonable(rjson::value&& value);
    std::string to_json() const override;
};

/**
 * Make a return type for serializing the object "streamed",
 * i.e. directly to the HTTP output stream. Note: this is only useful
 * for (very) large objects, as the approach has overhead of its own,
 * but for massive lists of returned objects it can help avoid large
 * allocations and many re-allocations.
 */
json::json_return_type make_streamed(rjson::value&&);

struct json_string : public json::jsonable {
    std::string _value;
public:
    explicit json_string(std::string&& value);
    std::string to_json() const override;
};

namespace parsed {
class path;
};

schema_ptr get_table(service::storage_proxy& proxy, const rjson::value& request);
bool is_alternator_keyspace(const sstring& ks_name);
// Wraps db::get_tags_of_table and throws if the table is missing the tags extension.
const std::map<sstring, sstring>& get_tags_of_table_or_throw(schema_ptr schema);

// An attribute_path_map object is used to hold data for various attribute
// paths (parsed::path) in a hierarchy of attribute paths. Each attribute path
// has a root attribute, which is then modified by member and index operators -
// for example in "a.b[2].c" we have "a" as the root, then the ".b" member, then
// the "[2]" index, and finally the ".c" member.
// Data can be added to an attribute_path_map using the add() function, which
// requires that attributes with data not be *overlapping* or *conflicting*:
//
// 1. Two attribute paths which are identical or an ancestor of one another
//    are considered *overlapping* and not allowed. If a.b.c has data,
//    we can't add more data in a.b.c or any of its descendants like a.b.c.d.
//
// 2. Two attribute paths which need the same parent to have both a member and
//    an index are considered *conflicting* and not allowed. E.g., if a.b has
//    data, you can't add a[1]. The meaning of adding both would be that the
//    attribute a is both a map and an array, which isn't sensible.
//
// These two requirements are common to the two places where Alternator uses
// this abstraction to describe how a hierarchical item is to be transformed:
//
// 1. In ProjectionExpression: for filtering from a full top-level attribute
//    only the parts which the user asked for in ProjectionExpression.
//
// 2. In UpdateExpression: for taking the previous value of a top-level
//    attribute, and modifying it based on the instructions the user
//    wrote in UpdateExpression.
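
// Illustrative example (added for clarity, not in the original header):
// after add()-ing data for the paths "a.b" and "a.c", the hierarchy has root
// "a" with two members; attempting to then add "a.b.d" fails (overlap with
// "a.b"), and attempting to add "a[0]" fails (conflict: "a" cannot be both
// a map and an array).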

template<typename T>
class attribute_path_map_node {
public:
    using data_t = T;
    // We need the extra unique_ptr<> here because libstdc++ unordered_map
    // doesn't work with incomplete types :-(
    using members_t = std::unordered_map<std::string, std::unique_ptr<attribute_path_map_node<T>>>;
    // The indexes list is sorted because DynamoDB requires handling writes
    // beyond the end of a list in index order.
    using indexes_t = std::map<unsigned, std::unique_ptr<attribute_path_map_node<T>>>;
    // The prohibition on "overlap" and "conflict" explained above means
    // that only one of data, members or indexes is non-empty.
    std::optional<std::variant<data_t, members_t, indexes_t>> _content;

    bool is_empty() const { return !_content; }
    bool has_value() const { return _content && std::holds_alternative<data_t>(*_content); }
    bool has_members() const { return _content && std::holds_alternative<members_t>(*_content); }
    bool has_indexes() const { return _content && std::holds_alternative<indexes_t>(*_content); }
    // get_members() assumes that has_members() is true
    members_t& get_members() { return std::get<members_t>(*_content); }
    const members_t& get_members() const { return std::get<members_t>(*_content); }
    indexes_t& get_indexes() { return std::get<indexes_t>(*_content); }
    const indexes_t& get_indexes() const { return std::get<indexes_t>(*_content); }
    T& get_value() { return std::get<T>(*_content); }
    const T& get_value() const { return std::get<T>(*_content); }
};

template<typename T>
using attribute_path_map = std::unordered_map<std::string, attribute_path_map_node<T>>;

using attrs_to_get_node = attribute_path_map_node<std::monostate>;
// attrs_to_get lists which top-level attributes are needed, and possibly also
// which part of each top-level attribute is really needed (when nested
// attribute paths appeared in the query).
// Most code actually uses optional<attrs_to_get>. There, a disengaged
// optional means we should get all attributes, not specific ones.
using attrs_to_get = attribute_path_map<std::monostate>;
class executor : public peering_sharded_service<executor> {
|
||||
gms::gossiper& _gossiper;
|
||||
service::storage_proxy& _proxy;
|
||||
service::migration_manager& _mm;
|
||||
db::system_distributed_keyspace& _sdks;
|
||||
cdc::metadata& _cdc_metadata;
|
||||
// An smp_service_group to be used for limiting the concurrency when
|
||||
// forwarding Alternator request between shards - if necessary for LWT.
|
||||
smp_service_group _ssg;
|
||||
|
||||
public:
|
||||
using client_state = service::client_state;
|
||||
using request_return_type = std::variant<json::json_return_type, api_error>;
|
||||
stats _stats;
|
||||
static constexpr auto ATTRS_COLUMN_NAME = ":attrs";
|
||||
static constexpr auto KEYSPACE_NAME_PREFIX = "alternator_";
|
||||
static constexpr std::string_view INTERNAL_TABLE_PREFIX = ".scylla.alternator.";
|
||||
|
||||
executor(gms::gossiper& gossiper,
|
||||
service::storage_proxy& proxy,
|
||||
service::migration_manager& mm,
|
||||
db::system_distributed_keyspace& sdks,
|
||||
cdc::metadata& cdc_metadata,
|
||||
smp_service_group ssg,
|
||||
utils::updateable_value<uint32_t> default_timeout_in_ms)
|
||||
: _gossiper(gossiper), _proxy(proxy), _mm(mm), _sdks(sdks), _cdc_metadata(cdc_metadata), _ssg(ssg) {
|
||||
s_default_timeout_in_ms = std::move(default_timeout_in_ms);
|
||||
}
|
||||
|
||||
future<request_return_type> create_table(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> describe_table(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> delete_table(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> update_table(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> put_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> get_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> delete_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> update_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> list_tables(client_state& client_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> scan(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> describe_endpoints(client_state& client_state, service_permit permit, rjson::value request, std::string host_header);
|
||||
future<request_return_type> batch_write_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> batch_get_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> query(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> tag_resource(client_state& client_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> untag_resource(client_state& client_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> list_tags_of_resource(client_state& client_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> update_time_to_live(client_state& client_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> describe_time_to_live(client_state& client_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> list_streams(client_state& client_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> describe_stream(client_state& client_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> get_shard_iterator(client_state& client_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> get_records(client_state& client_state, tracing::trace_state_ptr, service_permit permit, rjson::value request);
|
||||
future<request_return_type> describe_continuous_backups(client_state& client_state, service_permit permit, rjson::value request);
|
||||
|
||||
future<> start();
|
||||
future<> stop() {
|
||||
// disconnect from the value source, but keep the value unchanged.
|
||||
s_default_timeout_in_ms = utils::updateable_value<uint32_t>{s_default_timeout_in_ms()};
|
||||
return make_ready_future<>();
|
||||
}
|
||||
|
||||
static sstring table_name(const schema&);
|
||||
static db::timeout_clock::time_point default_timeout();
|
||||
private:
|
||||
static thread_local utils::updateable_value<uint32_t> s_default_timeout_in_ms;
|
||||
public:
|
||||
static schema_ptr find_table(service::storage_proxy&, const rjson::value& request);
|
||||
|
||||
private:
|
||||
friend class rmw_operation;
|
||||
|
||||
static void describe_key_schema(rjson::value& parent, const schema&, std::unordered_map<std::string,std::string> * = nullptr);
|
||||
|
||||
public:
|
||||
static void describe_key_schema(rjson::value& parent, const schema& schema, std::unordered_map<std::string,std::string>&);
|
||||
|
||||
static std::optional<rjson::value> describe_single_item(schema_ptr,
|
||||
const query::partition_slice&,
|
||||
const cql3::selection::selection&,
|
||||
const query::result&,
|
||||
const std::optional<attrs_to_get>&);
|
||||
|
||||
static future<std::vector<rjson::value>> describe_multi_item(schema_ptr schema,
|
||||
const query::partition_slice&& slice,
|
||||
shared_ptr<cql3::selection::selection> selection,
|
||||
foreign_ptr<lw_shared_ptr<query::result>> query_result,
|
||||
shared_ptr<const std::optional<attrs_to_get>> attrs_to_get);
|
||||
|
||||
static void describe_single_item(const cql3::selection::selection&,
|
||||
const std::vector<managed_bytes_opt>&,
|
||||
const std::optional<attrs_to_get>&,
|
||||
rjson::value&,
|
||||
bool = false);
|
||||
|
||||
static void add_stream_options(const rjson::value& stream_spec, schema_builder&, service::storage_proxy& sp);
|
||||
static void supplement_table_info(rjson::value& descr, const schema& schema, service::storage_proxy& sp);
|
||||
static void supplement_table_stream_info(rjson::value& descr, const schema& schema, const service::storage_proxy& sp);
|
||||
};
|
||||
|
||||
// is_big() checks approximately if the given JSON value is "bigger" than
|
||||
// the given big_size number of bytes. The goal is to *quickly* detect
|
||||
// oversized JSON that, for example, is too large to be serialized to a
|
||||
// contiguous string - we don't need an accurate size for that. Moreover,
|
||||
// as soon as we detect that the JSON is indeed "big", we can return true
|
||||
// and don't need to continue calculating its exact size.
|
||||
// For simplicity, we use a recursive implementation. This is fine because
|
||||
// Alternator limits the depth of JSONs it reads from inputs, and doesn't
|
||||
// add more than a couple of levels in its own output construction.
|
||||
bool is_big(const rjson::value& val, int big_size = 100'000);
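// A minimal sketch (not part of the original header) of the approach the
// comment above describes: recursively accumulate an approximate size and
// return true as soon as the running total exceeds big_size. The helper
// name is_big_sketch and the per-node constant 10 are illustrative
// assumptions, not Alternator's actual implementation.
inline bool is_big_sketch(const rjson::value& val, int big_size, int& size) {
    size += 10; // rough per-node overhead; the exact constant is unimportant
    if (val.IsString()) {
        size += val.GetStringLength();
    } else if (val.IsArray()) {
        for (const rjson::value& elem : val.GetArray()) {
            if (is_big_sketch(elem, big_size, size)) {
                return true; // already over budget, stop descending
            }
        }
    } else if (val.IsObject()) {
        for (const auto& member : val.GetObject()) {
            size += member.name.GetStringLength();
            if (is_big_sketch(member.value, big_size, size)) {
                return true;
            }
        }
    }
    return size > big_size;
}
// Usage: int n = 0; bool big = is_big_sketch(doc, 100'000, n);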
|
||||
|
||||
}
|
||||
@@ -1,760 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include "expressions.hh"
|
||||
#include "serialization.hh"
|
||||
#include "utils/base64.hh"
|
||||
#include "conditions.hh"
|
||||
#include "alternator/expressionsLexer.hpp"
|
||||
#include "alternator/expressionsParser.hpp"
|
||||
#include "utils/overloaded_functor.hh"
|
||||
#include "error.hh"
|
||||
|
||||
#include "seastarx.hh"
|
||||
|
||||
#include <seastar/core/print.hh>
|
||||
#include <seastar/util/log.hh>
|
||||
|
||||
#include <boost/algorithm/cxx11/any_of.hpp>
|
||||
#include <boost/algorithm/cxx11/all_of.hpp>
|
||||
|
||||
#include <functional>
|
||||
#include <unordered_map>
|
||||
|
||||
namespace alternator {
|
||||
|
||||
template <typename Func, typename Result = std::invoke_result_t<Func, expressionsParser&>>
|
||||
static Result do_with_parser(std::string_view input, Func&& f) {
|
||||
expressionsLexer::InputStreamType input_stream{
|
||||
reinterpret_cast<const ANTLR_UINT8*>(input.data()),
|
||||
ANTLR_ENC_UTF8,
|
||||
static_cast<ANTLR_UINT32>(input.size()),
|
||||
nullptr };
|
||||
expressionsLexer lexer(&input_stream);
|
||||
expressionsParser::TokenStreamType tstream(ANTLR_SIZE_HINT, lexer.get_tokSource());
|
||||
expressionsParser parser(&tstream);
|
||||
|
||||
auto result = f(parser);
|
||||
return result;
|
||||
}
|
||||
|
||||
template <typename Func, typename Result = std::invoke_result_t<Func, expressionsParser&>>
|
||||
static Result parse(const char* input_name, std::string_view input, Func&& f) {
|
||||
if (input.length() > 4096) {
|
||||
throw expressions_syntax_error(format("{} expression size {} exceeds allowed maximum 4096.",
|
||||
input_name, input.length()));
|
||||
}
|
||||
try {
|
||||
return do_with_parser(input, f);
|
||||
} catch (expressions_syntax_error& e) {
|
||||
// If already an expressions_syntax_error, don't print the type's
|
||||
// name (it's just ugly), just the message.
|
||||
// TODO: displayRecognitionError could set a position inside the
|
||||
// expressions_syntax_error it throws, and we could use it here to
|
||||
// mark the broken position in 'input'.
|
||||
throw expressions_syntax_error(format("Failed parsing {} '{}': {}",
|
||||
input_name, input, e.what()));
|
||||
} catch (...) {
|
||||
throw expressions_syntax_error(format("Failed parsing {} '{}': {}",
|
||||
input_name, input, std::current_exception()));
|
||||
}
|
||||
}
|
||||
|
||||
parsed::update_expression
|
||||
parse_update_expression(std::string_view query) {
|
||||
return parse("UpdateExpression", query, std::mem_fn(&expressionsParser::update_expression));
|
||||
}
|
||||
|
||||
std::vector<parsed::path>
|
||||
parse_projection_expression(std::string_view query) {
|
||||
return parse("ProjectionExpression", query, std::mem_fn(&expressionsParser::projection_expression));
|
||||
}
|
||||
|
||||
parsed::condition_expression
|
||||
parse_condition_expression(std::string_view query, const char* caller) {
|
||||
return parse(caller, query, std::mem_fn(&expressionsParser::condition_expression));
|
||||
}
|
||||
|
||||
namespace parsed {
|
||||
|
||||
void update_expression::add(update_expression::action a) {
|
||||
std::visit(overloaded_functor {
|
||||
[&] (action::set&) { seen_set = true; },
|
||||
[&] (action::remove&) { seen_remove = true; },
|
||||
[&] (action::add&) { seen_add = true; },
|
||||
[&] (action::del&) { seen_del = true; }
|
||||
}, a._action);
|
||||
_actions.push_back(std::move(a));
|
||||
}
|
||||
|
||||
void update_expression::append(update_expression other) {
|
||||
if ((seen_set && other.seen_set) ||
|
||||
(seen_remove && other.seen_remove) ||
|
||||
(seen_add && other.seen_add) ||
|
||||
(seen_del && other.seen_del)) {
|
||||
throw expressions_syntax_error("Each of SET, REMOVE, ADD, DELETE may only appear once in UpdateExpression");
|
||||
}
|
||||
std::move(other._actions.begin(), other._actions.end(), std::back_inserter(_actions));
|
||||
seen_set |= other.seen_set;
|
||||
seen_remove |= other.seen_remove;
|
||||
seen_add |= other.seen_add;
|
||||
seen_del |= other.seen_del;
|
||||
}
|
||||
|
||||
void condition_expression::append(condition_expression&& a, char op) {
|
||||
std::visit(overloaded_functor {
|
||||
[&] (condition_list& x) {
|
||||
// If 'a' has a single condition, we could insert that single condition
// directly (possibly negated if a._negated) instead of inserting 'a'
// itself. But considering that we don't evaluate these expressions many
// times, this optimization is not worth the extra code complexity.
|
||||
if (!x.conditions.empty() && x.op != op) {
|
||||
// Shouldn't happen unless we have a bug in the parser
|
||||
throw std::logic_error("condition_expression::append called with mixed operators");
|
||||
}
|
||||
x.conditions.push_back(std::move(a));
|
||||
x.op = op;
|
||||
},
|
||||
[&] (primitive_condition& x) {
|
||||
// Shouldn't happen unless we have a bug in the parser
|
||||
throw std::logic_error("condition_expression::append called on primitive_condition");
|
||||
}
|
||||
}, _expression);
|
||||
}
|
||||
|
||||
void path::check_depth_limit() {
|
||||
if (1 + _operators.size() > depth_limit) {
|
||||
throw expressions_syntax_error(format("Document path exceeded {} nesting levels", depth_limit));
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace parsed
|
||||
|
||||
// The following resolve_*() functions resolve references in parsed
|
||||
// expressions of different types. Resolving a parsed expression means
|
||||
// replacing:
|
||||
// 1. In parsed::path objects, replace references like "#name" with the
|
||||
// attribute name from ExpressionAttributeNames,
|
||||
// 2. In parsed::constant objects, replace references like ":value" with
|
||||
// the value from ExpressionAttributeValues.
|
||||
// These functions also track which name and value references were used, to
|
||||
// allow complaining if some remain unused.
|
||||
// Note that the resolve_*() functions modify the expressions in-place,
|
||||
// so if we ever intend to cache parsed expressions, we need to pass a copy
// into these functions.
|
||||
//
|
||||
// Doing the "resolving" stage before the evaluation stage has two benefits.
|
||||
// First, it allows us to be compatible with DynamoDB in catching unused
|
||||
// names and values (see issue #6572). Second, in the FilterExpression case,
|
||||
// we need to resolve the expression just once but then use it many times
|
||||
// (once for each item to be filtered).
|
||||
|
||||
static std::optional<std::string> resolve_path_component(const std::string& column_name,
|
||||
const rjson::value* expression_attribute_names,
|
||||
std::unordered_set<std::string>& used_attribute_names) {
|
||||
if (column_name.size() > 0 && column_name.front() == '#') {
|
||||
if (!expression_attribute_names) {
|
||||
throw api_error::validation(
|
||||
format("ExpressionAttributeNames missing, entry '{}' required by expression", column_name));
|
||||
}
|
||||
const rjson::value* value = rjson::find(*expression_attribute_names, column_name);
|
||||
if (!value || !value->IsString()) {
|
||||
throw api_error::validation(
|
||||
format("ExpressionAttributeNames missing entry '{}' required by expression", column_name));
|
||||
}
|
||||
used_attribute_names.emplace(column_name);
|
||||
return std::string(rjson::to_string_view(*value));
|
||||
}
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
static void resolve_path(parsed::path& p,
|
||||
const rjson::value* expression_attribute_names,
|
||||
std::unordered_set<std::string>& used_attribute_names) {
|
||||
std::optional<std::string> r = resolve_path_component(p.root(), expression_attribute_names, used_attribute_names);
|
||||
if (r) {
|
||||
p.set_root(std::move(*r));
|
||||
}
|
||||
for (auto& op : p.operators()) {
|
||||
std::visit(overloaded_functor {
|
||||
[&] (std::string& s) {
|
||||
r = resolve_path_component(s, expression_attribute_names, used_attribute_names);
|
||||
if (r) {
|
||||
s = std::move(*r);
|
||||
}
|
||||
},
|
||||
[&] (unsigned index) {
|
||||
// nothing to resolve
|
||||
}
|
||||
}, op);
|
||||
}
|
||||
}
|
||||
|
||||
static void resolve_constant(parsed::constant& c,
|
||||
const rjson::value* expression_attribute_values,
|
||||
std::unordered_set<std::string>& used_attribute_values) {
|
||||
std::visit(overloaded_functor {
|
||||
[&] (const std::string& valref) {
|
||||
if (!expression_attribute_values) {
|
||||
throw api_error::validation(
|
||||
format("ExpressionAttributeValues missing, entry '{}' required by expression", valref));
|
||||
}
|
||||
const rjson::value* value = rjson::find(*expression_attribute_values, valref);
|
||||
if (!value) {
|
||||
throw api_error::validation(
|
||||
format("ExpressionAttributeValues missing entry '{}' required by expression", valref));
|
||||
}
|
||||
if (value->IsNull()) {
|
||||
throw api_error::validation(
|
||||
format("ExpressionAttributeValues null value for entry '{}' required by expression", valref));
|
||||
}
|
||||
validate_value(*value, "ExpressionAttributeValues");
|
||||
used_attribute_values.emplace(valref);
|
||||
c.set(*value);
|
||||
},
|
||||
[&] (const parsed::constant::literal& lit) {
|
||||
// Nothing to do, already resolved
|
||||
}
|
||||
}, c._value);
|
||||
|
||||
}
|
||||
|
||||
void resolve_value(parsed::value& rhs,
|
||||
const rjson::value* expression_attribute_names,
|
||||
const rjson::value* expression_attribute_values,
|
||||
std::unordered_set<std::string>& used_attribute_names,
|
||||
std::unordered_set<std::string>& used_attribute_values) {
|
||||
std::visit(overloaded_functor {
|
||||
[&] (parsed::constant& c) {
|
||||
resolve_constant(c, expression_attribute_values, used_attribute_values);
|
||||
},
|
||||
[&] (parsed::value::function_call& f) {
|
||||
for (parsed::value& value : f._parameters) {
|
||||
resolve_value(value, expression_attribute_names, expression_attribute_values,
|
||||
used_attribute_names, used_attribute_values);
|
||||
}
|
||||
},
|
||||
[&] (parsed::path& p) {
|
||||
resolve_path(p, expression_attribute_names, used_attribute_names);
|
||||
}
|
||||
}, rhs._value);
|
||||
}
|
||||
|
||||
void resolve_set_rhs(parsed::set_rhs& rhs,
|
||||
const rjson::value* expression_attribute_names,
|
||||
const rjson::value* expression_attribute_values,
|
||||
std::unordered_set<std::string>& used_attribute_names,
|
||||
std::unordered_set<std::string>& used_attribute_values) {
|
||||
resolve_value(rhs._v1, expression_attribute_names, expression_attribute_values,
|
||||
used_attribute_names, used_attribute_values);
|
||||
if (rhs._op != 'v') {
|
||||
resolve_value(rhs._v2, expression_attribute_names, expression_attribute_values,
|
||||
used_attribute_names, used_attribute_values);
|
||||
}
|
||||
}
|
||||
|
||||
void resolve_update_expression(parsed::update_expression& ue,
|
||||
const rjson::value* expression_attribute_names,
|
||||
const rjson::value* expression_attribute_values,
|
||||
std::unordered_set<std::string>& used_attribute_names,
|
||||
std::unordered_set<std::string>& used_attribute_values) {
|
||||
for (parsed::update_expression::action& action : ue.actions()) {
|
||||
resolve_path(action._path, expression_attribute_names, used_attribute_names);
|
||||
std::visit(overloaded_functor {
|
||||
[&] (parsed::update_expression::action::set& a) {
|
||||
resolve_set_rhs(a._rhs, expression_attribute_names, expression_attribute_values,
|
||||
used_attribute_names, used_attribute_values);
|
||||
},
|
||||
[&] (parsed::update_expression::action::remove& a) {
|
||||
// nothing to do
|
||||
},
|
||||
[&] (parsed::update_expression::action::add& a) {
|
||||
resolve_constant(a._valref, expression_attribute_values, used_attribute_values);
|
||||
},
|
||||
[&] (parsed::update_expression::action::del& a) {
|
||||
resolve_constant(a._valref, expression_attribute_values, used_attribute_values);
|
||||
}
|
||||
}, action._action);
|
||||
}
|
||||
}
|
||||
|
||||
static void resolve_primitive_condition(parsed::primitive_condition& pc,
|
||||
const rjson::value* expression_attribute_names,
|
||||
const rjson::value* expression_attribute_values,
|
||||
std::unordered_set<std::string>& used_attribute_names,
|
||||
std::unordered_set<std::string>& used_attribute_values) {
|
||||
for (parsed::value& value : pc._values) {
|
||||
resolve_value(value,
|
||||
expression_attribute_names, expression_attribute_values,
|
||||
used_attribute_names, used_attribute_values);
|
||||
}
|
||||
}
|
||||
|
||||
void resolve_condition_expression(parsed::condition_expression& ce,
|
||||
const rjson::value* expression_attribute_names,
|
||||
const rjson::value* expression_attribute_values,
|
||||
std::unordered_set<std::string>& used_attribute_names,
|
||||
std::unordered_set<std::string>& used_attribute_values) {
|
||||
std::visit(overloaded_functor {
|
||||
[&] (parsed::primitive_condition& cond) {
|
||||
resolve_primitive_condition(cond,
|
||||
expression_attribute_names, expression_attribute_values,
|
||||
used_attribute_names, used_attribute_values);
|
||||
},
|
||||
[&] (parsed::condition_expression::condition_list& list) {
|
||||
for (parsed::condition_expression& cond : list.conditions) {
|
||||
resolve_condition_expression(cond,
|
||||
expression_attribute_names, expression_attribute_values,
|
||||
used_attribute_names, used_attribute_values);
|
||||
|
||||
}
|
||||
}
|
||||
}, ce._expression);
|
||||
}
|
||||
|
||||
void resolve_projection_expression(std::vector<parsed::path>& pe,
|
||||
const rjson::value* expression_attribute_names,
|
||||
std::unordered_set<std::string>& used_attribute_names) {
|
||||
for (parsed::path& p : pe) {
|
||||
resolve_path(p, expression_attribute_names, used_attribute_names);
|
||||
}
|
||||
}
|
||||
|
||||
// condition_expression_on() checks whether a condition_expression places any
|
||||
// condition on the given attribute. It can be useful, for example, for
|
||||
// checking whether the condition tries to restrict a key column.
|
||||
|
||||
static bool value_on(const parsed::value& v, std::string_view attribute) {
|
||||
return std::visit(overloaded_functor {
|
||||
[&] (const parsed::constant& c) {
|
||||
return false;
|
||||
},
|
||||
[&] (const parsed::value::function_call& f) {
|
||||
for (const parsed::value& value : f._parameters) {
|
||||
if (value_on(value, attribute)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
},
|
||||
[&] (const parsed::path& p) {
|
||||
return p.root() == attribute;
|
||||
}
|
||||
}, v._value);
|
||||
}
|
||||
|
||||
static bool primitive_condition_on(const parsed::primitive_condition& pc, std::string_view attribute) {
|
||||
for (const parsed::value& value : pc._values) {
|
||||
if (value_on(value, attribute)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool condition_expression_on(const parsed::condition_expression& ce, std::string_view attribute) {
|
||||
return std::visit(overloaded_functor {
|
||||
[&] (const parsed::primitive_condition& cond) {
|
||||
return primitive_condition_on(cond, attribute);
|
||||
},
|
||||
[&] (const parsed::condition_expression::condition_list& list) {
|
||||
for (const parsed::condition_expression& cond : list.conditions) {
|
||||
if (condition_expression_on(cond, attribute)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
}, ce._expression);
|
||||
}
|
||||
|
||||
// for_condition_expression_on() runs a given function over all the attributes
|
||||
// mentioned in the expression. If the same attribute is mentioned more than
|
||||
// once, the function will be called more than once for the same attribute.
|
||||
|
||||
static void for_value_on(const parsed::value& v, const noncopyable_function<void(std::string_view)>& func) {
|
||||
std::visit(overloaded_functor {
|
||||
[&] (const parsed::constant& c) { },
|
||||
[&] (const parsed::value::function_call& f) {
|
||||
for (const parsed::value& value : f._parameters) {
|
||||
for_value_on(value, func);
|
||||
}
|
||||
},
|
||||
[&] (const parsed::path& p) {
|
||||
func(p.root());
|
||||
}
|
||||
}, v._value);
|
||||
}
|
||||
|
||||
void for_condition_expression_on(const parsed::condition_expression& ce, const noncopyable_function<void(std::string_view)>& func) {
|
||||
std::visit(overloaded_functor {
|
||||
[&] (const parsed::primitive_condition& cond) {
|
||||
for (const parsed::value& value : cond._values) {
|
||||
for_value_on(value, func);
|
||||
}
|
||||
},
|
||||
[&] (const parsed::condition_expression::condition_list& list) {
|
||||
for (const parsed::condition_expression& cond : list.conditions) {
|
||||
for_condition_expression_on(cond, func);
|
||||
}
|
||||
}
|
||||
}, ce._expression);
|
||||
}
|
||||
|
||||
// The following calculate_value() functions calculate, or evaluate, a parsed
|
||||
// expression. The parsed expression is assumed to have been "resolved", with
|
||||
// the matching resolve_* function.
|
||||
|
||||
// calculate_size() is ConditionExpression's size() function, i.e., it takes
|
||||
// a JSON-encoded value and returns its "size" as defined differently for the
|
||||
// different types - also as a JSON-encoded number.
|
||||
// If the value's type (e.g. number) has no size defined, there are two cases:
|
||||
// 1. If from_data (the value came directly from an attribute of the data),
|
||||
// it returns a JSON-encoded "null" value. Comparisons against this
|
||||
// non-numeric value will later fail, so eventually the application will
|
||||
// get a ConditionalCheckFailedException.
|
||||
// 2. Otherwise (the value came from a constant in the query or some other
|
||||
// calculation), throw a ValidationException.
|
||||
static rjson::value calculate_size(const rjson::value& v, bool from_data) {
|
||||
// NOTE: If v is improperly formatted for our JSON value encoding, it
|
||||
// must come from the request itself, not from the database, so it makes
|
||||
// sense to throw a ValidationException if we see such a problem.
|
||||
if (!v.IsObject() || v.MemberCount() != 1) {
|
||||
throw api_error::validation(format("invalid object: {}", v));
|
||||
}
|
||||
auto it = v.MemberBegin();
|
||||
int ret;
|
||||
if (it->name == "S") {
|
||||
if (!it->value.IsString()) {
|
||||
throw api_error::validation(format("invalid string: {}", v));
|
||||
}
|
||||
ret = it->value.GetStringLength();
|
||||
} else if (it->name == "NS" || it->name == "SS" || it->name == "BS" || it->name == "L") {
|
||||
if (!it->value.IsArray()) {
|
||||
throw api_error::validation(format("invalid set: {}", v));
|
||||
}
|
||||
ret = it->value.Size();
|
||||
} else if (it->name == "M") {
|
||||
if (!it->value.IsObject()) {
|
||||
throw api_error::validation(format("invalid map: {}", v));
|
||||
}
|
||||
ret = it->value.MemberCount();
|
||||
} else if (it->name == "B") {
|
||||
if (!it->value.IsString()) {
|
||||
throw api_error::validation(format("invalid byte string: {}", v));
|
||||
}
|
||||
ret = base64_decoded_len(rjson::to_string_view(it->value));
|
||||
} else if (from_data) {
|
||||
rjson::value json_ret = rjson::empty_object();
|
||||
rjson::add(json_ret, "null", rjson::value(true));
|
||||
return json_ret;
|
||||
} else {
|
||||
throw api_error::validation(format("Unsupported operand type {} for function size()", it->name));
|
||||
}
|
||||
rjson::value json_ret = rjson::empty_object();
|
||||
rjson::add(json_ret, "N", rjson::from_string(std::to_string(ret)));
|
||||
return json_ret;
|
||||
}
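// Illustrative examples (not part of the original source) of what
// calculate_size() returns for a few inputs, per the rules above:
//   {"S": "hello"}                  -> {"N": "5"}     (string length)
//   {"L": [e1, e2, e3]}             -> {"N": "3"}     (number of elements)
//   {"N": "7"} with from_data=true  -> {"null": true} (size undefined)
//   {"N": "7"} with from_data=false -> ValidationException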
|
||||
|
||||
static const rjson::value& calculate_value(const parsed::constant& c) {
|
||||
return std::visit(overloaded_functor {
|
||||
[&] (const parsed::constant::literal& v) -> const rjson::value& {
|
||||
return *v;
|
||||
},
|
||||
[&] (const std::string& valref) -> const rjson::value& {
|
||||
// Shouldn't happen, we should have called resolve_value() earlier
|
||||
// and replaced the value reference by the literal constant.
|
||||
throw std::logic_error("calculate_value() called before resolve_value()");
|
||||
}
|
||||
}, c._value);
|
||||
}
|
||||
|
||||
static rjson::value to_bool_json(bool b) {
|
||||
rjson::value json_ret = rjson::empty_object();
|
||||
rjson::add(json_ret, "BOOL", rjson::value(b));
|
||||
return json_ret;
|
||||
}
|
||||
|
||||
static bool known_type(std::string_view type) {
|
||||
static thread_local const std::unordered_set<std::string_view> types = {
|
||||
"N", "S", "B", "NS", "SS", "BS", "L", "M", "NULL", "BOOL"
|
||||
};
|
||||
return types.contains(type);
|
||||
}
|
||||
|
||||
using function_handler_type = rjson::value(calculate_value_caller, const rjson::value*, const parsed::value::function_call&);
|
||||
static const
|
||||
std::unordered_map<std::string_view, function_handler_type*> function_handlers {
|
||||
{"list_append", [] (calculate_value_caller caller, const rjson::value* previous_item, const parsed::value::function_call& f) {
|
||||
if (caller != calculate_value_caller::UpdateExpression) {
|
||||
throw api_error::validation(
|
||||
format("{}: list_append() not allowed here", caller));
|
||||
}
|
||||
if (f._parameters.size() != 2) {
|
||||
throw api_error::validation(
|
||||
format("{}: list_append() accepts 2 parameters, got {}", caller, f._parameters.size()));
|
||||
}
|
||||
rjson::value v1 = calculate_value(f._parameters[0], caller, previous_item);
|
||||
rjson::value v2 = calculate_value(f._parameters[1], caller, previous_item);
|
||||
rjson::value ret = list_concatenate(v1, v2);
|
||||
if (ret.IsNull()) {
|
||||
throw api_error::validation("UpdateExpression: list_append() given a non-list");
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
},
|
||||
{"if_not_exists", [] (calculate_value_caller caller, const rjson::value* previous_item, const parsed::value::function_call& f) {
|
||||
if (caller != calculate_value_caller::UpdateExpression) {
|
||||
throw api_error::validation(
|
||||
format("{}: if_not_exists() not allowed here", caller));
|
||||
}
|
||||
if (f._parameters.size() != 2) {
|
||||
throw api_error::validation(
|
||||
format("{}: if_not_exists() accepts 2 parameters, got {}", caller, f._parameters.size()));
|
||||
}
|
||||
if (!std::holds_alternative<parsed::path>(f._parameters[0]._value)) {
|
||||
throw api_error::validation(
|
||||
format("{}: if_not_exists() must include path as its first argument", caller));
|
||||
}
|
||||
rjson::value v1 = calculate_value(f._parameters[0], caller, previous_item);
|
||||
rjson::value v2 = calculate_value(f._parameters[1], caller, previous_item);
|
||||
return v1.IsNull() ? std::move(v2) : std::move(v1);
|
||||
}
|
||||
},
|
||||
{"size", [] (calculate_value_caller caller, const rjson::value* previous_item, const parsed::value::function_call& f) {
|
||||
if (caller != calculate_value_caller::ConditionExpression) {
|
||||
throw api_error::validation(
|
||||
format("{}: size() not allowed here", caller));
|
||||
}
|
||||
if (f._parameters.size() != 1) {
|
||||
throw api_error::validation(
|
||||
format("{}: size() accepts 1 parameter, got {}", caller, f._parameters.size()));
|
||||
}
|
||||
rjson::value v = calculate_value(f._parameters[0], caller, previous_item);
|
||||
return calculate_size(v, f._parameters[0].is_path());
|
||||
}
|
||||
},
|
||||
{"attribute_exists", [] (calculate_value_caller caller, const rjson::value* previous_item, const parsed::value::function_call& f) {
|
||||
if (caller != calculate_value_caller::ConditionExpressionAlone) {
|
||||
throw api_error::validation(
|
||||
format("{}: attribute_exists() not allowed here", caller));
|
||||
}
|
||||
if (f._parameters.size() != 1) {
|
||||
throw api_error::validation(
|
||||
format("{}: attribute_exists() accepts 1 parameter, got {}", caller, f._parameters.size()));
|
||||
}
|
||||
if (!std::holds_alternative<parsed::path>(f._parameters[0]._value)) {
|
||||
throw api_error::validation(
|
||||
format("{}: attribute_exists()'s parameter must be a path", caller));
|
||||
}
|
||||
rjson::value v = calculate_value(f._parameters[0], caller, previous_item);
|
||||
return to_bool_json(!v.IsNull());
|
||||
}
|
||||
},
|
||||
{"attribute_not_exists", [] (calculate_value_caller caller, const rjson::value* previous_item, const parsed::value::function_call& f) {
|
||||
if (caller != calculate_value_caller::ConditionExpressionAlone) {
|
||||
throw api_error::validation(
|
||||
format("{}: attribute_not_exists() not allowed here", caller));
|
||||
}
|
||||
if (f._parameters.size() != 1) {
|
||||
throw api_error::validation(
|
||||
format("{}: attribute_not_exists() accepts 1 parameter, got {}", caller, f._parameters.size()));
|
||||
}
|
||||
if (!std::holds_alternative<parsed::path>(f._parameters[0]._value)) {
|
||||
throw api_error::validation(
|
||||
format("{}: attribute_not_exists()'s parameter must be a path", caller));
|
||||
}
|
||||
rjson::value v = calculate_value(f._parameters[0], caller, previous_item);
|
||||
return to_bool_json(v.IsNull());
|
||||
}
|
||||
},
|
||||
{"attribute_type", [] (calculate_value_caller caller, const rjson::value* previous_item, const parsed::value::function_call& f) {
|
||||
if (caller != calculate_value_caller::ConditionExpressionAlone) {
|
||||
throw api_error::validation(
|
||||
format("{}: attribute_type() not allowed here", caller));
|
||||
}
|
||||
if (f._parameters.size() != 2) {
|
||||
throw api_error::validation(
|
||||
format("{}: attribute_type() accepts 2 parameters, got {}", caller, f._parameters.size()));
|
||||
}
|
||||
// There is no real reason for the following check (not
|
||||
// allowing the type to come from a document attribute), but
|
||||
// DynamoDB does this check, so we do too...
|
||||
if (!f._parameters[1].is_constant()) {
|
||||
throw api_error::validation(
|
||||
format("{}: attribute_types()'s first parameter must be an expression attribute", caller));
|
||||
}
|
||||
rjson::value v0 = calculate_value(f._parameters[0], caller, previous_item);
|
||||
rjson::value v1 = calculate_value(f._parameters[1], caller, previous_item);
|
||||
if (v1.IsObject() && v1.MemberCount() == 1 && v1.MemberBegin()->name == "S") {
|
||||
// If the type parameter is not one of the legal types
|
||||
// we should generate an error, not a failed condition:
|
||||
if (!known_type(rjson::to_string_view(v1.MemberBegin()->value))) {
|
||||
throw api_error::validation(
|
||||
format("{}: attribute_types()'s second parameter, {}, is not a known type",
|
||||
caller, v1.MemberBegin()->value));
|
||||
}
|
||||
if (v0.IsObject() && v0.MemberCount() == 1) {
|
||||
return to_bool_json(v1.MemberBegin()->value == v0.MemberBegin()->name);
|
||||
} else {
|
||||
return to_bool_json(false);
|
||||
}
|
||||
} else {
|
||||
throw api_error::validation(
|
||||
format("{}: attribute_type() second parameter must refer to a string, got {}", caller, v1));
|
||||
}
|
||||
}
|
||||
},
|
||||
{"begins_with", [] (calculate_value_caller caller, const rjson::value* previous_item, const parsed::value::function_call& f) {
|
||||
if (caller != calculate_value_caller::ConditionExpressionAlone) {
|
||||
throw api_error::validation(
|
||||
format("{}: begins_with() not allowed here", caller));
|
||||
}
|
||||
if (f._parameters.size() != 2) {
|
||||
throw api_error::validation(
|
||||
format("{}: begins_with() accepts 2 parameters, got {}", caller, f._parameters.size()));
|
||||
}
|
||||
rjson::value v1 = calculate_value(f._parameters[0], caller, previous_item);
|
||||
rjson::value v2 = calculate_value(f._parameters[1], caller, previous_item);
|
||||
return to_bool_json(check_BEGINS_WITH(v1.IsNull() ? nullptr : &v1, v2,
|
||||
f._parameters[0].is_constant(), f._parameters[1].is_constant()));
|
||||
}
|
||||
},
|
||||
{"contains", [] (calculate_value_caller caller, const rjson::value* previous_item, const parsed::value::function_call& f) {
|
||||
if (caller != calculate_value_caller::ConditionExpressionAlone) {
|
||||
throw api_error::validation(
|
||||
format("{}: contains() not allowed here", caller));
|
||||
}
|
||||
if (f._parameters.size() != 2) {
|
||||
throw api_error::validation(
|
||||
format("{}: contains() accepts 2 parameters, got {}", caller, f._parameters.size()));
|
||||
}
|
||||
rjson::value v1 = calculate_value(f._parameters[0], caller, previous_item);
|
||||
rjson::value v2 = calculate_value(f._parameters[1], caller, previous_item);
|
||||
return to_bool_json(check_CONTAINS(v1.IsNull() ? nullptr : &v1, v2,
|
||||
f._parameters[0].is_constant(), f._parameters[1].is_constant()));
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
// Given a parsed::path and an item read from the table, extract the value
|
||||
// of a certain attribute path, such as "a" or "a.b.c[3]". Returns a null
|
||||
// value if the item or the requested attribute does not exist.
|
||||
// Note that the item is assumed to be encoded in JSON using DynamoDB
|
||||
// conventions - each level of a nested document is a map with one key -
|
||||
// a type (e.g., "M" for map) - and its value is the representation of
|
||||
// that value.
|
||||
static rjson::value extract_path(const rjson::value* item,
|
||||
const parsed::path& p, calculate_value_caller caller) {
|
||||
if (!item) {
|
||||
return rjson::null_value();
|
||||
}
|
||||
const rjson::value* v = rjson::find(*item, p.root());
|
||||
if (!v) {
|
||||
return rjson::null_value();
|
||||
}
|
||||
for (const auto& op : p.operators()) {
|
||||
if (!v->IsObject() || v->MemberCount() != 1) {
|
||||
// This shouldn't happen. We shouldn't have stored malformed
|
||||
// objects. But today Alternator does not validate the structure
|
||||
// of nested documents before storing them, so this can happen on
|
||||
// read.
|
||||
throw api_error::validation(format("{}: malformed item read: {}", caller, *item));
|
||||
}
|
||||
const char* type = v->MemberBegin()->name.GetString();
|
||||
v = &(v->MemberBegin()->value);
|
||||
std::visit(overloaded_functor {
|
||||
[&] (const std::string& member) {
|
||||
if (type[0] == 'M' && v->IsObject()) {
|
||||
v = rjson::find(*v, member);
|
||||
} else {
|
||||
v = nullptr;
|
||||
}
|
||||
},
|
||||
[&] (unsigned index) {
|
||||
if (type[0] == 'L' && v->IsArray() && index < v->Size()) {
|
||||
v = &(v->GetArray()[index]);
|
||||
} else {
|
||||
v = nullptr;
|
||||
}
|
||||
}
|
||||
}, op);
|
||||
if (!v) {
|
||||
return rjson::null_value();
|
||||
}
|
||||
}
|
||||
return rjson::copy(*v);
|
||||
}
|
||||
|
||||
// Given a parsed::value, which can refer either to a constant value from
|
||||
// ExpressionAttributeValues, to the value of some attribute, or to a function
|
||||
// of other values, this function calculates the resulting value.
|
||||
// "caller" determines which expression - ConditionExpression or
|
||||
// UpdateExpression - is asking for this value. We need to know this because
|
||||
// DynamoDB allows a different choice of functions for different expressions.
|
||||
rjson::value calculate_value(const parsed::value& v,
|
||||
calculate_value_caller caller,
|
||||
const rjson::value* previous_item) {
|
||||
return std::visit(overloaded_functor {
|
||||
[&] (const parsed::constant& c) -> rjson::value {
|
||||
return rjson::copy(calculate_value(c));
|
||||
},
|
||||
[&] (const parsed::value::function_call& f) -> rjson::value {
|
||||
auto function_it = function_handlers.find(std::string_view(f._function_name));
|
||||
if (function_it == function_handlers.end()) {
|
||||
throw api_error::validation(
|
||||
format("{}: unknown function '{}' called.", caller, f._function_name));
|
||||
}
|
||||
return function_it->second(caller, previous_item, f);
|
||||
},
|
||||
[&] (const parsed::path& p) -> rjson::value {
|
||||
return extract_path(previous_item, p, caller);
|
||||
}
|
||||
}, v._value);
|
||||
}
|
||||
|
||||
// Same as calculate_value() above, except takes a set_rhs, which may be
|
||||
// either a single value, or v1+v2 or v1-v2.
|
||||
rjson::value calculate_value(const parsed::set_rhs& rhs,
|
||||
const rjson::value* previous_item) {
|
||||
switch (rhs._op) {
|
||||
case 'v':
|
||||
return calculate_value(rhs._v1, calculate_value_caller::UpdateExpression, previous_item);
|
||||
case '+': {
|
||||
rjson::value v1 = calculate_value(rhs._v1, calculate_value_caller::UpdateExpression, previous_item);
|
||||
rjson::value v2 = calculate_value(rhs._v2, calculate_value_caller::UpdateExpression, previous_item);
|
||||
return number_add(v1, v2);
|
||||
}
|
||||
case '-': {
|
||||
rjson::value v1 = calculate_value(rhs._v1, calculate_value_caller::UpdateExpression, previous_item);
|
||||
rjson::value v2 = calculate_value(rhs._v2, calculate_value_caller::UpdateExpression, previous_item);
|
||||
return number_subtract(v1, v2);
|
||||
}
|
||||
}
|
||||
// Can't happen
|
||||
return rjson::null_value();
|
||||
}
|
||||
|
||||
} // namespace alternator
|
||||
|
||||
auto fmt::formatter<alternator::parsed::path>::format(const alternator::parsed::path& p, fmt::format_context& ctx) const
|
||||
-> decltype(ctx.out()) {
|
||||
auto out = ctx.out();
|
||||
out = fmt::format_to(out, "{}", p.root());
|
||||
for (const auto& op : p.operators()) {
|
||||
std::visit(overloaded_functor {
|
||||
[&] (const std::string& member) {
|
||||
out = fmt::format_to(out, ".{}", member);
|
||||
},
|
||||
[&] (unsigned index) {
|
||||
out = fmt::format_to(out, "[{}]", index);
|
||||
}
|
||||
}, op);
|
||||
}
|
||||
return out;
|
||||
}
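// Illustrative example (not part of the original source): with the formatter
// above, a parsed::path with root "a", a dot operator "b" and an index
// operator 3 - i.e., the parsed form of "a.b[3]" - prints back as "a.b[3]"
// via fmt::format("{}", p).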
|
||||
@@ -1,283 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
/*
|
||||
* The DynamoDB protocol is based on JSON, and most DynamoDB requests
|
||||
* describe the operation and its parameters via JSON objects such as maps
|
||||
* and lists. Nevertheless, in some types of requests an "expression" is
|
||||
* passed as a single string, and we need to parse this string. These
|
||||
* cases include:
|
||||
* 1. Attribute paths, such as "a[3].b.c", are used in projection
|
||||
* expressions as well as inside other expressions described below.
|
||||
* 2. Condition expressions, such as "(NOT (a=b OR c=d)) AND e=f",
|
||||
* used in conditional updates, filters, and other places.
|
||||
* 3. Update expressions, such as "SET #a.b = :x, c = :y DELETE d"
|
||||
*
|
||||
* All these expression syntaxes are very simple: Most of them could be
|
||||
* parsed as regular expressions, and the parenthesized condition expression
|
||||
* could be done with a simple hand-written lexical analyzer and recursive-
|
||||
* descent parser. Nevertheless, we decided to specify these parsers in the
|
||||
* ANTLR3 language already used in the Scylla project, hopefully making these
|
||||
* parsers easier to reason about, and easier to change if needed - and
|
||||
* reducing the amount of boiler-plate code.
|
||||
*/
|
||||
|
||||
grammar expressions;
|
||||
|
||||
options {
|
||||
language = Cpp;
|
||||
}
|
||||
|
||||
@parser::namespace{alternator}
|
||||
@lexer::namespace{alternator}
|
||||
|
||||
/* TODO: explain what these traits things are. I haven't seen them explained
|
||||
* in any document... Compilation fails without these because a definition
* of "expressionsLexerTraits" and "expressionsParserTraits" is needed.
|
||||
*/
|
||||
@lexer::traits {
|
||||
class expressionsLexer;
|
||||
class expressionsParser;
|
||||
typedef antlr3::Traits<expressionsLexer, expressionsParser> expressionsLexerTraits;
|
||||
}
|
||||
@parser::traits {
|
||||
typedef expressionsLexerTraits expressionsParserTraits;
|
||||
}
|
||||
|
||||
@lexer::header {
|
||||
#include "alternator/expressions.hh"
|
||||
// ANTLR generates a bunch of unused variables and functions. Yuck...
|
||||
#pragma GCC diagnostic ignored "-Wunused-variable"
|
||||
#pragma GCC diagnostic ignored "-Wunused-function"
|
||||
}
|
||||
@parser::header {
|
||||
#include "expressionsLexer.hpp"
|
||||
}
|
||||
|
||||
/* By default, ANTLR3 composes elaborate syntax-error messages, saying which
|
||||
* token was unexpected, where, and so on, but then dutifully writes these
|
||||
* error messages to the standard error, and returns from the parser as if
|
||||
* everything was fine, with a half-constructed output object! If we define
|
||||
* the "displayRecognitionError" method, it will be called upon to build this
|
||||
* error message, and we can instead throw an exception to stop the parsing
|
||||
* immediately. This is good enough for now, for our simple needs, but if
|
||||
* we ever want to show more information about the syntax error, Cql3.g
|
||||
* contains an elaborate implementation (it would be nice if we could reuse
|
||||
* it, not duplicate it).
|
||||
* Unfortunately, we have to repeat the same definition twice - once for the
|
||||
* parser, and once for the lexer.
|
||||
*/
|
||||
@parser::context {
|
||||
void displayRecognitionError(ANTLR_UINT8** token_names, ExceptionBaseType* ex) {
|
||||
const char* err;
|
||||
switch (ex->getType()) {
|
||||
case antlr3::ExceptionType::FAILED_PREDICATE_EXCEPTION:
|
||||
err = "expression nested too deeply";
|
||||
break;
|
||||
default:
|
||||
err = "syntax error";
|
||||
break;
|
||||
}
|
||||
// Alternator expressions are always single line so ex->get_line()
|
||||
// is always 1, so there is no sense in printing it.
|
||||
// TODO: return the position as part of the exception, so the
|
||||
// caller in expressions.cc that knows the expression string can
|
||||
// mark the error position in the final error message.
|
||||
throw expressions_syntax_error(format("{} at char {}", err,
|
||||
ex->get_charPositionInLine()));
|
||||
}
|
||||
}
|
||||
@lexer::context {
|
||||
void displayRecognitionError(ANTLR_UINT8** token_names, ExceptionBaseType* ex) {
|
||||
throw expressions_syntax_error("syntax error");
|
||||
}
|
||||
}
|
||||
|
||||
/* Unfortunately, ANTLR uses recursion - not the heap - to parse recursive
|
||||
* expressions. To make things even worse, ANTLR has no way to limit the
|
||||
* depth of this recursion (unlike Yacc which has YYMAXDEPTH). So deeply-
|
||||
* nested expressions like "(((((((((((((..." can easily crash Scylla on a
|
||||
* stack overflow (see issue #14477).
|
||||
*
|
||||
* We are lucky that in the grammar for DynamoDB expressions (below),
|
||||
* only a few specific rules can recurse, so it was fairly easy to add a
|
||||
* "depth" counter to a few specific rules, and then use a predicate
|
||||
* "{depth<MAX_DEPTH}?" to avoid parsing if the depth exceeds this limit,
|
||||
* and throw a FAILED_PREDICATE_EXCEPTION in that case, which we will
|
||||
* report to the user as an "expression nested too deeply" error.
|
||||
*/
|
||||
@parser::members {
|
||||
static constexpr int MAX_DEPTH = 400;
|
||||
}
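/* Illustrative note (not part of the original grammar): with MAX_DEPTH = 400,
 * an input such as 401 nested parentheses "(((...(((a = :v)))...)))" fails
 * the {depth<MAX_DEPTH}? predicate, and displayRecognitionError() above then
 * reports it as "expression nested too deeply at char N".
 */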
|
||||
|
||||
/*
|
||||
* Lexical analysis phase, i.e., splitting the input up to tokens.
|
||||
* Lexical analyzer rules have names starting in capital letters.
|
||||
* "fragment" rules do not generate tokens, and are just aliases used to
|
||||
* make other rules more readable.
|
||||
* Characters *not* listed here, e.g., '=', '(', etc., will be handled
|
||||
* as individual tokens in their own right.
|
||||
* Whitespace spans are skipped, so do not generate tokens.
|
||||
*/
|
||||
WHITESPACE: (' ' | '\t' | '\n' | '\r')+ { skip(); };
|
||||
|
||||
/* shortcuts for case-insensitive keywords */
|
||||
fragment A:('a'|'A');
|
||||
fragment B:('b'|'B');
|
||||
fragment C:('c'|'C');
|
||||
fragment D:('d'|'D');
|
||||
fragment E:('e'|'E');
|
||||
fragment F:('f'|'F');
|
||||
fragment G:('g'|'G');
|
||||
fragment H:('h'|'H');
|
||||
fragment I:('i'|'I');
|
||||
fragment J:('j'|'J');
|
||||
fragment K:('k'|'K');
|
||||
fragment L:('l'|'L');
|
||||
fragment M:('m'|'M');
|
||||
fragment N:('n'|'N');
|
||||
fragment O:('o'|'O');
|
||||
fragment P:('p'|'P');
|
||||
fragment Q:('q'|'Q');
|
||||
fragment R:('r'|'R');
|
||||
fragment S:('s'|'S');
|
||||
fragment T:('t'|'T');
|
||||
fragment U:('u'|'U');
|
||||
fragment V:('v'|'V');
|
||||
fragment W:('w'|'W');
|
||||
fragment X:('x'|'X');
|
||||
fragment Y:('y'|'Y');
|
||||
fragment Z:('z'|'Z');
|
||||
/* These keywords must appear before the generic NAME token below,
|
||||
* because NAME matches too, and the first to match wins.
|
||||
*/
|
||||
SET: S E T;
|
||||
REMOVE: R E M O V E;
|
||||
ADD: A D D;
|
||||
DELETE: D E L E T E;
|
||||
|
||||
AND: A N D;
|
||||
OR: O R;
|
||||
NOT: N O T;
|
||||
BETWEEN: B E T W E E N;
|
||||
IN: I N;
|
||||
|
||||
fragment ALPHA: 'A'..'Z' | 'a'..'z';
|
||||
fragment DIGIT: '0'..'9';
|
||||
fragment ALNUM: ALPHA | DIGIT | '_';
|
||||
INTEGER: DIGIT+;
|
||||
NAME: ALPHA ALNUM*;
|
||||
NAMEREF: '#' ALNUM+;
|
||||
VALREF: ':' ALNUM+;
|
||||
|
||||
/*
|
||||
* Parsing phase - parsing the string of tokens generated by the lexical
|
||||
* analyzer defined above.
|
||||
*/
|
||||
|
||||
path_component: NAME | NAMEREF;
|
||||
path returns [parsed::path p]:
|
||||
root=path_component { $p.set_root($root.text); }
|
||||
( '.' name=path_component { $p.add_dot($name.text); }
|
||||
| '[' INTEGER ']' { $p.add_index(std::stoi($INTEGER.text)); }
|
||||
)*;
|
||||
|
||||
/* See comment above why the "depth" counter was needed here */
|
||||
value[int depth] returns [parsed::value v]:
|
||||
VALREF { $v.set_valref($VALREF.text); }
|
||||
| path { $v.set_path($path.p); }
|
||||
| {depth<MAX_DEPTH}? NAME { $v.set_func_name($NAME.text); }
|
||||
'(' x=value[depth+1] { $v.add_func_parameter($x.v); }
|
||||
(',' x=value[depth+1] { $v.add_func_parameter($x.v); })*
|
||||
')'
|
||||
;
|
||||
|
||||
update_expression_set_rhs returns [parsed::set_rhs rhs]:
|
||||
v=value[0] { $rhs.set_value(std::move($v.v)); }
|
||||
( '+' v=value[0] { $rhs.set_plus(std::move($v.v)); }
|
||||
| '-' v=value[0] { $rhs.set_minus(std::move($v.v)); }
|
||||
)?
|
||||
;
|
||||
|
||||
update_expression_set_action returns [parsed::update_expression::action a]:
|
||||
path '=' rhs=update_expression_set_rhs { $a.assign_set($path.p, $rhs.rhs); };
|
||||
|
||||
update_expression_remove_action returns [parsed::update_expression::action a]:
|
||||
path { $a.assign_remove($path.p); };
|
||||
|
||||
update_expression_add_action returns [parsed::update_expression::action a]:
|
||||
path VALREF { $a.assign_add($path.p, $VALREF.text); };
|
||||
|
||||
update_expression_delete_action returns [parsed::update_expression::action a]:
|
||||
path VALREF { $a.assign_del($path.p, $VALREF.text); };
|
||||
|
||||
update_expression_clause returns [parsed::update_expression e]:
|
||||
SET s=update_expression_set_action { $e.add(s); }
|
||||
(',' s=update_expression_set_action { $e.add(s); })*
|
||||
| REMOVE r=update_expression_remove_action { $e.add(r); }
|
||||
(',' r=update_expression_remove_action { $e.add(r); })*
|
||||
| ADD a=update_expression_add_action { $e.add(a); }
|
||||
(',' a=update_expression_add_action { $e.add(a); })*
|
||||
| DELETE d=update_expression_delete_action { $e.add(d); }
|
||||
(',' d=update_expression_delete_action { $e.add(d); })*
|
||||
;
|
||||
|
||||
// Note the "EOF" token at the end of the update expression. We want to the
|
||||
// parser to match the entire string given to it - not just its beginning!
|
||||
update_expression returns [parsed::update_expression e]:
|
||||
(update_expression_clause { e.append($update_expression_clause.e); })* EOF;
|
||||
|
||||
projection_expression returns [std::vector<parsed::path> v]:
|
||||
p=path { $v.push_back(std::move($p.p)); }
|
||||
(',' p=path { $v.push_back(std::move($p.p)); } )* EOF;
|
||||
|
||||
|
||||
primitive_condition returns [parsed::primitive_condition c]:
|
||||
v=value[0] { $c.add_value(std::move($v.v));
|
||||
$c.set_operator(parsed::primitive_condition::type::VALUE); }
|
||||
( ( '=' { $c.set_operator(parsed::primitive_condition::type::EQ); }
|
||||
| '<' '>' { $c.set_operator(parsed::primitive_condition::type::NE); }
|
||||
| '<' { $c.set_operator(parsed::primitive_condition::type::LT); }
|
||||
| '<' '=' { $c.set_operator(parsed::primitive_condition::type::LE); }
|
||||
| '>' { $c.set_operator(parsed::primitive_condition::type::GT); }
|
||||
| '>' '=' { $c.set_operator(parsed::primitive_condition::type::GE); }
|
||||
)
|
||||
v=value[0] { $c.add_value(std::move($v.v)); }
|
||||
| BETWEEN { $c.set_operator(parsed::primitive_condition::type::BETWEEN); }
|
||||
v=value[0] { $c.add_value(std::move($v.v)); }
|
||||
AND
|
||||
v=value[0] { $c.add_value(std::move($v.v)); }
|
||||
| IN '(' { $c.set_operator(parsed::primitive_condition::type::IN); }
|
||||
v=value[0] { $c.add_value(std::move($v.v)); }
|
||||
(',' v=value[0] { $c.add_value(std::move($v.v)); })*
|
||||
')'
|
||||
)?
|
||||
;
|
||||
|
||||
// The following rules for parsing boolean expressions are verbose and
|
||||
// somewhat strange because of Antlr 3's limitations on recursive rules,
|
||||
// common rule prefixes, and (lack of) support for operator precedence.
|
||||
// These rules could have been written more clearly using a more powerful
|
||||
// parser generator - such as Yacc.
|
||||
// See comment above why the "depth" counter was needed here.
|
||||
boolean_expression[int depth] returns [parsed::condition_expression e]:
|
||||
b=boolean_expression_1[depth] { $e.append(std::move($b.e), '|'); }
|
||||
(OR b=boolean_expression_1[depth] { $e.append(std::move($b.e), '|'); } )*
|
||||
;
|
||||
boolean_expression_1[int depth] returns [parsed::condition_expression e]:
|
||||
b=boolean_expression_2[depth] { $e.append(std::move($b.e), '&'); }
|
||||
(AND b=boolean_expression_2[depth] { $e.append(std::move($b.e), '&'); } )*
|
||||
;
|
||||
boolean_expression_2[int depth] returns [parsed::condition_expression e]:
|
||||
p=primitive_condition { $e.set_primitive(std::move($p.c)); }
|
||||
| {depth<MAX_DEPTH}? NOT b=boolean_expression_2[depth+1] { $e = std::move($b.e); $e.apply_not(); }
|
||||
| {depth<MAX_DEPTH}? '(' b=boolean_expression[depth+1] ')' { $e = std::move($b.e); }
|
||||
;
|
||||
|
||||
condition_expression returns [parsed::condition_expression e]:
|
||||
boolean_expression[0] { e=std::move($boolean_expression.e); } EOF;
|
||||
@@ -1,95 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <string>
|
||||
#include <stdexcept>
|
||||
#include <vector>
|
||||
#include <unordered_set>
|
||||
#include <string_view>
|
||||
|
||||
#include <seastar/util/noncopyable_function.hh>
|
||||
|
||||
#include "expressions_types.hh"
|
||||
#include "utils/rjson.hh"
|
||||
|
||||
namespace alternator {
|
||||
|
||||
class expressions_syntax_error : public std::runtime_error {
|
||||
public:
|
||||
using runtime_error::runtime_error;
|
||||
};
|
||||
|
||||
parsed::update_expression parse_update_expression(std::string_view query);
|
||||
std::vector<parsed::path> parse_projection_expression(std::string_view query);
|
||||
parsed::condition_expression parse_condition_expression(std::string_view query, const char* caller);
|
||||
|
||||
void resolve_update_expression(parsed::update_expression& ue,
|
||||
const rjson::value* expression_attribute_names,
|
||||
const rjson::value* expression_attribute_values,
|
||||
std::unordered_set<std::string>& used_attribute_names,
|
||||
std::unordered_set<std::string>& used_attribute_values);
|
||||
void resolve_projection_expression(std::vector<parsed::path>& pe,
|
||||
const rjson::value* expression_attribute_names,
|
||||
std::unordered_set<std::string>& used_attribute_names);
|
||||
void resolve_condition_expression(parsed::condition_expression& ce,
|
||||
const rjson::value* expression_attribute_names,
|
||||
const rjson::value* expression_attribute_values,
|
||||
std::unordered_set<std::string>& used_attribute_names,
|
||||
std::unordered_set<std::string>& used_attribute_values);
|
||||
|
||||
void validate_value(const rjson::value& v, const char* caller);
|
||||
|
||||
bool condition_expression_on(const parsed::condition_expression& ce, std::string_view attribute);
|
||||
|
||||
// for_condition_expression_on() runs the given function on the attributes
|
||||
// that the expression uses. It may run for the same attribute more than once
|
||||
// if the same attribute is used more than once in the expression.
|
||||
void for_condition_expression_on(const parsed::condition_expression& ce, const noncopyable_function<void(std::string_view)>& func);
|
||||
|
||||
// calculate_value() behaves slightly differently (in particular, different
// functions are supported) when used in different types of expressions, as
|
||||
// enumerated in this enum:
|
||||
enum class calculate_value_caller {
|
||||
UpdateExpression, ConditionExpression, ConditionExpressionAlone
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
template <> struct fmt::formatter<alternator::calculate_value_caller> {
|
||||
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
|
||||
auto format(alternator::calculate_value_caller caller, fmt::format_context& ctx) const {
|
||||
std::string_view name = "unknown type of expression";
|
||||
switch (caller) {
|
||||
using enum alternator::calculate_value_caller;
|
||||
case UpdateExpression:
|
||||
name = "UpdateExpression";
|
||||
break;
|
||||
case ConditionExpression:
|
||||
name = "ConditionExpression";
|
||||
break;
|
||||
case ConditionExpressionAlone:
|
||||
name = "ConditionExpression";
|
||||
break;
|
||||
}
|
||||
return fmt::format_to(ctx.out(), "{}", name);
|
||||
}
|
||||
};
|
||||
|
||||
namespace alternator {
|
||||
|
||||
rjson::value calculate_value(const parsed::value& v,
|
||||
calculate_value_caller caller,
|
||||
const rjson::value* previous_item);
|
||||
|
||||
rjson::value calculate_value(const parsed::set_rhs& rhs,
|
||||
const rjson::value* previous_item);
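// Illustrative usage sketch (not part of the original header) showing how
// the declarations above fit together for a ConditionExpression. The
// variables attribute_names and attribute_values are hypothetical pointers
// into the request's ExpressionAttributeNames/Values JSON:
//
//   parsed::condition_expression ce =
//       parse_condition_expression("#a = :v", "ConditionExpression");
//   std::unordered_set<std::string> used_names, used_values;
//   resolve_condition_expression(ce, attribute_names, attribute_values,
//           used_names, used_values);
//   // used_names/used_values now record which #name/:value references were
//   // consumed, so the caller can reject requests with unused entries.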
|
||||
|
||||
|
||||
} /* namespace alternator */
|
||||
@@ -1,260 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <vector>
|
||||
#include <string>
|
||||
#include <variant>
|
||||
|
||||
#include <seastar/core/shared_ptr.hh>
|
||||
|
||||
#include "utils/rjson.hh"
|
||||
|
||||
/*
|
||||
* Parsed representation of expressions and their components.
|
||||
*
|
||||
* Types in alternator::parsed namespace are used for holding the parse
|
||||
* tree - objects generated by the Antlr rules after parsing an expression.
|
||||
* Because of the way Antlr works, all these objects are default-constructed
|
||||
* first, and then assigned when the rule is completed, so all these types
|
||||
* have only default constructors, plus setter functions to set them later.
|
||||
*/
|
||||
|
||||
namespace alternator {
|
||||
namespace parsed {
|
||||
|
||||
// "path" is an attribute's path in a document, e.g., a.b[3].c.
|
||||
class path {
|
||||
// All paths have a "root", a top-level attribute, and any number of
|
||||
// "dereference operators" - each either an index (e.g., "[2]") or a
|
||||
// dot (e.g., ".xyz").
|
||||
std::string _root;
|
||||
std::vector<std::variant<std::string, unsigned>> _operators;
|
||||
// It is useful to limit the depth of a user-specified path, because it
|
||||
// allows us to use recursive algorithms without worrying about recursion
|
||||
// depth. DynamoDB officially limits the length of paths to 32 components
|
||||
// (including the root) so let's use the same limit.
|
||||
static constexpr unsigned depth_limit = 32;
|
||||
void check_depth_limit();
|
||||
public:
|
||||
void set_root(std::string root) {
|
||||
_root = std::move(root);
|
||||
}
|
||||
void add_index(unsigned i) {
|
||||
_operators.emplace_back(i);
|
||||
check_depth_limit();
|
||||
}
|
||||
void add_dot(std::string name) {
|
||||
_operators.emplace_back(std::move(name));
|
||||
check_depth_limit();
|
||||
}
|
||||
const std::string& root() const {
|
||||
return _root;
|
||||
}
|
||||
bool has_operators() const {
|
||||
return !_operators.empty();
|
||||
}
|
||||
const std::vector<std::variant<std::string, unsigned>>& operators() const {
|
||||
return _operators;
|
||||
}
|
||||
std::vector<std::variant<std::string, unsigned>>& operators() {
|
||||
return _operators;
|
||||
}
|
||||
};
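// Illustrative sketch (not part of the original header): the parser actions
// in the expressions grammar build the path "a.b[3].c" with the setters
// above, along these lines:
//
//   path p;
//   p.set_root("a");
//   p.add_dot("b");   // the ".b" dereference operator
//   p.add_index(3);   // the "[3]" dereference operator
//   p.add_dot("c");   // the ".c" dereference operator
//   // p.root() == "a"; p.operators() holds {"b", 3u, "c"}.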
|
||||
|
||||
// When an expression is first parsed, all constants are references, like
|
||||
// ":val1", into ExpressionAttributeValues. This uses std::string() variant.
|
||||
// The resolve_value() function replaces these constants by the JSON item
|
||||
// extracted from the ExpressionAttributeValues.
|
||||
struct constant {
|
||||
// We use lw_shared_ptr<rjson::value> just to make rjson::value copyable,
|
||||
// to make this entire object copyable as ANTLR needs.
|
||||
using literal = lw_shared_ptr<rjson::value>;
|
||||
std::variant<std::string, literal> _value;
|
||||
void set(const rjson::value& v) {
|
||||
_value = make_lw_shared<rjson::value>(rjson::copy(v));
|
||||
}
|
||||
void set(std::string& s) {
|
||||
_value = s;
|
||||
}
|
||||
};
|
||||
|
||||
// "value" is is a value used in the right hand side of an assignment
|
||||
// expression, "SET a = ...". It can be a constant (a reference to a value
|
||||
// included in the request, e.g., ":val"), a path to an attribute from the
|
||||
// existing item (e.g., "a.b[3].c"), or a function of other such values.
|
||||
// Note that the real right-hand-side of an assignment is actually a bit
|
||||
// more general - it allows either a value, or a value+value or value-value -
|
||||
// see class set_rhs below.
|
||||
struct value {
|
||||
struct function_call {
|
||||
std::string _function_name;
|
||||
std::vector<value> _parameters;
|
||||
};
|
||||
std::variant<constant, path, function_call> _value;
|
||||
void set_constant(constant c) {
|
||||
_value = std::move(c);
|
||||
}
|
||||
void set_valref(std::string s) {
|
||||
_value = constant { std::move(s) };
|
||||
}
|
||||
void set_path(path p) {
|
||||
_value = std::move(p);
|
||||
}
|
||||
void set_func_name(std::string s) {
|
||||
_value = function_call {std::move(s), {}};
|
||||
}
|
||||
void add_func_parameter(value v) {
|
||||
std::get<function_call>(_value)._parameters.emplace_back(std::move(v));
|
||||
}
|
||||
bool is_constant() const {
|
||||
return std::holds_alternative<constant>(_value);
|
||||
}
|
||||
bool is_path() const {
|
||||
return std::holds_alternative<path>(_value);
|
||||
}
|
||||
bool is_func() const {
|
||||
return std::holds_alternative<function_call>(_value);
|
||||
}
|
||||
};

// The right-hand-side of a SET in an update expression can be either a
// single value (see above), or value+value, or value-value.
class set_rhs {
public:
    char _op; // '+', '-', or 'v'
    value _v1;
    value _v2;
    void set_value(value&& v1) {
        _op = 'v';
        _v1 = std::move(v1);
    }
    void set_plus(value&& v2) {
        _op = '+';
        _v2 = std::move(v2);
    }
    void set_minus(value&& v2) {
        _op = '-';
        _v2 = std::move(v2);
    }
};
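
// Editorial illustration (not part of the original file): for the update
// expression "SET a = :x + :y" the parser would build roughly:
//
//     value v1; v1.set_valref(":x");
//     value v2; v2.set_valref(":y");
//     set_rhs rhs;
//     rhs.set_value(std::move(v1));  // _op = 'v': single value so far
//     rhs.set_plus(std::move(v2));   // _op = '+': now v1 + v2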

class update_expression {
public:
    struct action {
        path _path;
        struct set {
            set_rhs _rhs;
        };
        struct remove {
        };
        struct add {
            constant _valref;
        };
        struct del {
            constant _valref;
        };
        std::variant<set, remove, add, del> _action;

        void assign_set(path p, set_rhs rhs) {
            _path = std::move(p);
            _action = set { std::move(rhs) };
        }
        void assign_remove(path p) {
            _path = std::move(p);
            _action = remove { };
        }
        void assign_add(path p, std::string v) {
            _path = std::move(p);
            _action = add { constant { std::move(v) } };
        }
        void assign_del(path p, std::string v) {
            _path = std::move(p);
            _action = del { constant { std::move(v) } };
        }
    };
private:
    std::vector<action> _actions;
    bool seen_set = false;
    bool seen_remove = false;
    bool seen_add = false;
    bool seen_del = false;
public:
    void add(action a);
    void append(update_expression other);
    bool empty() const {
        return _actions.empty();
    }
    const std::vector<action>& actions() const {
        return _actions;
    }
    std::vector<action>& actions() {
        return _actions;
    }
};
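
// Editorial illustration (not part of the original file): the expression
// "SET a = :v REMOVE b[0]" would parse into two actions, roughly:
//
//     update_expression::action a1, a2;
//     path p1; p1.set_root("a");
//     set_rhs rhs; /* rhs wrapping the ":v" reference, as sketched above */
//     a1.assign_set(std::move(p1), std::move(rhs));
//     path p2; p2.set_root("b"); p2.add_index(0);
//     a2.assign_remove(std::move(p2));
//     update_expression e;
//     e.add(std::move(a1));
//     e.add(std::move(a2));
//
// add() is defined elsewhere; the seen_* flags presumably record which
// clause kinds (SET/REMOVE/ADD/DELETE) have already appeared.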

// A primitive_condition is a condition expression involving one condition,
// while the full condition_expression below adds boolean logic over these
// primitive conditions.
// The supported primitive conditions are:
// 1. Binary operators - v1 OP v2, where OP is =, <>, <, <=, >, or >= and
//    v1 and v2 are values - from the item (an attribute path), the query
//    (a ":val" reference), or a function of the above (only the size()
//    function is supported).
// 2. Ternary operator - v1 BETWEEN v2 AND v3 (means v1 >= v2 AND v1 <= v3).
// 3. N-ary operator - v1 IN ( v2, v3, ... )
// 4. A single function call (attribute_exists etc.). The parser actually
//    accepts a more general "value" here but later stages reject a value
//    which is not a function call (because DynamoDB does it too).
class primitive_condition {
public:
    enum class type {
        UNDEFINED, VALUE, EQ, NE, LT, LE, GT, GE, BETWEEN, IN
    };
    type _op = type::UNDEFINED;
    std::vector<value> _values;
    void set_operator(type op) {
        _op = op;
    }
    void add_value(value&& v) {
        _values.push_back(std::move(v));
    }
    bool empty() const {
        return _op == type::UNDEFINED;
    }
};
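
// Editorial illustration (not part of the original file): the condition
// "a.b < :v" would become a primitive_condition along these lines:
//
//     primitive_condition pc;
//     pc.set_operator(primitive_condition::type::LT);
//     path p; p.set_root("a"); p.add_dot("b");
//     value lhs; lhs.set_path(std::move(p));
//     value rhs; rhs.set_valref(":v");
//     pc.add_value(std::move(lhs));
//     pc.add_value(std::move(rhs));
//
// A BETWEEN condition would carry three values, and IN one value per list
// member.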

class condition_expression {
public:
    bool _negated = false; // If true, the entire condition is negated
    struct condition_list {
        char op = '|'; // '&' or '|'
        std::vector<condition_expression> conditions;
    };
    std::variant<primitive_condition, condition_list> _expression = condition_list();

    void set_primitive(primitive_condition&& p) {
        _expression = std::move(p);
    }
    void append(condition_expression&& c, char op);
    void apply_not() {
        _negated = !_negated;
    }
    bool empty() const {
        return std::holds_alternative<condition_list>(_expression) &&
               std::get<condition_list>(_expression).conditions.empty();
    }
};
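
// Editorial illustration (not part of the original file): "NOT (x OR y)",
// where cx and cy are hypothetical condition_expressions already wrapping
// the primitive conditions x and y:
//
//     condition_expression e;        // starts as an empty '|' list
//     e.append(std::move(cx), '|');
//     e.append(std::move(cy), '|');
//     e.apply_not();                 // sets _negated = true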

} // namespace parsed
} // namespace alternator

template <> struct fmt::formatter<alternator::parsed::path> : fmt::formatter<string_view> {
    auto format(const alternator::parsed::path&, fmt::format_context& ctx) const -> decltype(ctx.out());
};
@@ -1,120 +0,0 @@
/*
 * Copyright 2020-present ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 */

#pragma once

#include "seastarx.hh"
#include "service/paxos/cas_request.hh"
#include "utils/rjson.hh"
#include "executor.hh"

namespace alternator {

// An rmw_operation encapsulates the common logic of all the item update
// operations which may involve a read of the item before the write
// (so-called Read-Modify-Write operations). These operations include PutItem,
// UpdateItem and DeleteItem: All of these may be conditional operations (the
// "Expected" parameter) which require a read before the write, and UpdateItem
// may also have an update expression which refers to the item's old value.
//
// The code below supports running the read and the write together as one
// transaction using LWT (this is why rmw_operation is a subclass of
// cas_request, as required by storage_proxy::cas()), but also has optional
// modes not using LWT.
class rmw_operation : public service::cas_request, public enable_shared_from_this<rmw_operation> {
public:
    // The following options choose which mechanism to use for isolating
    // parallel write operations:
    // * The FORBID_RMW option forbids RMW (read-modify-write) operations
    //   such as conditional updates. For the remaining write-only
    //   operations, ordinary quorum writes are isolated enough.
    // * The LWT_ALWAYS option always uses LWT (lightweight transactions)
    //   for any write operation - whether or not it also has a read.
    // * The LWT_RMW_ONLY option uses LWT only for RMW operations, and uses
    //   ordinary quorum writes for write-only operations.
    //   This option is not safe if the user may send both RMW and write-only
    //   operations on the same item.
    // * The UNSAFE_RMW option does read-modify-write operations as separate
    //   read and write. It is unsafe - concurrent RMW operations are not
    //   isolated at all. This option will likely be removed in the future.
    enum class write_isolation {
        FORBID_RMW, LWT_ALWAYS, LWT_RMW_ONLY, UNSAFE_RMW
    };
    static constexpr auto WRITE_ISOLATION_TAG_KEY = "system:write_isolation";

    static write_isolation get_write_isolation_for_schema(schema_ptr schema);

    static write_isolation default_write_isolation;
public:
    static void set_default_write_isolation(std::string_view mode);

protected:
    // The full request JSON
    rjson::value _request;
    // All RMW operations involve a single item with a specific partition
    // and optional clustering key, in a single table, so the following
    // information is common to all of them:
    schema_ptr _schema;
    partition_key _pk = partition_key::make_empty();
    clustering_key _ck = clustering_key::make_empty();
    write_isolation _write_isolation;

    // All RMW operations can have a ReturnValues parameter from the following
    // choices. But note that only UpdateItem actually supports all of them:
    enum class returnvalues {
        NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW
    } _returnvalues;
    enum class returnvalues_on_condition_check_failure {
        NONE, ALL_OLD
    } _returnvalues_on_condition_check_failure;
    static returnvalues parse_returnvalues(const rjson::value& request);
    static returnvalues_on_condition_check_failure parse_returnvalues_on_condition_check_failure(const rjson::value& request);
    // When _returnvalues != NONE, apply() should store here, in JSON form,
    // the values which are to be returned in the "Attributes" field.
    // The default null JSON means do not return an Attributes field at all.
    // This field is marked "mutable" so that the const apply() can modify
    // it (see explanation below), but note that because apply() may be
    // called more than once, if apply() sometimes sets this field it
    // must set it (even if just to the default empty value) every time.
    // Additionally, when _returnvalues_on_condition_check_failure is ALL_OLD,
    // a condition check failure will also result in storing values here.
    mutable rjson::value _return_attributes;
public:
    // The constructor of an rmw_operation subclass should parse the request
    // and try to discover as many input errors as it can before really
    // attempting the read or write operations.
    rmw_operation(service::storage_proxy& proxy, rjson::value&& request);
    // rmw_operation subclasses (update_item_operation, put_item_operation
    // and delete_item_operation) shall implement an apply() function which
    // takes the previous value of the item (if it was read) and creates the
    // write mutation. If the previous value of the item does not pass the
    // needed conditional expression, apply() should return an empty optional.
    // apply() may throw if it encounters input errors not discovered during
    // the constructor.
    // apply() may be called more than once in case of contention, so it must
    // not change the state saved in the object (issue #7218 was caused by
    // violating this). We mark apply() "const" to let the compiler validate
    // this for us. The output-only field _return_attributes is marked
    // "mutable" above so that apply() can still write to it.
    virtual std::optional<mutation> apply(std::unique_ptr<rjson::value> previous_item, api::timestamp_type ts) const = 0;
    // Convert the above apply() into the signature needed by cas_request:
    virtual std::optional<mutation> apply(foreign_ptr<lw_shared_ptr<query::result>> qr, const query::partition_slice& slice, api::timestamp_type ts) override;
    virtual ~rmw_operation() = default;
    schema_ptr schema() const { return _schema; }
    const rjson::value& request() const { return _request; }
    rjson::value&& move_request() && { return std::move(_request); }
    future<executor::request_return_type> execute(service::storage_proxy& proxy,
            service::client_state& client_state,
            tracing::trace_state_ptr trace_state,
            service_permit permit,
            bool needs_read_before_write,
            stats& stats);
    std::optional<shard_id> shard_for_execute(bool needs_read_before_write);
};

} // namespace alternator
@@ -1,559 +0,0 @@
/*
 * Copyright 2019-present ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 */

#include "utils/base64.hh"
#include "utils/rjson.hh"
#include "log.hh"
#include "serialization.hh"
#include "error.hh"
#include "concrete_types.hh"
#include "cql3/type_json.hh"
#include "mutation/position_in_partition.hh"

static logging::logger slogger("alternator-serialization");

namespace alternator {

bool is_alternator_keyspace(const sstring& ks_name);

type_info type_info_from_string(std::string_view type) {
    static thread_local const std::unordered_map<std::string_view, type_info> type_infos = {
        {"S", {alternator_type::S, utf8_type}},
        {"B", {alternator_type::B, bytes_type}},
        {"BOOL", {alternator_type::BOOL, boolean_type}},
        {"N", {alternator_type::N, decimal_type}}, //FIXME: Replace with custom Alternator type when implemented
    };
    auto it = type_infos.find(type);
    if (it == type_infos.end()) {
        return {alternator_type::NOT_SUPPORTED_YET, utf8_type};
    }
    return it->second;
}

type_representation represent_type(alternator_type atype) {
    static thread_local const std::unordered_map<alternator_type, type_representation> type_representations = {
        {alternator_type::S, {"S", utf8_type}},
        {alternator_type::B, {"B", bytes_type}},
        {alternator_type::BOOL, {"BOOL", boolean_type}},
        {alternator_type::N, {"N", decimal_type}}, //FIXME: Replace with custom Alternator type when implemented
    };
    auto it = type_representations.find(atype);
    if (it == type_representations.end()) {
        throw std::runtime_error(format("Unknown alternator type {}", int8_t(atype)));
    }
    return it->second;
}

// Get the magnitude and precision of a big_decimal - as these concepts are
// defined by DynamoDB - to allow us to enforce limits on those as explained
// in issue #6794. The "magnitude" of 9e123 is 123 and of -9e-123 is -123;
// the "precision" of 12.34e56 is the number of significant digits - 4.
//
// Unfortunately it turned out to be quite difficult to take a big_decimal and
// calculate its magnitude and precision from its scale() and unscaled_value().
// So in the following ugly implementation we calculate them from the string
// representation instead. We assume the number was already parsed
// successfully to a big_decimal, so it follows its syntax rules.
//
// FIXME: rewrite this function to take a big_decimal, not a string.
// Maybe a snippet like this can help:
// boost::multiprecision::cpp_int digits = boost::multiprecision::log10(num.unscaled_value().convert_to<boost::multiprecision::mpf_float_50>()).convert_to<boost::multiprecision::cpp_int>() + 1;


internal::magnitude_and_precision internal::get_magnitude_and_precision(std::string_view s) {
    size_t e_or_end = s.find_first_of("eE");
    std::string_view base = s.substr(0, e_or_end);
    if (s[0]=='-' || s[0]=='+') {
        base = base.substr(1);
    }
    int magnitude = 0;
    int precision = 0;
    size_t dot_or_end = base.find_first_of(".");
    size_t nonzero = base.find_first_not_of("0");
    if (dot_or_end != std::string_view::npos) {
        if (nonzero == dot_or_end) {
            // 0.000031 => magnitude = -5 (like 3.1e-5), precision = 2.
            std::string_view fraction = base.substr(dot_or_end + 1);
            size_t nonzero2 = fraction.find_first_not_of("0");
            if (nonzero2 != std::string_view::npos) {
                magnitude = -nonzero2 - 1;
                precision = fraction.size() - nonzero2;
            }
        } else {
            // 000123.45678 => magnitude = 2, precision = 8.
            magnitude = dot_or_end - nonzero - 1;
            precision = base.size() - nonzero - 1;
        }
        // trailing zeros don't count to precision, e.g., precision
        // of 1000.0, 1.0 or 1.0000 are just 1.
        size_t last_significant = base.find_last_not_of(".0");
        if (last_significant == std::string_view::npos) {
            precision = 0;
        } else if (last_significant < dot_or_end) {
            // e.g., 1000.00 reduce 5 = 7 - (0+1) - 1 from precision
            precision -= base.size() - last_significant - 2;
        } else {
            // e.g., 1235.60 reduce 1 = 7 - (5+1) from precision
            precision -= base.size() - last_significant - 1;
        }
    } else if (nonzero == std::string_view::npos) {
        // all-zero integer 000000
        magnitude = 0;
        precision = 0;
    } else {
        magnitude = base.size() - 1 - nonzero;
        precision = base.size() - nonzero;
        // trailing zeros don't count to precision, e.g., precision
        // of 1000 is just 1.
        size_t last_significant = base.find_last_not_of("0");
        if (last_significant == std::string_view::npos) {
            precision = 0;
        } else {
            // e.g., 1000 reduce 3 = 4 - (0+1)
            precision -= base.size() - last_significant - 1;
        }
    }
    if (precision && e_or_end != std::string_view::npos) {
        std::string_view exponent = s.substr(e_or_end + 1);
        if (exponent.size() > 4) {
            // don't even bother atoi(), exponent is too large
            magnitude = exponent[0]=='-' ? -9999 : 9999;
        } else {
            try {
                magnitude += boost::lexical_cast<int32_t>(exponent);
            } catch (...) {
                magnitude = 9999;
            }
        }
    }
    return magnitude_and_precision {magnitude, precision};
}
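
// A few worked examples of the rules implemented above, for the reader's
// convenience (editorial note, derived from the comments in the code):
//
//     get_magnitude_and_precision("0.000031");     // magnitude -5, precision 2
//     get_magnitude_and_precision("000123.45678"); // magnitude  2, precision 8
//     get_magnitude_and_precision("1000.00");      // magnitude  3, precision 1
//     get_magnitude_and_precision("12.34e56");     // magnitude 57, precision 4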

// Parse a number read from user input, validating that it has a valid
// numeric format and is also in the allowed magnitude and precision ranges
// (see issue #6794). Throws an api_error::validation if the validation
// failed.
static big_decimal parse_and_validate_number(std::string_view s) {
    try {
        big_decimal ret(s);
        auto [magnitude, precision] = internal::get_magnitude_and_precision(s);
        if (magnitude > 125) {
            throw api_error::validation(format("Number overflow: {}. Attempting to store a number with magnitude larger than supported range.", s));
        }
        if (magnitude < -130) {
            throw api_error::validation(format("Number underflow: {}. Attempting to store a number with magnitude lower than supported range.", s));
        }
        if (precision > 38) {
            throw api_error::validation(format("Number too precise: {}. Attempting to store a number with more significant digits than supported.", s));
        }
        return ret;
    } catch (const marshal_exception& e) {
        throw api_error::validation(format("The parameter cannot be converted to a numeric value: {}", s));
    }
}
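
// Editorial illustration of the limits enforced above (not part of the
// original file): the magnitude must be in [-130, 125] and the precision
// at most 38 significant digits, so for example:
//
//     parse_and_validate_number("9.9e125");  // ok: magnitude 125
//     parse_and_validate_number("1e126");    // throws: magnitude 126 > 125
//     parse_and_validate_number("1e-131");   // throws: magnitude -131 < -130
//     parse_and_validate_number("1.23456789012345678901234567890123456789");
//                                            // throws: 39 significant digits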

struct from_json_visitor {
    const rjson::value& v;
    bytes_ostream& bo;

    void operator()(const reversed_type_impl& t) const { visit(*t.underlying_type(), from_json_visitor{v, bo}); };
    void operator()(const string_type_impl& t) {
        bo.write(t.from_string(rjson::to_string_view(v)));
    }
    void operator()(const bytes_type_impl& t) const {
        // FIXME: it's difficult at this point to tell whether the value was
        // provided in the request or comes from storage; for now we assume
        // it's the user's fault.
        bo.write(*unwrap_bytes(v, true));
    }
    void operator()(const boolean_type_impl& t) const {
        bo.write(boolean_type->decompose(v.GetBool()));
    }
    void operator()(const decimal_type_impl& t) const {
        bo.write(decimal_type->decompose(parse_and_validate_number(rjson::to_string_view(v))));
    }
    // default
    void operator()(const abstract_type& t) const {
        bo.write(from_json_object(t, v));
    }
};

bytes serialize_item(const rjson::value& item) {
    if (item.IsNull() || item.MemberCount() != 1) {
        throw api_error::validation(format("An item can contain only one attribute definition: {}", item));
    }
    auto it = item.MemberBegin();
    type_info type_info = type_info_from_string(rjson::to_string_view(it->name)); // JSON keys are guaranteed to be strings

    if (type_info.atype == alternator_type::NOT_SUPPORTED_YET) {
        slogger.trace("Non-optimal serialization of type {}", it->name);
        return bytes{int8_t(type_info.atype)} + to_bytes(rjson::print(item));
    }

    bytes_ostream bo;
    bo.write(bytes{int8_t(type_info.atype)});
    visit(*type_info.dtype, from_json_visitor{it->value, bo});

    return bytes(bo.linearize());
}
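
// Editorial note (not part of the original file): the serialized form
// produced above is a one-byte alternator_type tag followed by the value's
// serialized bytes. For example, the item {"S": "hi"} becomes, in hex,
//
//     00 68 69   // int8_t(alternator_type::S) == 0, then "hi" in UTF-8
//
// while an unsupported type falls back to the NOT_SUPPORTED_YET tag followed
// by the printed JSON of the whole item.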

struct to_json_visitor {
    rjson::value& deserialized;
    const std::string& type_ident;
    bytes_view bv;

    void operator()(const reversed_type_impl& t) const { visit(*t.underlying_type(), to_json_visitor{deserialized, type_ident, bv}); };
    void operator()(const decimal_type_impl& t) const {
        auto s = to_json_string(*decimal_type, bytes(bv));
        //FIXME(sarna): unnecessary copy
        rjson::add_with_string_name(deserialized, type_ident, rjson::from_string(s));
    }
    void operator()(const string_type_impl& t) {
        rjson::add_with_string_name(deserialized, type_ident, rjson::from_string(reinterpret_cast<const char *>(bv.data()), bv.size()));
    }
    void operator()(const bytes_type_impl& t) const {
        std::string b64 = base64_encode(bv);
        rjson::add_with_string_name(deserialized, type_ident, rjson::from_string(b64));
    }
    // default
    void operator()(const abstract_type& t) const {
        rjson::add_with_string_name(deserialized, type_ident, rjson::parse(to_json_string(t, bytes(bv))));
    }
};

rjson::value deserialize_item(bytes_view bv) {
    rjson::value deserialized(rapidjson::kObjectType);
    if (bv.empty()) {
        throw api_error::validation("Serialized value empty");
    }

    alternator_type atype = alternator_type(bv[0]);
    bv.remove_prefix(1);

    if (atype == alternator_type::NOT_SUPPORTED_YET) {
        slogger.trace("Non-optimal deserialization of alternator type {}", int8_t(atype));
        return rjson::parse(std::string_view(reinterpret_cast<const char *>(bv.data()), bv.size()));
    }
    type_representation type_representation = represent_type(atype);
    visit(*type_representation.dtype, to_json_visitor{deserialized, type_representation.ident, bv});

    return deserialized;
}

std::string type_to_string(data_type type) {
    static thread_local std::unordered_map<data_type, std::string> types = {
        {utf8_type, "S"},
        {bytes_type, "B"},
        {boolean_type, "BOOL"},
        {decimal_type, "N"}, // FIXME: use a specialized Alternator number type instead of the general decimal_type
    };
    auto it = types.find(type);
    if (it == types.end()) {
        // fall back to string, in order to be able to present
        // internal Scylla types in a human-readable way
        return "S";
    }
    return it->second;
}

bytes get_key_column_value(const rjson::value& item, const column_definition& column) {
    std::string column_name = column.name_as_text();
    const rjson::value* key_typed_value = rjson::find(item, column_name);
    if (!key_typed_value) {
        throw api_error::validation(format("Key column {} not found", column_name));
    }
    return get_key_from_typed_value(*key_typed_value, column);
}

// Parses the JSON encoding for a key value, which is a map with a single
// entry whose key is the type and the value is the encoded value.
// If this type does not match the desired "type_str", an api_error::validation
// error is thrown (the "name" parameter is the name of the column which will
// be mentioned in the exception message).
// If the type does match, a reference to the encoded value is returned.
static const rjson::value& get_typed_value(const rjson::value& key_typed_value, std::string_view type_str, std::string_view name, std::string_view value_name) {
    if (!key_typed_value.IsObject() || key_typed_value.MemberCount() != 1 ||
            !key_typed_value.MemberBegin()->value.IsString()) {
        throw api_error::validation(
                format("Malformed value object for {} {}: {}",
                        value_name, name, key_typed_value));
    }

    auto it = key_typed_value.MemberBegin();
    if (rjson::to_string_view(it->name) != type_str) {
        throw api_error::validation(
                format("Type mismatch: expected type {} for {} {}, got type {}",
                        type_str, value_name, name, it->name));
    }
    return it->value;
}

// Parses the JSON encoding for a key value, which is a map with a single
// entry, whose key is the type (expected to match the key column's type)
// and the value is the encoded value.
bytes get_key_from_typed_value(const rjson::value& key_typed_value, const column_definition& column) {
    auto& value = get_typed_value(key_typed_value, type_to_string(column.type), column.name_as_text(), "key column");
    std::string_view value_view = rjson::to_string_view(value);
    if (value_view.empty()) {
        throw api_error::validation(
                format("The AttributeValue for a key attribute cannot contain an empty string value. Key: {}", column.name_as_text()));
    }
    if (column.type == bytes_type) {
        // FIXME: it's difficult at this point to tell whether the value was
        // provided in the request or comes from storage; for now we assume
        // it's the user's fault.
        return *unwrap_bytes(value, true);
    } else if (column.type == decimal_type) {
        return decimal_type->decompose(parse_and_validate_number(rjson::to_string_view(value)));
    } else {
        return column.type->from_string(value_view);
    }
}

rjson::value json_key_column_value(bytes_view cell, const column_definition& column) {
    if (column.type == bytes_type) {
        std::string b64 = base64_encode(cell);
        return rjson::from_string(b64);
    } else if (column.type == utf8_type) {
        return rjson::from_string(reinterpret_cast<const char*>(cell.data()), cell.size());
    } else if (column.type == decimal_type) {
        // FIXME: use specialized Alternator number type, not the more
        // general "decimal_type". A dedicated type can be more efficient
        // in storage space and in parsing speed.
        auto s = to_json_string(*decimal_type, bytes(cell));
        return rjson::from_string(s);
    } else {
        // Support for arbitrary key types is useful for parsing values of virtual tables,
        // which can involve any type supported by Scylla.
        // In order to guarantee that the returned type is parsable by alternator clients,
        // they are represented simply as strings.
        return rjson::from_string(column.type->to_string(bytes(cell)));
    }
}


partition_key pk_from_json(const rjson::value& item, schema_ptr schema) {
    std::vector<bytes> raw_pk;
    // FIXME: this is a loop, but we really allow only one partition key column.
    for (const column_definition& cdef : schema->partition_key_columns()) {
        bytes raw_value = get_key_column_value(item, cdef);
        raw_pk.push_back(std::move(raw_value));
    }
    return partition_key::from_exploded(raw_pk);
}

clustering_key ck_from_json(const rjson::value& item, schema_ptr schema) {
    if (schema->clustering_key_size() == 0) {
        return clustering_key::make_empty();
    }
    std::vector<bytes> raw_ck;
    // FIXME: this is a loop, but we really allow only one clustering key column.
    for (const column_definition& cdef : schema->clustering_key_columns()) {
        bytes raw_value = get_key_column_value(item, cdef);
        raw_ck.push_back(std::move(raw_value));
    }

    return clustering_key::from_exploded(raw_ck);
}

position_in_partition pos_from_json(const rjson::value& item, schema_ptr schema) {
    auto ck = ck_from_json(item, schema);
    if (is_alternator_keyspace(schema->ks_name())) {
        return position_in_partition::for_key(std::move(ck));
    }
    const auto region_item = rjson::find(item, scylla_paging_region);
    const auto weight_item = rjson::find(item, scylla_paging_weight);
    if (bool(region_item) != bool(weight_item)) {
        throw api_error::validation("Malformed value object: region and weight have to be either both missing or both present");
    }
    bound_weight weight;
    if (region_item) {
        auto region_view = rjson::to_string_view(get_typed_value(*region_item, "S", scylla_paging_region, "key region"));
        auto weight_view = rjson::to_string_view(get_typed_value(*weight_item, "N", scylla_paging_weight, "key weight"));
        auto region = parse_partition_region(region_view);
        if (weight_view == "-1") {
            weight = bound_weight::before_all_prefixed;
        } else if (weight_view == "0") {
            weight = bound_weight::equal;
        } else if (weight_view == "1") {
            weight = bound_weight::after_all_prefixed;
        } else {
            throw std::runtime_error(fmt::format("Invalid value for weight: {}", weight_view));
        }
        return position_in_partition(region, weight, region == partition_region::clustered ? std::optional(std::move(ck)) : std::nullopt);
    }
    if (ck.is_empty()) {
        return position_in_partition::for_partition_start();
    }
    return position_in_partition::for_key(std::move(ck));
}
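
// Editorial summary of the Scylla-specific paging fields handled above (not
// part of the original file): a page-state item may carry the keys
// ":scylla:paging:region" (an "S" value) and ":scylla:paging:weight" (an "N"
// value), where the weight string maps to a position bound as follows:
//
//     "-1" -> bound_weight::before_all_prefixed
//      "0" -> bound_weight::equal
//      "1" -> bound_weight::after_all_prefixed
//
// The clustering key is attached only for the clustered partition region.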

big_decimal unwrap_number(const rjson::value& v, std::string_view diagnostic) {
    if (!v.IsObject() || v.MemberCount() != 1) {
        throw api_error::validation(format("{}: invalid number object", diagnostic));
    }
    auto it = v.MemberBegin();
    if (it->name != "N") {
        throw api_error::validation(format("{}: expected number, found type '{}'", diagnostic, it->name));
    }
    if (!it->value.IsString()) {
        // We shouldn't reach here. Callers normally validate their input
        // earlier with validate_value().
        throw api_error::validation(format("{}: improperly formatted number constant", diagnostic));
    }
    big_decimal ret = parse_and_validate_number(rjson::to_string_view(it->value));
    return ret;
}

std::optional<big_decimal> try_unwrap_number(const rjson::value& v) {
    if (!v.IsObject() || v.MemberCount() != 1) {
        return std::nullopt;
    }
    auto it = v.MemberBegin();
    if (it->name != "N" || !it->value.IsString()) {
        return std::nullopt;
    }
    try {
        return parse_and_validate_number(rjson::to_string_view(it->value));
    } catch (api_error&) {
        return std::nullopt;
    }
}

std::optional<bytes> unwrap_bytes(const rjson::value& value, bool from_query) {
    try {
        return rjson::base64_decode(value);
    } catch (...) {
        if (from_query) {
            throw api_error::serialization(format("Invalid base64 data"));
        }
        return std::nullopt;
    }
}

const std::pair<std::string, const rjson::value*> unwrap_set(const rjson::value& v) {
    if (!v.IsObject() || v.MemberCount() != 1) {
        return {"", nullptr};
    }
    auto it = v.MemberBegin();
    const std::string it_key = it->name.GetString();
    if (it_key != "SS" && it_key != "BS" && it_key != "NS") {
        return {std::move(it_key), nullptr};
    }
    return std::make_pair(it_key, &(it->value));
}

const rjson::value* unwrap_list(const rjson::value& v) {
    if (!v.IsObject() || v.MemberCount() != 1) {
        return nullptr;
    }
    auto it = v.MemberBegin();
    if (it->name != std::string("L")) {
        return nullptr;
    }
    return &(it->value);
}

// Take two JSON-encoded numeric values ({"N": "thenumber"}) and return the
// sum, again as a JSON-encoded number.
rjson::value number_add(const rjson::value& v1, const rjson::value& v2) {
    auto n1 = unwrap_number(v1, "UpdateExpression");
    auto n2 = unwrap_number(v2, "UpdateExpression");
    rjson::value ret = rjson::empty_object();
    sstring str_ret = (n1 + n2).to_string();
    rjson::add(ret, "N", rjson::from_string(str_ret));
    return ret;
}

rjson::value number_subtract(const rjson::value& v1, const rjson::value& v2) {
    auto n1 = unwrap_number(v1, "UpdateExpression");
    auto n2 = unwrap_number(v2, "UpdateExpression");
    rjson::value ret = rjson::empty_object();
    sstring str_ret = (n1 - n2).to_string();
    rjson::add(ret, "N", rjson::from_string(str_ret));
    return ret;
}

// Take two JSON-encoded set values (e.g. {"SS": [...the actual set]}) and
// return the sum of both sets, again as a set value.
rjson::value set_sum(const rjson::value& v1, const rjson::value& v2) {
    auto [set1_type, set1] = unwrap_set(v1);
    auto [set2_type, set2] = unwrap_set(v2);
    if (set1_type != set2_type) {
        throw api_error::validation(format("Mismatched set types: {} and {}", set1_type, set2_type));
    }
    if (!set1 || !set2) {
        throw api_error::validation("UpdateExpression: ADD operation for sets must be given sets as arguments");
    }
    rjson::value sum = rjson::copy(*set1);
    std::set<rjson::value, rjson::single_value_comp> set1_raw;
    for (auto it = sum.Begin(); it != sum.End(); ++it) {
        set1_raw.insert(rjson::copy(*it));
    }
    for (const auto& a : set2->GetArray()) {
        if (!set1_raw.contains(a)) {
            rjson::push_back(sum, rjson::copy(a));
        }
    }
    rjson::value ret = rjson::empty_object();
    rjson::add_with_string_name(ret, set1_type, std::move(sum));
    return ret;
}
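
// Editorial illustration (not part of the original file): ADD on two string
// sets is a union which preserves the first set's order, e.g.
//
//     set_sum({"SS": ["a","b"]}, {"SS": ["b","c"]})  => {"SS": ["a","b","c"]}
//     set_sum({"SS": ["a"]},     {"NS": ["1"]})      => throws: mismatched types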

// Take two JSON-encoded set values (e.g. {"SS": [...the actual set]}) and
// return the difference of s1 - s2, again as a set value.
// DynamoDB does not allow empty sets, so if the resulting set is empty,
// return an unset optional instead.
std::optional<rjson::value> set_diff(const rjson::value& v1, const rjson::value& v2) {
    auto [set1_type, set1] = unwrap_set(v1);
    auto [set2_type, set2] = unwrap_set(v2);
    if (set1_type != set2_type) {
        throw api_error::validation(format("Set DELETE type mismatch: {} and {}", set1_type, set2_type));
    }
    if (!set1 || !set2) {
        throw api_error::validation("UpdateExpression: DELETE operation can only be performed on a set");
    }
    std::set<rjson::value, rjson::single_value_comp> set1_raw;
    for (auto it = set1->Begin(); it != set1->End(); ++it) {
        set1_raw.insert(rjson::copy(*it));
    }
    for (const auto& a : set2->GetArray()) {
        set1_raw.erase(a);
    }
    if (set1_raw.empty()) {
        return std::nullopt;
    }
    rjson::value ret = rjson::empty_object();
    rjson::add_with_string_name(ret, set1_type, rjson::empty_array());
    rjson::value& result_set = ret[set1_type];
    for (const auto& a : set1_raw) {
        rjson::push_back(result_set, rjson::copy(a));
    }
    return ret;
}

// Take two JSON-encoded list values (remember that a list value is
// {"L": [...the actual list]}) and return the concatenation, again as
// a list value.
// Returns a null value if one of the arguments is not actually a list.
rjson::value list_concatenate(const rjson::value& v1, const rjson::value& v2) {
    const rjson::value* list1 = unwrap_list(v1);
    const rjson::value* list2 = unwrap_list(v2);
    if (!list1 || !list2) {
        return rjson::null_value();
    }
    rjson::value cat = rjson::copy(*list1);
    for (const auto& a : list2->GetArray()) {
        rjson::push_back(cat, rjson::copy(a));
    }
    rjson::value ret = rjson::empty_object();
    rjson::add(ret, "L", std::move(cat));
    return ret;
}

} // namespace alternator
@@ -1,105 +0,0 @@
/*
 * Copyright 2019-present ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 */

#pragma once

#include <string>
#include <string_view>
#include <optional>
#include "types/types.hh"
#include "schema/schema_fwd.hh"
#include "keys.hh"
#include "utils/rjson.hh"
#include "utils/big_decimal.hh"

class position_in_partition;

namespace alternator {

enum class alternator_type : int8_t {
    S, B, BOOL, N, NOT_SUPPORTED_YET
};

struct type_info {
    alternator_type atype;
    data_type dtype;
};

struct type_representation {
    std::string ident;
    data_type dtype;
};

inline constexpr std::string_view scylla_paging_region(":scylla:paging:region");
inline constexpr std::string_view scylla_paging_weight(":scylla:paging:weight");

type_info type_info_from_string(std::string_view type);
type_representation represent_type(alternator_type atype);

bytes serialize_item(const rjson::value& item);
rjson::value deserialize_item(bytes_view bv);

std::string type_to_string(data_type type);

bytes get_key_column_value(const rjson::value& item, const column_definition& column);
bytes get_key_from_typed_value(const rjson::value& key_typed_value, const column_definition& column);
rjson::value json_key_column_value(bytes_view cell, const column_definition& column);

partition_key pk_from_json(const rjson::value& item, schema_ptr schema);
clustering_key ck_from_json(const rjson::value& item, schema_ptr schema);
position_in_partition pos_from_json(const rjson::value& item, schema_ptr schema);

// If v encodes a number (i.e., it is a {"N": ...}), returns an object
// representing it. Otherwise, raises ValidationException with diagnostic.
big_decimal unwrap_number(const rjson::value& v, std::string_view diagnostic);

// try_unwrap_number is like unwrap_number, but returns an unset optional
// when the given v does not encode a number.
std::optional<big_decimal> try_unwrap_number(const rjson::value& v);

// unwrap_bytes decodes a byte value; on decoding failure it either raises
// api_error::serialization (iff from_query is true) or returns an unset
// optional (iff from_query is false).
// Therefore it's safe to dereference the returned optional when called with
// from_query equal to true.
std::optional<bytes> unwrap_bytes(const rjson::value& value, bool from_query);

// Check if a given JSON object encodes a set (i.e., it is a {"SS": [...]},
// "NS" or "BS") and returns the set's type and a pointer to that set. If the
// object does not encode a set, the returned value is {"", nullptr}.
const std::pair<std::string, const rjson::value*> unwrap_set(const rjson::value& v);

// Check if a given JSON object encodes a list (i.e., it is a {"L": [...]})
// and returns a pointer to that list.
const rjson::value* unwrap_list(const rjson::value& v);

// Take two JSON-encoded numeric values ({"N": "thenumber"}) and return the
// sum, again as a JSON-encoded number.
rjson::value number_add(const rjson::value& v1, const rjson::value& v2);
rjson::value number_subtract(const rjson::value& v1, const rjson::value& v2);
// Take two JSON-encoded set values (e.g. {"SS": [...the actual set]}) and
// return the sum of both sets, again as a set value.
rjson::value set_sum(const rjson::value& v1, const rjson::value& v2);
// Take two JSON-encoded set values (e.g. {"SS": [...the actual set]}) and
// return the difference of s1 - s2, again as a set value.
// DynamoDB does not allow empty sets, so if the resulting set is empty,
// return an unset optional instead.
std::optional<rjson::value> set_diff(const rjson::value& v1, const rjson::value& v2);
// Take two JSON-encoded list values (remember that a list value is
// {"L": [...the actual list]}) and return the concatenation, again as
// a list value.
// Returns a null value if one of the arguments is not actually a list.
rjson::value list_concatenate(const rjson::value& v1, const rjson::value& v2);

namespace internal {
struct magnitude_and_precision {
    int magnitude;
    int precision;
};
magnitude_and_precision get_magnitude_and_precision(std::string_view);
} // namespace internal

} // namespace alternator
@@ -1,648 +0,0 @@
/*
 * Copyright 2019-present ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 */

#include "alternator/server.hh"
#include "log.hh"
#include <fmt/ranges.h>
#include <seastar/http/function_handlers.hh>
#include <seastar/http/short_streams.hh>
#include <seastar/core/coroutine.hh>
#include <seastar/json/json_elements.hh>
#include <seastar/util/defer.hh>
#include <seastar/util/short_streams.hh>
#include "seastarx.hh"
#include "error.hh"
#include "service/qos/service_level_controller.hh"
#include "utils/rjson.hh"
#include "auth.hh"
#include <cctype>
#include <string_view>
#include <utility>
#include "service/storage_proxy.hh"
#include "gms/gossiper.hh"
#include "utils/overloaded_functor.hh"
#include "utils/aws_sigv4.hh"

static logging::logger slogger("alternator-server");

using namespace httpd;
using request = http::request;
using reply = http::reply;

namespace alternator {

inline std::vector<std::string_view> split(std::string_view text, char separator) {
    std::vector<std::string_view> tokens;
    if (text.empty()) {
        return tokens;
    }

    while (true) {
        auto pos = text.find_first_of(separator);
        if (pos != std::string_view::npos) {
            tokens.emplace_back(text.data(), pos);
            text.remove_prefix(pos + 1);
        } else {
            tokens.emplace_back(text);
            break;
        }
    }
    return tokens;
}
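
// Editorial illustration (not part of the original file):
//
//     split("content-type;host;x-amz-date", ';')
//         => {"content-type", "host", "x-amz-date"}
//     split("", ';')     => {}                 // empty input yields no tokens
//     split("a;;b", ';') => {"a", "", "b"}     // empty fields are preserved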

// Handle CORS (Cross-origin resource sharing) in the HTTP request:
// If the request has the "Origin" header specifying where the script which
// makes this request comes from, we need to reply with the header
// "Access-Control-Allow-Origin: *" saying that this (and any) origin is fine.
// Additionally, if preflight==true (i.e., this is an OPTIONS request),
// the script can also "request" in headers that the server allows it to use
// some HTTP methods and headers in the followup request, and the server
// should respond by "allowing" them in the response headers.
// We also add the header "Access-Control-Expose-Headers" to let the script
// access additional headers in the response.
// This handle_CORS() should be used when handling any HTTP method - both the
// usual GET and POST, and also the "preflight" OPTIONS method.
static void handle_CORS(const request& req, reply& rep, bool preflight) {
    if (!req.get_header("origin").empty()) {
        rep.add_header("Access-Control-Allow-Origin", "*");
        // This is the list that DynamoDB returns for expose headers. I am
        // not sure why not just return "*" here, what's the risk?
        rep.add_header("Access-Control-Expose-Headers", "x-amzn-RequestId,x-amzn-ErrorType,x-amzn-ErrorMessage,Date");
        if (preflight) {
            sstring s = req.get_header("Access-Control-Request-Headers");
            if (!s.empty()) {
                rep.add_header("Access-Control-Allow-Headers", std::move(s));
            }
            s = req.get_header("Access-Control-Request-Method");
            if (!s.empty()) {
                rep.add_header("Access-Control-Allow-Methods", std::move(s));
            }
            // Our CORS responses never change anyway, so let the browser
            // cache them for two hours (Chrome's maximum):
            rep.add_header("Access-Control-Max-Age", "7200");
        }
    }
}
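
// Editorial illustration (not part of the original file): for a hypothetical
// preflight request
//
//     OPTIONS / HTTP/1.1
//     Origin: https://example.com
//     Access-Control-Request-Method: POST
//     Access-Control-Request-Headers: content-type,x-amz-target
//
// the code above adds the response headers
//
//     Access-Control-Allow-Origin: *
//     Access-Control-Expose-Headers: x-amzn-RequestId,x-amzn-ErrorType,x-amzn-ErrorMessage,Date
//     Access-Control-Allow-Methods: POST
//     Access-Control-Allow-Headers: content-type,x-amz-target
//     Access-Control-Max-Age: 7200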

// DynamoDB HTTP error responses are structured as follows
// https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html
// Our handlers throw an exception to report an error. If the exception
// is of type alternator::api_error, it is unwrapped and properly reported to
// the user directly. Other exceptions are unexpected, and reported as
// Internal Server Error.
class api_handler : public handler_base {
public:
    api_handler(const std::function<future<executor::request_return_type>(std::unique_ptr<request> req)>& _handle) : _f_handle(
        [this, _handle](std::unique_ptr<request> req, std::unique_ptr<reply> rep) {
            return seastar::futurize_invoke(_handle, std::move(req)).then_wrapped([this, rep = std::move(rep)](future<executor::request_return_type> resf) mutable {
                if (resf.failed()) {
                    // Exceptions of type api_error are wrapped as JSON and
                    // returned to the client as expected. Other types of
                    // exceptions are unexpected, and returned to the user
                    // as an internal server error:
                    try {
                        resf.get();
                    } catch (api_error &ae) {
                        generate_error_reply(*rep, ae);
                    } catch (rjson::error & re) {
                        generate_error_reply(*rep,
                                api_error::validation(re.what()));
                    } catch (...) {
                        generate_error_reply(*rep,
                                api_error::internal(format("Internal server error: {}", std::current_exception())));
                    }
                    return make_ready_future<std::unique_ptr<reply>>(std::move(rep));
                }
                auto res = resf.get();
                std::visit(overloaded_functor {
                    [&] (const json::json_return_type& json_return_value) {
                        slogger.trace("api_handler success case");
                        if (json_return_value._body_writer) {
                            // Unfortunately, write_body() forces us to choose
                            // from a fixed and irrelevant list of "mime-types"
                            // at this point. But we'll override it with the
                            // one (application/x-amz-json-1.0) below.
                            rep->write_body("json", std::move(json_return_value._body_writer));
                        } else {
                            rep->_content += json_return_value._res;
                        }
                    },
                    [&] (const api_error& err) {
                        generate_error_reply(*rep, err);
                    }
                }, res);

                return make_ready_future<std::unique_ptr<reply>>(std::move(rep));
            });
        }) { }

    api_handler(const api_handler&) = default;
    future<std::unique_ptr<reply>> handle(const sstring& path,
            std::unique_ptr<request> req, std::unique_ptr<reply> rep) override {
        handle_CORS(*req, *rep, false);
        return _f_handle(std::move(req), std::move(rep)).then(
                [](std::unique_ptr<reply> rep) {
                    rep->set_mime_type("application/x-amz-json-1.0");
                    rep->done();
                    return make_ready_future<std::unique_ptr<reply>>(std::move(rep));
                });
    }

protected:
    void generate_error_reply(reply& rep, const api_error& err) {
        rjson::value results = rjson::empty_object();
        if (!err._extra_fields.IsNull() && err._extra_fields.IsObject()) {
            results = rjson::copy(err._extra_fields);
        }
        rjson::add(results, "__type", rjson::from_string("com.amazonaws.dynamodb.v20120810#" + err._type));
        rjson::add(results, "message", err._msg);
        rep._content = rjson::print(std::move(results));
        rep._status = err._http_code;
        slogger.trace("api_handler error case: {}", rep._content);
    }

    future_handler_function _f_handle;
};

class gated_handler : public handler_base {
    seastar::gate& _gate;
public:
    gated_handler(seastar::gate& gate) : _gate(gate) {}
    virtual future<std::unique_ptr<reply>> do_handle(const sstring& path, std::unique_ptr<request> req, std::unique_ptr<reply> rep) = 0;
    virtual future<std::unique_ptr<reply>> handle(const sstring& path, std::unique_ptr<request> req, std::unique_ptr<reply> rep) final override {
        return with_gate(_gate, [this, &path, req = std::move(req), rep = std::move(rep)] () mutable {
            return do_handle(path, std::move(req), std::move(rep));
        });
    }
};

class health_handler : public gated_handler {
public:
    health_handler(seastar::gate& pending_requests) : gated_handler(pending_requests) {}
protected:
    virtual future<std::unique_ptr<reply>> do_handle(const sstring& path, std::unique_ptr<request> req, std::unique_ptr<reply> rep) override {
        handle_CORS(*req, *rep, false);
        rep->set_status(reply::status_type::ok);
        rep->write_body("txt", format("healthy: {}", req->get_header("Host")));
        return make_ready_future<std::unique_ptr<reply>>(std::move(rep));
    }
};

class local_nodelist_handler : public gated_handler {
    service::storage_proxy& _proxy;
    gms::gossiper& _gossiper;
public:
    local_nodelist_handler(seastar::gate& pending_requests, service::storage_proxy& proxy, gms::gossiper& gossiper)
        : gated_handler(pending_requests)
        , _proxy(proxy)
        , _gossiper(gossiper) {}
protected:
    virtual future<std::unique_ptr<reply>> do_handle(const sstring& path, std::unique_ptr<request> req, std::unique_ptr<reply> rep) override {
        rjson::value results = rjson::empty_array();
        // It's very easy to get a list of all live nodes on the cluster,
        // using _gossiper.get_live_members(). But getting
        // just the list of live nodes in this DC needs more elaborate code:
        auto& topology = _proxy.get_token_metadata_ptr()->get_topology();
        sstring local_dc = topology.get_datacenter();
        std::unordered_set<gms::inet_address> local_dc_nodes = topology.get_datacenter_endpoints().at(local_dc);
        for (auto& ip : local_dc_nodes) {
            // Note that it's not enough for the node to be is_alive() - a
            // node joining the cluster is also "alive" but not responsive to
            // requests. We require alive *and* normal. See #19694, #21538.
            if (_gossiper.is_alive(ip) && _gossiper.is_normal(ip)) {
                // Use the gossiped broadcast_rpc_address if available instead
                // of the internal IP address "ip". See discussion in #18711.
                rjson::push_back(results, rjson::from_string(_gossiper.get_rpc_address(ip)));
            }
        }
        rep->set_status(reply::status_type::ok);
        rep->set_content_type("json");
        rep->_content = rjson::print(results);
        return make_ready_future<std::unique_ptr<reply>>(std::move(rep));
    }
};

// The CORS (Cross-origin resource sharing) protocol can send an OPTIONS
// request before ("pre-flight") the main request. The response to this
// request can be empty, but needs to have the right headers (which we
// fill with handle_CORS())
class options_handler : public gated_handler {
public:
    options_handler(seastar::gate& pending_requests) : gated_handler(pending_requests) {}
protected:
    virtual future<std::unique_ptr<reply>> do_handle(const sstring& path, std::unique_ptr<request> req, std::unique_ptr<reply> rep) override {
        handle_CORS(*req, *rep, true);
        rep->set_status(reply::status_type::ok);
        rep->write_body("txt", sstring(""));
        return make_ready_future<std::unique_ptr<reply>>(std::move(rep));
    }
};

future<std::string> server::verify_signature(const request& req, const chunked_content& content) {
    if (!_enforce_authorization) {
        slogger.debug("Skipping authorization");
        return make_ready_future<std::string>();
    }
    auto host_it = req._headers.find("Host");
    if (host_it == req._headers.end()) {
        throw api_error::invalid_signature("Host header is mandatory for signature verification");
    }
    auto authorization_it = req._headers.find("Authorization");
    if (authorization_it == req._headers.end()) {
        throw api_error::missing_authentication_token("Authorization header is mandatory for signature verification");
    }
    std::string host = host_it->second;
    std::string_view authorization_header = authorization_it->second;
    auto pos = authorization_header.find_first_of(' ');
    if (pos == std::string_view::npos || authorization_header.substr(0, pos) != "AWS4-HMAC-SHA256") {
        throw api_error::invalid_signature(format("Authorization header must use AWS4-HMAC-SHA256 algorithm: {}", authorization_header));
    }
    authorization_header.remove_prefix(pos+1);
    std::string credential;
    std::string user_signature;
    std::string signed_headers_str;
    std::vector<std::string_view> signed_headers;
    do {
        // Either a comma or a space can mark the end of an entry
        pos = authorization_header.find_first_of(" ,");
        std::string_view entry = authorization_header.substr(0, pos);
        if (pos != std::string_view::npos) {
            authorization_header.remove_prefix(pos + 1);
        }
        if (entry.empty()) {
            continue;
        }
        std::vector<std::string_view> entry_split = split(entry, '=');
        if (entry_split.size() != 2) {
            continue;
        }
        std::string_view auth_value = entry_split[1];
        if (entry_split[0] == "Credential") {
            credential = std::string(auth_value);
        } else if (entry_split[0] == "Signature") {
            user_signature = std::string(auth_value);
        } else if (entry_split[0] == "SignedHeaders") {
            signed_headers_str = std::string(auth_value);
            signed_headers = split(auth_value, ';');
            std::sort(signed_headers.begin(), signed_headers.end());
        }
    } while (pos != std::string_view::npos);

    std::vector<std::string_view> credential_split = split(credential, '/');
    if (credential_split.size() != 5) {
        throw api_error::validation(format("Incorrect credential information format: {}", credential));
    }
    std::string user(credential_split[0]);
    std::string datestamp(credential_split[1]);
    std::string region(credential_split[2]);
    std::string service(credential_split[3]);

    std::map<std::string_view, std::string_view> signed_headers_map;
    for (const auto& header : signed_headers) {
        signed_headers_map.emplace(header, std::string_view());
    }
    for (auto& header : req._headers) {
        std::string header_str;
        header_str.resize(header.first.size());
        std::transform(header.first.begin(), header.first.end(), header_str.begin(), ::tolower);
        auto it = signed_headers_map.find(header_str);
        if (it != signed_headers_map.end()) {
            it->second = std::string_view(header.second);
        }
    }

    auto cache_getter = [&proxy = _proxy, &as = _auth_service] (std::string username) {
        return get_key_from_roles(proxy, as, std::move(username));
    };
    return _key_cache.get_ptr(user, cache_getter).then([this, &req, &content,
            user = std::move(user),
            host = std::move(host),
            datestamp = std::move(datestamp),
            signed_headers_str = std::move(signed_headers_str),
            signed_headers_map = std::move(signed_headers_map),
            region = std::move(region),
            service = std::move(service),
            user_signature = std::move(user_signature)] (key_cache::value_ptr key_ptr) {
        std::string signature;
        try {
            signature = utils::aws::get_signature(user, *key_ptr, std::string_view(host), "/", req._method,
                    datestamp, signed_headers_str, signed_headers_map, &content, region, service, "");
        } catch (const std::exception& e) {
            throw api_error::invalid_signature(e.what());
        }

        if (signature != std::string_view(user_signature)) {
            _key_cache.remove(user);
            throw api_error::unrecognized_client("The security token included in the request is invalid.");
        }
        return user;
    });
}
|
||||
|
||||
static tracing::trace_state_ptr create_tracing_session(tracing::tracing& tracing_instance) {
|
||||
tracing::trace_state_props_set props;
|
||||
props.set<tracing::trace_state_props::full_tracing>();
|
||||
props.set_if<tracing::trace_state_props::log_slow_query>(tracing_instance.slow_query_tracing_enabled());
|
||||
return tracing_instance.create_session(tracing::trace_type::QUERY, props);
|
||||
}
|
||||
|
||||
// truncated_content_view() prints a potentially long chunked_content for
|
||||
// debugging purposes. In the common case when the content is not excessively
|
||||
// long, it just returns a view into the given content, without any copying.
|
||||
// But when the content is very long, it is truncated after some arbitrary
|
||||
// max_len (or one chunk, whichever comes first), with "<truncated>" added at
|
||||
// the end. To do this modification to the string, we need to create a new
|
||||
// std::string, so the caller must pass us a reference to one, "buf", where
|
||||
// we can store the content. The returned view is only alive for as long as
// this buf is kept alive.
static std::string_view truncated_content_view(const chunked_content& content, std::string& buf) {
    constexpr size_t max_len = 1024;
    if (content.empty()) {
        return std::string_view();
    } else if (content.size() == 1 && content.begin()->size() <= max_len) {
        return std::string_view(content.begin()->get(), content.begin()->size());
    } else {
        buf = std::string(content.begin()->get(), std::min(content.begin()->size(), max_len)) + "<truncated>";
        return std::string_view(buf);
    }
}
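
// A typical use of truncated_content_view() (as in handle_api_request()
// below) keeps the backing buffer alive in the caller for the duration of
// the log call:
//     std::string buf;
//     slogger.trace("Request: {}", truncated_content_view(content, buf));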

static tracing::trace_state_ptr maybe_trace_query(service::client_state& client_state, std::string_view username, sstring_view op, const chunked_content& query) {
    tracing::trace_state_ptr trace_state;
    tracing::tracing& tracing_instance = tracing::tracing::get_local_tracing_instance();
    if (tracing_instance.trace_next_query() || tracing_instance.slow_query_tracing_enabled()) {
        trace_state = create_tracing_session(tracing_instance);
        std::string buf;
        tracing::add_session_param(trace_state, "alternator_op", op);
        tracing::add_query(trace_state, truncated_content_view(query, buf));
        tracing::begin(trace_state, format("Alternator {}", op), client_state.get_client_address());
        if (!username.empty()) {
            tracing::set_username(trace_state, auth::authenticated_user(username));
        }
    }
    return trace_state;
}

future<executor::request_return_type> server::handle_api_request(std::unique_ptr<request> req) {
    _executor._stats.total_operations++;
    sstring target = req->get_header("X-Amz-Target");
    // target is DynamoDB API version followed by a dot '.' and operation type (e.g. CreateTable)
    auto dot = target.find('.');
    std::string_view op = (dot == sstring::npos) ? std::string_view() : std::string_view(target).substr(dot+1);
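    // For example, the header "X-Amz-Target: DynamoDB_20120810.GetItem"
    // yields op == "GetItem".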
    // JSON parsing can allocate up to roughly 2x the size of the raw
    // document, plus a few thousand bytes of overhead for maintenance.
    // TODO: consider the case where req->content_length is missing. Maybe
    // we need to take the content_length_limit and return some of the units
    // when we finish read_content_and_verify_signature?
    size_t mem_estimate = req->content_length * 2 + 8000;
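    // For example, a 1 MiB request body reserves 2 MiB + 8000 bytes from
    // the memory-limiter semaphore before the body is read and parsed.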
    auto units_fut = get_units(*_memory_limiter, mem_estimate);
    if (_memory_limiter->waiters()) {
        ++_executor._stats.requests_blocked_memory;
    }
    auto units = co_await std::move(units_fut);
    assert(req->content_stream);
    chunked_content content = co_await util::read_entire_stream(*req->content_stream);
    auto username = co_await verify_signature(*req, content);

    if (slogger.is_enabled(log_level::trace)) {
        std::string buf;
        slogger.trace("Request: {} {} {}", op, truncated_content_view(content, buf), req->_headers);
    }
    auto callback_it = _callbacks.find(op);
    if (callback_it == _callbacks.end()) {
        _executor._stats.unsupported_operations++;
        co_return api_error::unknown_operation(format("Unsupported operation {}", op));
    }
    if (_pending_requests.get_count() >= _max_concurrent_requests) {
        _executor._stats.requests_shed++;
        co_return api_error::request_limit_exceeded(format("too many in-flight requests (configured via max_concurrent_requests_per_shard): {}", _pending_requests.get_count()));
    }
    _pending_requests.enter();
    auto leave = defer([this] () noexcept { _pending_requests.leave(); });
    //FIXME: Client state can provide more context, e.g. client's endpoint address
    // Note: client_state cannot be moved or copied, so we construct it in
    // place, choosing the constructor arguments based on the username.
    executor::client_state client_state = username.empty()
        ? service::client_state{service::client_state::internal_tag()}
        : service::client_state{service::client_state::internal_tag(), _auth_service, _sl_controller, username};
    co_await client_state.maybe_update_per_service_level_params();

    tracing::trace_state_ptr trace_state = maybe_trace_query(client_state, username, op, content);
    tracing::trace(trace_state, "{}", op);
    rjson::value json_request = co_await _json_parser.parse(std::move(content));
    co_return co_await callback_it->second(_executor, client_state, trace_state,
            make_service_permit(std::move(units)), std::move(json_request), std::move(req));
}

void server::set_routes(routes& r) {
    api_handler* req_handler = new api_handler([this] (std::unique_ptr<request> req) mutable {
        return handle_api_request(std::move(req));
    });

    r.put(operation_type::POST, "/", req_handler);
    r.put(operation_type::GET, "/", new health_handler(_pending_requests));
    // The "/localnodes" request is a new Alternator feature, not supported by
    // DynamoDB and not required for DynamoDB compatibility. It allows a
    // client to retrieve - using a trivial HTTP request, without requiring
    // authentication - the list of all live nodes in the same data center of
    // the Alternator cluster. The client can use this list to balance its
    // request load to all the nodes in the same geographical region.
    // Note that this API exposes - openly without authentication - the
    // information on the cluster's members inside one data center. We do not
    // consider this to be a security risk, because an attacker can already
    // scan an entire subnet for nodes responding to the health request,
    // or even just scan for open ports.
    r.put(operation_type::GET, "/localnodes", new local_nodelist_handler(_pending_requests, _proxy, _gossiper));
    r.put(operation_type::OPTIONS, "/", new options_handler(_pending_requests));
}

//FIXME: A way to immediately invalidate the cache should be considered,
// e.g. when the system table which stores the keys is changed.
// For now, this propagation may take up to 1 minute.
server::server(executor& exec, service::storage_proxy& proxy, gms::gossiper& gossiper, auth::service& auth_service, qos::service_level_controller& sl_controller)
    : _http_server("http-alternator")
    , _https_server("https-alternator")
    , _executor(exec)
    , _proxy(proxy)
    , _gossiper(gossiper)
    , _auth_service(auth_service)
    , _sl_controller(sl_controller)
    , _key_cache(1024, 1min, slogger)
    , _enforce_authorization(false)
    , _enabled_servers{}
    , _pending_requests{}
    , _callbacks{
        {"CreateTable", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
            return e.create_table(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
        }},
        {"DescribeTable", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
            return e.describe_table(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
        }},
        {"DeleteTable", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
            return e.delete_table(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
        }},
        {"UpdateTable", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
            return e.update_table(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
        }},
        {"PutItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
            return e.put_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
        }},
        {"UpdateItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
            return e.update_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
        }},
        {"GetItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
            return e.get_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
        }},
        {"DeleteItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
            return e.delete_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
        }},
        {"ListTables", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
            return e.list_tables(client_state, std::move(permit), std::move(json_request));
        }},
        {"Scan", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
            return e.scan(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
        }},
        {"DescribeEndpoints", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
            return e.describe_endpoints(client_state, std::move(permit), std::move(json_request), req->get_header("Host"));
        }},
        {"BatchWriteItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
            return e.batch_write_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
        }},
        {"BatchGetItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
            return e.batch_get_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
        }},
        {"Query", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
            return e.query(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
        }},
        {"TagResource", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
            return e.tag_resource(client_state, std::move(permit), std::move(json_request));
        }},
        {"UntagResource", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
            return e.untag_resource(client_state, std::move(permit), std::move(json_request));
        }},
        {"ListTagsOfResource", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
            return e.list_tags_of_resource(client_state, std::move(permit), std::move(json_request));
        }},
        {"UpdateTimeToLive", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
            return e.update_time_to_live(client_state, std::move(permit), std::move(json_request));
        }},
        {"DescribeTimeToLive", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
            return e.describe_time_to_live(client_state, std::move(permit), std::move(json_request));
        }},
        {"ListStreams", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
            return e.list_streams(client_state, std::move(permit), std::move(json_request));
        }},
        {"DescribeStream", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
            return e.describe_stream(client_state, std::move(permit), std::move(json_request));
        }},
        {"GetShardIterator", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
            return e.get_shard_iterator(client_state, std::move(permit), std::move(json_request));
        }},
        {"GetRecords", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
            return e.get_records(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
        }},
        {"DescribeContinuousBackups", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
            return e.describe_continuous_backups(client_state, std::move(permit), std::move(json_request));
        }},
    } {
}

future<> server::init(net::inet_address addr, std::optional<uint16_t> port, std::optional<uint16_t> https_port, std::optional<tls::credentials_builder> creds,
        bool enforce_authorization, semaphore* memory_limiter, utils::updateable_value<uint32_t> max_concurrent_requests) {
    _memory_limiter = memory_limiter;
    _enforce_authorization = enforce_authorization;
    _max_concurrent_requests = std::move(max_concurrent_requests);
    if (!port && !https_port) {
        return make_exception_future<>(std::runtime_error("Either regular port or TLS port"
                " must be specified in order to init an alternator HTTP server instance"));
    }
    return seastar::async([this, addr, port, https_port, creds] {
        _executor.start().get();

        if (port) {
            set_routes(_http_server._routes);
            _http_server.set_content_length_limit(server::content_length_limit);
            _http_server.set_content_streaming(true);
            _http_server.listen(socket_address{addr, *port}).get();
            _enabled_servers.push_back(std::ref(_http_server));
        }
        if (https_port) {
            set_routes(_https_server._routes);
            _https_server.set_content_length_limit(server::content_length_limit);
            _https_server.set_content_streaming(true);
            auto server_creds = creds->build_reloadable_server_credentials([](const std::unordered_set<sstring>& files, std::exception_ptr ep) {
                if (ep) {
                    slogger.warn("Exception loading {}: {}", files, ep);
                } else {
                    slogger.info("Reloaded {}", files);
                }
            }).get();
            _https_server.listen(socket_address{addr, *https_port}, std::move(server_creds)).get();
            _enabled_servers.push_back(std::ref(_https_server));
        }
    });
}

future<> server::stop() {
    return parallel_for_each(_enabled_servers, [] (http_server& server) {
        return server.stop();
    }).then([this] {
        return _pending_requests.close();
    }).then([this] {
        return _json_parser.stop();
    });
}

server::json_parser::json_parser() : _run_parse_json_thread(async([this] {
    while (true) {
        _document_waiting.wait().get();
        if (_as.abort_requested()) {
            return;
        }
        try {
            _parsed_document = rjson::parse_yieldable(std::move(_raw_document));
            _current_exception = nullptr;
        } catch (...) {
            _current_exception = std::current_exception();
        }
        _document_parsed.signal();
    }
})) {
}

future<rjson::value> server::json_parser::parse(chunked_content&& content) {
    if (content.size() < yieldable_parsing_threshold) {
        return make_ready_future<rjson::value>(rjson::parse(std::move(content)));
    }
    return with_semaphore(_parsing_sem, 1, [this, content = std::move(content)] () mutable {
        _raw_document = std::move(content);
        _document_waiting.signal();
        return _document_parsed.wait().then([this] {
            if (_current_exception) {
                return make_exception_future<rjson::value>(_current_exception);
            }
            return make_ready_future<rjson::value>(std::move(_parsed_document));
        });
    });
}
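
// For example, with yieldable_parsing_threshold = 16 KB (see server.hh), a
// small request body is parsed synchronously on the caller's shard, while a
// larger one is handed to the dedicated parsing thread set up in the
// constructor above, which yields periodically instead of stalling the
// reactor during a long parse.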

future<> server::json_parser::stop() {
    _as.request_abort();
    _document_waiting.signal();
    _document_parsed.broken();
    return std::move(_run_parse_json_thread);
}

const char* api_error::what() const noexcept {
    if (_what_string.empty()) {
        _what_string = format("{} {}: {}", std::to_underlying(_http_code), _type, _msg);
    }
    return _what_string.c_str();
}
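
// For example, an api_error::validation(...) - HTTP code 400, type
// "ValidationException" - produces a what() string of the form
// "400 ValidationException: <message>".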

}
@@ -1,84 +0,0 @@
/*
 * Copyright 2019-present ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 */

#pragma once

#include "alternator/executor.hh"
#include <seastar/core/future.hh>
#include <seastar/core/condition-variable.hh>
#include <seastar/http/httpd.hh>
#include <seastar/net/tls.hh>
#include <optional>
#include "alternator/auth.hh"
#include "service/qos/service_level_controller.hh"
#include "utils/small_vector.hh"
#include "utils/updateable_value.hh"
#include <seastar/core/units.hh>

namespace alternator {

using chunked_content = rjson::chunked_content;

class server {
    static constexpr size_t content_length_limit = 16*MB;
    using alternator_callback = std::function<future<executor::request_return_type>(executor&, executor::client_state&,
            tracing::trace_state_ptr, service_permit, rjson::value, std::unique_ptr<http::request>)>;
    using alternator_callbacks_map = std::unordered_map<std::string_view, alternator_callback>;

    httpd::http_server _http_server;
    httpd::http_server _https_server;
    executor& _executor;
    service::storage_proxy& _proxy;
    gms::gossiper& _gossiper;
    auth::service& _auth_service;
    qos::service_level_controller& _sl_controller;

    key_cache _key_cache;
    bool _enforce_authorization;
    utils::small_vector<std::reference_wrapper<seastar::httpd::http_server>, 2> _enabled_servers;
    gate _pending_requests;
    alternator_callbacks_map _callbacks;

    semaphore* _memory_limiter;
    utils::updateable_value<uint32_t> _max_concurrent_requests;

    class json_parser {
        static constexpr size_t yieldable_parsing_threshold = 16*KB;
        chunked_content _raw_document;
        rjson::value _parsed_document;
        std::exception_ptr _current_exception;
        semaphore _parsing_sem{1};
        condition_variable _document_waiting;
        condition_variable _document_parsed;
        abort_source _as;
        future<> _run_parse_json_thread;
    public:
        json_parser();
        // Moving a chunked_content into parse() allows parse() to free each
        // chunk as soon as it is parsed, so when chunks are relatively small,
        // we don't need to store the sum of unparsed and parsed sizes.
        future<rjson::value> parse(chunked_content&& content);
        future<> stop();
    };
    json_parser _json_parser;

public:
    server(executor& executor, service::storage_proxy& proxy, gms::gossiper& gossiper, auth::service& service, qos::service_level_controller& sl_controller);

    future<> init(net::inet_address addr, std::optional<uint16_t> port, std::optional<uint16_t> https_port, std::optional<tls::credentials_builder> creds,
            bool enforce_authorization, semaphore* memory_limiter, utils::updateable_value<uint32_t> max_concurrent_requests);
    future<> stop();
private:
    void set_routes(seastar::httpd::routes& r);
    // If verification succeeds, returns the authenticated user's username
    future<std::string> verify_signature(const seastar::http::request&, const chunked_content&);
    future<executor::request_return_type> handle_api_request(std::unique_ptr<http::request> req);
};

}
@@ -1,101 +0,0 @@
/*
 * Copyright 2019-present ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 */

#include "stats.hh"
#include "utils/histogram_metrics_helper.hh"
#include <seastar/core/metrics.hh>

namespace alternator {

const char* ALTERNATOR_METRICS = "alternator";

stats::stats() : api_operations{} {
    // Register the per-operation metrics, labeled by the operation name.
seastar::metrics::label op("op");
|
||||
|
||||
_metrics.add_group("alternator", {
|
||||
#define OPERATION(name, CamelCaseName) \
|
||||
seastar::metrics::make_total_operations("operation", api_operations.name, \
|
||||
seastar::metrics::description("number of operations via Alternator API"), {op(CamelCaseName)}).set_skip_when_empty(),
|
||||
#define OPERATION_LATENCY(name, CamelCaseName) \
|
||||
seastar::metrics::make_histogram("op_latency", \
|
||||
seastar::metrics::description("Latency histogram of an operation via Alternator API"), {op(CamelCaseName)}, [this]{return to_metrics_histogram(api_operations.name.histogram());}).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(), \
|
||||
seastar::metrics::make_summary("op_latency_summary", \
|
||||
seastar::metrics::description("Latency summary of an operation via Alternator API"), [this]{return to_metrics_summary(api_operations.name.summary());})(op(CamelCaseName)).set_skip_when_empty(),
|
||||
OPERATION(batch_get_item, "BatchGetItem")
|
||||
OPERATION(batch_write_item, "BatchWriteItem")
|
||||
OPERATION(create_backup, "CreateBackup")
|
||||
OPERATION(create_global_table, "CreateGlobalTable")
|
||||
OPERATION(create_table, "CreateTable")
|
||||
OPERATION(delete_backup, "DeleteBackup")
|
||||
OPERATION(delete_item, "DeleteItem")
|
||||
OPERATION(delete_table, "DeleteTable")
|
||||
OPERATION(describe_backup, "DescribeBackup")
|
||||
OPERATION(describe_continuous_backups, "DescribeContinuousBackups")
|
||||
OPERATION(describe_endpoints, "DescribeEndpoints")
|
||||
OPERATION(describe_global_table, "DescribeGlobalTable")
|
||||
OPERATION(describe_global_table_settings, "DescribeGlobalTableSettings")
|
||||
OPERATION(describe_limits, "DescribeLimits")
|
||||
OPERATION(describe_table, "DescribeTable")
|
||||
OPERATION(describe_time_to_live, "DescribeTimeToLive")
|
||||
OPERATION(get_item, "GetItem")
|
||||
OPERATION(list_backups, "ListBackups")
|
||||
OPERATION(list_global_tables, "ListGlobalTables")
|
||||
OPERATION(list_tables, "ListTables")
|
||||
OPERATION(list_tags_of_resource, "ListTagsOfResource")
|
||||
OPERATION(put_item, "PutItem")
|
||||
OPERATION(query, "Query")
|
||||
OPERATION(restore_table_from_backup, "RestoreTableFromBackup")
|
||||
OPERATION(restore_table_to_point_in_time, "RestoreTableToPointInTime")
|
||||
OPERATION(scan, "Scan")
|
||||
OPERATION(tag_resource, "TagResource")
|
||||
OPERATION(transact_get_items, "TransactGetItems")
|
||||
OPERATION(transact_write_items, "TransactWriteItems")
|
||||
OPERATION(untag_resource, "UntagResource")
|
||||
OPERATION(update_continuous_backups, "UpdateContinuousBackups")
|
||||
OPERATION(update_global_table, "UpdateGlobalTable")
|
||||
OPERATION(update_global_table_settings, "UpdateGlobalTableSettings")
|
||||
OPERATION(update_item, "UpdateItem")
|
||||
OPERATION(update_table, "UpdateTable")
|
||||
OPERATION(update_time_to_live, "UpdateTimeToLive")
|
||||
OPERATION_LATENCY(put_item_latency, "PutItem")
|
||||
OPERATION_LATENCY(get_item_latency, "GetItem")
|
||||
OPERATION_LATENCY(delete_item_latency, "DeleteItem")
|
||||
OPERATION_LATENCY(update_item_latency, "UpdateItem")
|
||||
OPERATION(list_streams, "ListStreams")
|
||||
OPERATION(describe_stream, "DescribeStream")
|
||||
OPERATION(get_shard_iterator, "GetShardIterator")
|
||||
OPERATION(get_records, "GetRecords")
|
||||
OPERATION_LATENCY(get_records_latency, "GetRecords")
|
||||
});
|
||||
_metrics.add_group("alternator", {
|
||||
seastar::metrics::make_total_operations("unsupported_operations", unsupported_operations,
|
||||
seastar::metrics::description("number of unsupported operations via Alternator API")),
|
||||
seastar::metrics::make_total_operations("total_operations", total_operations,
|
||||
seastar::metrics::description("number of total operations via Alternator API")),
|
||||
seastar::metrics::make_total_operations("reads_before_write", reads_before_write,
|
||||
seastar::metrics::description("number of performed read-before-write operations")),
|
||||
seastar::metrics::make_total_operations("write_using_lwt", write_using_lwt,
|
||||
seastar::metrics::description("number of writes that used LWT")),
|
||||
seastar::metrics::make_total_operations("shard_bounce_for_lwt", shard_bounce_for_lwt,
|
||||
seastar::metrics::description("number writes that had to be bounced from this shard because of LWT requirements")),
|
||||
seastar::metrics::make_total_operations("requests_blocked_memory", requests_blocked_memory,
|
||||
seastar::metrics::description("Counts a number of requests blocked due to memory pressure.")),
|
||||
seastar::metrics::make_total_operations("requests_shed", requests_shed,
|
||||
seastar::metrics::description("Counts a number of requests shed due to overload.")),
|
||||
seastar::metrics::make_total_operations("filtered_rows_read_total", cql_stats.filtered_rows_read_total,
|
||||
seastar::metrics::description("number of rows read during filtering operations")),
|
||||
seastar::metrics::make_total_operations("filtered_rows_matched_total", cql_stats.filtered_rows_matched_total,
|
||||
seastar::metrics::description("number of rows read and matched during filtering operations")),
|
||||
seastar::metrics::make_total_operations("filtered_rows_dropped_total", [this] { return cql_stats.filtered_rows_read_total - cql_stats.filtered_rows_matched_total; },
|
||||
seastar::metrics::description("number of rows read and dropped during filtering operations")),
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
@@ -1,90 +0,0 @@
/*
 * Copyright 2019-present ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 */

#pragma once

#include <cstdint>

#include <seastar/core/metrics_registration.hh>
#include "utils/histogram.hh"
#include "cql3/stats.hh"

namespace alternator {

// Object holding per-shard statistics related to Alternator.
// While this object is alive, these metrics are also registered to be
// visible by the metrics REST API, with the "alternator" prefix.
class stats {
public:
    stats();
    // Count of DynamoDB API operations by types
    struct {
        uint64_t batch_get_item = 0;
        uint64_t batch_write_item = 0;
        uint64_t create_backup = 0;
        uint64_t create_global_table = 0;
        uint64_t create_table = 0;
        uint64_t delete_backup = 0;
        uint64_t delete_item = 0;
        uint64_t delete_table = 0;
        uint64_t describe_backup = 0;
        uint64_t describe_continuous_backups = 0;
        uint64_t describe_endpoints = 0;
        uint64_t describe_global_table = 0;
        uint64_t describe_global_table_settings = 0;
        uint64_t describe_limits = 0;
        uint64_t describe_table = 0;
        uint64_t describe_time_to_live = 0;
        uint64_t get_item = 0;
        uint64_t list_backups = 0;
        uint64_t list_global_tables = 0;
        uint64_t list_tables = 0;
        uint64_t list_tags_of_resource = 0;
        uint64_t put_item = 0;
        uint64_t query = 0;
        uint64_t restore_table_from_backup = 0;
        uint64_t restore_table_to_point_in_time = 0;
        uint64_t scan = 0;
        uint64_t tag_resource = 0;
        uint64_t transact_get_items = 0;
        uint64_t transact_write_items = 0;
        uint64_t untag_resource = 0;
        uint64_t update_continuous_backups = 0;
        uint64_t update_global_table = 0;
        uint64_t update_global_table_settings = 0;
        uint64_t update_item = 0;
        uint64_t update_table = 0;
        uint64_t update_time_to_live = 0;
        uint64_t list_streams = 0;
        uint64_t describe_stream = 0;
        uint64_t get_shard_iterator = 0;
        uint64_t get_records = 0;

        utils::timed_rate_moving_average_summary_and_histogram put_item_latency;
        utils::timed_rate_moving_average_summary_and_histogram get_item_latency;
        utils::timed_rate_moving_average_summary_and_histogram delete_item_latency;
        utils::timed_rate_moving_average_summary_and_histogram update_item_latency;
        utils::timed_rate_moving_average_summary_and_histogram get_records_latency;
    } api_operations;
    // Miscellaneous event counters
    uint64_t total_operations = 0;
    uint64_t unsupported_operations = 0;
    uint64_t reads_before_write = 0;
    uint64_t write_using_lwt = 0;
    uint64_t shard_bounce_for_lwt = 0;
    uint64_t requests_blocked_memory = 0;
    uint64_t requests_shed = 0;
    // CQL-derived stats
    cql3::cql_stats cql_stats;
private:
    // The metric_groups object holds this stat object's metrics registered
    // as long as the stats object is alive.
    seastar::metrics::metric_groups _metrics;
};

}
File diff suppressed because it is too large
@@ -1,862 +0,0 @@
/*
 * Copyright 2021-present ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 */

#include <chrono>
#include <cstdint>
#include <exception>
#include <optional>
#include <seastar/core/sstring.hh>
#include <seastar/core/coroutine.hh>
#include <seastar/core/sleep.hh>
#include <seastar/core/future.hh>
#include <seastar/core/lowres_clock.hh>
#include <seastar/coroutine/maybe_yield.hh>
#include <boost/multiprecision/cpp_int.hpp>

#include "exceptions/exceptions.hh"
#include "gms/gossiper.hh"
#include "gms/inet_address.hh"
#include "inet_address_vectors.hh"
#include "locator/abstract_replication_strategy.hh"
#include "log.hh"
#include "gc_clock.hh"
#include "replica/database.hh"
#include "service/client_state.hh"
#include "service_permit.hh"
#include "timestamp.hh"
#include "service/storage_proxy.hh"
#include "service/pager/paging_state.hh"
#include "service/pager/query_pagers.hh"
#include "gms/feature_service.hh"
#include "mutation/mutation.hh"
#include "types/types.hh"
#include "types/map.hh"
#include "utils/rjson.hh"
#include "utils/big_decimal.hh"
#include "cql3/selection/selection.hh"
#include "cql3/values.hh"
#include "cql3/query_options.hh"
#include "cql3/column_identifier.hh"
#include "alternator/executor.hh"
#include "alternator/controller.hh"
#include "alternator/serialization.hh"
#include "dht/sharder.hh"
#include "db/config.hh"
#include "db/tags/utils.hh"

#include "ttl.hh"

static logging::logger tlogger("alternator_ttl");

namespace alternator {

// We record which expiration-time attribute is enabled on a table using a
// tag, TTL_TAG_KEY.
// Currently, the *value* of this tag is simply the name of the attribute,
// and the expiration scanner interprets it as an Alternator attribute name -
// it can refer to a real column or, if that doesn't exist, to a member of
// the ":attrs" map column. Although this is designed for Alternator, it may
// be good enough for CQL as well (there, the ":attrs" column won't exist).
static const sstring TTL_TAG_KEY("system:ttl_attribute");
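// For example, enabling TTL with AttributeName "expiration" (see
// update_time_to_live() below) leaves the table tagged with
// {"system:ttl_attribute": "expiration"}.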

future<executor::request_return_type> executor::update_time_to_live(client_state& client_state, service_permit permit, rjson::value request) {
    _stats.api_operations.update_time_to_live++;
    if (!_proxy.data_dictionary().features().alternator_ttl) {
        co_return api_error::unknown_operation("UpdateTimeToLive not yet supported. Experimental support is available if the 'alternator-ttl' experimental feature is enabled on all nodes.");
    }

    schema_ptr schema = get_table(_proxy, request);
    rjson::value* spec = rjson::find(request, "TimeToLiveSpecification");
    if (!spec || !spec->IsObject()) {
        co_return api_error::validation("UpdateTimeToLive missing mandatory TimeToLiveSpecification");
    }
    const rjson::value* v = rjson::find(*spec, "Enabled");
    if (!v || !v->IsBool()) {
        co_return api_error::validation("UpdateTimeToLive requires boolean Enabled");
    }
    bool enabled = v->GetBool();
    // Alternator TTL doesn't yet work when the table uses tablets (#16567)
    if (enabled && _proxy.local_db().find_keyspace(schema->ks_name()).get_replication_strategy().uses_tablets()) {
        co_return api_error::validation("TTL not yet supported on a table using tablets (issue #16567). "
                "Create a table with the tag 'experimental:initial_tablets' set to 'none' to use vnodes.");
    }
    v = rjson::find(*spec, "AttributeName");
    if (!v || !v->IsString()) {
        co_return api_error::validation("UpdateTimeToLive requires string AttributeName");
    }
    // Although the DynamoDB documentation specifies that attribute names
    // should be between 1 and 64K bytes, in practice, it only allows
    // between 1 and 255 bytes. There are no other limitations on which
    // characters are allowed in the name.
    if (v->GetStringLength() < 1 || v->GetStringLength() > 255) {
        co_return api_error::validation("The length of AttributeName must be between 1 and 255");
    }
    sstring attribute_name(v->GetString(), v->GetStringLength());

    co_await db::modify_tags(_mm, schema->ks_name(), schema->cf_name(), [&](std::map<sstring, sstring>& tags_map) {
        if (enabled) {
            if (tags_map.contains(TTL_TAG_KEY)) {
                throw api_error::validation("TTL is already enabled");
            }
            tags_map[TTL_TAG_KEY] = attribute_name;
        } else {
            auto i = tags_map.find(TTL_TAG_KEY);
            if (i == tags_map.end()) {
                throw api_error::validation("TTL is already disabled");
            } else if (i->second != attribute_name) {
                throw api_error::validation(format(
                        "Requested to disable TTL on attribute {}, but a different attribute {} is enabled.",
                        attribute_name, i->second));
            }
            tags_map.erase(TTL_TAG_KEY);
        }
    });

    // Prepare the response, which contains a TimeToLiveSpecification
    // basically identical to the request's
    rjson::value response = rjson::empty_object();
    rjson::add(response, "TimeToLiveSpecification", std::move(*spec));
    co_return make_jsonable(std::move(response));
}
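
// A minimal request this handler accepts looks like (the table name is
// illustrative; the field names are from the DynamoDB API):
//     {"TableName": "tbl",
//      "TimeToLiveSpecification": {"Enabled": true, "AttributeName": "expiration"}}
// and the response echoes the same TimeToLiveSpecification back.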

future<executor::request_return_type> executor::describe_time_to_live(client_state& client_state, service_permit permit, rjson::value request) {
    _stats.api_operations.describe_time_to_live++;
    schema_ptr schema = get_table(_proxy, request);
    std::map<sstring, sstring> tags_map = get_tags_of_table_or_throw(schema);
    rjson::value desc = rjson::empty_object();
    auto i = tags_map.find(TTL_TAG_KEY);
    if (i == tags_map.end()) {
        rjson::add(desc, "TimeToLiveStatus", "DISABLED");
    } else {
        rjson::add(desc, "TimeToLiveStatus", "ENABLED");
        rjson::add(desc, "AttributeName", rjson::from_string(i->second));
    }
    rjson::value response = rjson::empty_object();
    rjson::add(response, "TimeToLiveDescription", std::move(desc));
    co_return make_jsonable(std::move(response));
}

// expiration_service is a sharded service responsible for cleaning up expired
// items in all tables with per-item expiration enabled. Currently, this means
// Alternator tables with TTL configured via a UpdateTimeToLive request.
//
// Here is a brief overview of how the expiration service works:
//
// An expiration thread on each shard periodically scans the items (i.e.,
// rows) owned by this shard, looking for items whose chosen expiration-time
// attribute indicates they are expired, and deletes those items.
// The expiration-time "attribute" can be either an actual Scylla column
// (must be numeric) or an Alternator "attribute" - i.e., an element in
// the ATTRS_COLUMN_NAME map<utf8,bytes> column where the numeric expiration
// time is encoded in DynamoDB's JSON encoding inside the bytes value.
// To avoid scanning the same items RF times in RF replicas, only one node is
// responsible for scanning a token range at a time. Normally, this is the
// node owning this range as a "primary range" (the first node in the ring
// with this range), but when this node is down, the secondary owner (the
// second in the ring) may take over.
// An expiration thread is responsible for all tables which need expiration
// scans. Currently, the different tables are scanned sequentially (not in
// parallel).
// The expiration thread scans items using CL=QUORUM to ensure that it reads
// a consistent expiration-time attribute. This means that the items are read
// locally, and QUORUM-1 additional nodes (one additional node when RF=3)
// need to read the data and send digests.
// When the expiration thread decides that an item has expired and wants
// to delete it, it does so using a CL=QUORUM write. This allows the
// deletion to be visible to consistent (quorum) reads. The deletion,
// like user deletions, will also appear on the CDC log and therefore on
// Alternator Streams if enabled - currently as an ordinary delete (the
// userIdentity flag is currently missing; this is issue #11523).
expiration_service::expiration_service(data_dictionary::database db, service::storage_proxy& proxy, gms::gossiper& g)
    : _db(db)
    , _proxy(proxy)
    , _gossiper(g)
{
}

// Convert the big_decimal used to represent expiration time to an integer.
// Any fractional part is dropped. If the number is negative or invalid,
// 0 is returned, and if it's too high, the maximum unsigned long is returned.
static unsigned long bigdecimal_to_ul(const big_decimal& bd) {
    // The big_decimal format has an integer mantissa of arbitrary length
    // "unscaled_value" and then a (power of 10) exponent "scale".
    if (bd.unscaled_value() <= 0) {
        return 0;
    }
    if (bd.scale() == 0) {
        // The fast path, when the expiration time is an integer, scale==0.
        return static_cast<unsigned long>(bd.unscaled_value());
    }
    // Because the mantissa can be of arbitrary length, we work on it
    // as a string. TODO: find a less ugly algorithm.
    auto str = bd.unscaled_value().str();
    if (bd.scale() > 0) {
        int len = str.length();
        if (len < bd.scale()) {
            return 0;
        }
        str = str.substr(0, len-bd.scale());
    } else {
        if (bd.scale() < -20) {
            return std::numeric_limits<unsigned long>::max();
        }
        for (int i = 0; i < -bd.scale(); i++) {
            str.push_back('0');
        }
    }
    // strtoul() returns ULONG_MAX if the number is too large, or 0 if not
    // a number.
    return strtoul(str.c_str(), nullptr, 10);
}
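
// Examples of the conversion above (the stored value is
// unscaled_value * 10^-scale):
//     unscaled_value=12345, scale=2  -> 123.45 -> returns 123
//     unscaled_value=5,     scale=-3 -> 5000   -> returns 5000
//     unscaled_value=-7,    scale=0  -> -7     -> returns 0 (negative input)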

// The following is_expired() functions all check if an item with the given
// expiration time has expired, according to the DynamoDB API rules.
// The rules are:
// 1. If the expiration time attribute's value is not a number type,
//    the item is not expired.
// 2. The expiration time is measured in seconds since the UNIX epoch.
// 3. If the expiration time is more than 5 years in the past, it is assumed
//    to be malformed and ignored - and the item does not expire.
static bool is_expired(gc_clock::time_point expiration_time, gc_clock::time_point now) {
    return expiration_time <= now &&
           expiration_time > now - std::chrono::years(5);
}

static bool is_expired(const big_decimal& expiration_time, gc_clock::time_point now) {
    unsigned long t = bigdecimal_to_ul(expiration_time);
    // We assume - and the assumption turns out to be correct - that the
    // epoch of gc_clock::time_point and the one used by the DynamoDB protocol
    // are the same (the UNIX epoch in UTC). The resolution (seconds) is also
    // the same.
    return is_expired(gc_clock::time_point(gc_clock::duration(std::chrono::seconds(t))), now);
}

static bool is_expired(const rjson::value& expiration_time, gc_clock::time_point now) {
    std::optional<big_decimal> n = try_unwrap_number(expiration_time);
    return n && is_expired(*n, now);
}
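
// A worked example: with now = 1,700,000,000 (November 2023), an expiration
// attribute of 1,600,000,000 (September 2020) is in the past and only about
// 3.2 years old, so the item is expired. An attribute of 1,500,000,000
// (July 2017) is more than 5 years before 'now', so under rule 3 above the
// item does NOT expire.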

// expire_item() expires an item - i.e., deletes it as appropriate for
// expiration - with CL=QUORUM and (FIXME!) in a way Alternator Streams
// understands it is an expiration event - not a user-initiated deletion.
static future<> expire_item(service::storage_proxy& proxy,
        const service::query_state& qs,
        const std::vector<managed_bytes_opt>& row,
        schema_ptr schema,
        api::timestamp_type ts) {
    // Prepare the row key to delete.
    // NOTICE: the order of columns is guaranteed by the fact that selection::wildcard
    // is used, which indicates that columns appear in the order defined by
    // schema::all_columns_in_select_order() - partition key columns go first,
    // immediately followed by clustering key columns.
    std::vector<bytes> exploded_pk;
    const unsigned pk_size = schema->partition_key_size();
    const unsigned ck_size = schema->clustering_key_size();
    for (unsigned c = 0; c < pk_size; ++c) {
        const auto& row_c = row[c];
        if (!row_c) {
            // This shouldn't happen - all key columns must have values.
            // But if it ever happens, let's just *not* expire the item.
            // FIXME: log or increment a metric if this happens.
            return make_ready_future<>();
        }
        exploded_pk.push_back(to_bytes(*row_c));
    }
    auto pk = partition_key::from_exploded(exploded_pk);
    mutation m(schema, pk);
    // If there's no clustering key, a tombstone should be created directly
    // on a partition, not on a clustering row - otherwise it will look like
    // an open-ended range tombstone, which will crash on KA/LA sstable format.
    // See issue #6035
    if (ck_size == 0) {
        m.partition().apply(tombstone(ts, gc_clock::now()));
    } else {
        std::vector<bytes> exploded_ck;
        for (unsigned c = pk_size; c < pk_size + ck_size; ++c) {
            const auto& row_c = row[c];
            if (!row_c) {
                // This shouldn't happen - all key columns must have values.
                // But if it ever happens, let's just *not* expire the item.
                // FIXME: log or increment a metric if this happens.
                return make_ready_future<>();
            }
            exploded_ck.push_back(to_bytes(*row_c));
        }
        auto ck = clustering_key::from_exploded(exploded_ck);
        m.partition().clustered_row(*schema, ck).apply(tombstone(ts, gc_clock::now()));
    }
    std::vector<mutation> mutations;
    mutations.push_back(std::move(m));
    return proxy.mutate(std::move(mutations),
            db::consistency_level::LOCAL_QUORUM,
            executor::default_timeout(), // FIXME - which timeout?
            qs.get_trace_state(), qs.get_permit(),
            db::allow_per_partition_rate_limit::no);
}

static size_t random_offset(size_t min, size_t max) {
    static thread_local std::default_random_engine re{std::random_device{}()};
    std::uniform_int_distribution<size_t> dist(min, max);
    return dist(re);
}

// Get a list of secondary token ranges for the given node, and the primary
// node responsible for each of these token ranges.
// A "secondary range" is a range of tokens where for each token, the second
// node (in ring order) out of the RF replicas that hold this token is the
// given node.
// In the expiration scanner, we want to scan a secondary range but only if
// this range's primary node is down. For this we need to return not just
// a list of this node's secondary ranges - but also the primary owner of
// each of those ranges.
static future<std::vector<std::pair<dht::token_range, gms::inet_address>>> get_secondary_ranges(
        const locator::effective_replication_map_ptr& erm,
        gms::inet_address ep) {
    const auto& tm = *erm->get_token_metadata_ptr();
    const auto& sorted_tokens = tm.sorted_tokens();
    std::vector<std::pair<dht::token_range, gms::inet_address>> ret;
    if (sorted_tokens.empty()) {
        on_internal_error(tlogger, "Token metadata is empty");
    }
    auto prev_tok = sorted_tokens.back();
    for (const auto& tok : sorted_tokens) {
        co_await coroutine::maybe_yield();
        inet_address_vector_replica_set eps = erm->get_natural_endpoints(tok);
        if (eps.size() <= 1 || eps[1] != ep) {
            prev_tok = tok;
            continue;
        }
        // Add the range (prev_tok, tok] to ret. However, if the range wraps
        // around, split it to two non-wrapping ranges.
        if (prev_tok < tok) {
            ret.emplace_back(
                    dht::token_range{
                        dht::token_range::bound(prev_tok, false),
                        dht::token_range::bound(tok, true)},
                    eps[0]);
        } else {
            ret.emplace_back(
                    dht::token_range{
                        dht::token_range::bound(prev_tok, false),
                        std::nullopt},
                    eps[0]);
            ret.emplace_back(
                    dht::token_range{
                        std::nullopt,
                        dht::token_range::bound(tok, true)},
                    eps[0]);
        }
        prev_tok = tok;
    }
    co_return ret;
}
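
// For example, with sorted tokens {10, 50, 90} and this node being the
// second replica for all of them, the loop above considers (90, 10]
// (which wraps and is split into (90, +inf) and (-inf, 10]), then (10, 50]
// and (50, 90], each paired with its primary owner eps[0].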


// A class for iterating over all the token ranges *owned* by this shard.
// To avoid code duplication, it is a template with two distinct cases -
// <primary> and <secondary>:
//
// In the <primary> case, we consider a token *owned* by this shard if:
// 1. This node is a replica for this token.
// 2. Moreover, this node is the *primary* replica of the token (i.e., the
//    first replica in the ring).
// 3. In this node, this shard is responsible for this token.
// We will use this definition of which shard in the cluster owns which tokens
// to split the expiration scanner's work between all the shards of the
// system.
//
// In the <secondary> case, we consider a token *owned* by this shard if:
// 1. This node is the *secondary* replica for this token (i.e., the second
//    replica in the ring).
// 2. The primary replica for this token is currently marked down.
// 3. In this node, this shard is responsible for this token.
// We use the <secondary> case to handle the possibility that some of the
// nodes in the system are down. A dead node will not be expiring
// the tokens owned by it, so we want the secondary owner to take over its
// primary ranges.
//
// FIXME: need to decide how to choose primary ranges in multi-DC setup!
// We could call get_primary_ranges_within_dc() below instead of get_primary_ranges().
// NOTICE: Iteration currently starts from a random token range in order to improve
// the chances of covering all ranges during a scan when restarts occur.
// A more deterministic way would be to regularly persist the scanning state,
// but that incurs overhead that we want to avoid if not needed.
//
// FIXME: Check if this algorithm is safe with tablet migration.
// https://github.com/scylladb/scylladb/issues/16567

// ranges_holder_primary holds just the primary ranges themselves
class ranges_holder_primary {
    dht::token_range_vector _token_ranges;
public:
    explicit ranges_holder_primary(dht::token_range_vector token_ranges) : _token_ranges(std::move(token_ranges)) {}
    static future<ranges_holder_primary> make(const locator::vnode_effective_replication_map_ptr& erm, gms::inet_address ep) {
        co_return ranges_holder_primary(co_await erm->get_primary_ranges(ep));
    }
    std::size_t size() const { return _token_ranges.size(); }
    const dht::token_range& operator[](std::size_t i) const {
        return _token_ranges[i];
    }
    bool should_skip(std::size_t i) const {
        return false;
    }
};
// ranges_holder_secondary holds the secondary token ranges plus each
// range's primary owner, needed to implement should_skip().
class ranges_holder_secondary {
    std::vector<std::pair<dht::token_range, gms::inet_address>> _token_ranges;
    const gms::gossiper& _gossiper;
public:
    explicit ranges_holder_secondary(std::vector<std::pair<dht::token_range, gms::inet_address>> token_ranges, const gms::gossiper& g)
        : _token_ranges(std::move(token_ranges))
        , _gossiper(g) {}
    static future<ranges_holder_secondary> make(const locator::effective_replication_map_ptr& erm, gms::inet_address ep, const gms::gossiper& g) {
        co_return ranges_holder_secondary(co_await get_secondary_ranges(erm, ep), g);
    }
    std::size_t size() const { return _token_ranges.size(); }
    const dht::token_range& operator[](std::size_t i) const {
        return _token_ranges[i].first;
    }
    // Range i should be skipped if its primary owner is alive.
    bool should_skip(std::size_t i) const {
        return _gossiper.is_alive(_token_ranges[i].second);
    }
};

template<class primary_or_secondary_t>
class token_ranges_owned_by_this_shard {
    schema_ptr _s;
    locator::effective_replication_map_ptr _erm;
    // _token_ranges will contain a list of token ranges owned by this node.
    // We'll further need to split each such range to the pieces owned by
    // the current shard, using _intersecter.
    const primary_or_secondary_t _token_ranges;
    // NOTICE: _range_idx is used modulo _token_ranges size when accessing
    // the data to ensure that it doesn't go out of bounds
    size_t _range_idx;
    size_t _end_idx;
    std::optional<dht::selective_token_range_sharder> _intersecter;
public:
    token_ranges_owned_by_this_shard(schema_ptr s, primary_or_secondary_t token_ranges)
        : _s(s)
        , _erm(s->table().get_effective_replication_map())
        , _token_ranges(std::move(token_ranges))
        , _range_idx(random_offset(0, _token_ranges.size() - 1))
        , _end_idx(_range_idx + _token_ranges.size())
    {
        tlogger.debug("Generating token ranges starting from base range {} of {}", _range_idx, _token_ranges.size());
    }

    // Return the next token_range owned by this shard, or nullopt when the
    // iteration ends.
    std::optional<dht::token_range> next() {
        // We may need three or more iterations in the following loop if a
        // vnode doesn't intersect with the given shard at all (such a small
        // vnode is unlikely, but possible). The loop cannot be infinite
        // because each iteration of the loop advances _range_idx.
        for (;;) {
            if (_intersecter) {
                std::optional<dht::token_range> ret = _intersecter->next();
                if (ret) {
                    return ret;
                }
                // done with this range, go to next one
                ++_range_idx;
                _intersecter = std::nullopt;
            }
            if (_range_idx == _end_idx) {
                return std::nullopt;
            }
            // If should_skip(), the range should be skipped. This happens for
            // a secondary range whose primary owning node is still alive.
            while (_token_ranges.should_skip(_range_idx % _token_ranges.size())) {
                ++_range_idx;
                if (_range_idx == _end_idx) {
                    return std::nullopt;
                }
            }
            _intersecter.emplace(_erm->get_sharder(*_s), _token_ranges[_range_idx % _token_ranges.size()], this_shard_id());
        }
    }

    // Same as next(), just return a partition_range instead of token_range
    std::optional<dht::partition_range> next_partition_range() {
        std::optional<dht::token_range> ret = next();
        if (ret) {
            return dht::to_partition_range(*ret);
        } else {
            return std::nullopt;
        }
    }
};

// Precomputed information needed to perform a scan on partition ranges
struct scan_ranges_context {
    schema_ptr s;
    bytes column_name;
    std::optional<std::string> member;

    service::client_state internal_client_state;
    ::shared_ptr<cql3::selection::selection> selection;
    std::unique_ptr<service::query_state> query_state_ptr;
    std::unique_ptr<cql3::query_options> query_options;
    ::lw_shared_ptr<query::read_command> command;

    scan_ranges_context(schema_ptr s, service::storage_proxy& proxy, bytes column_name, std::optional<std::string> member)
        : s(s)
        , column_name(column_name)
        , member(member)
        , internal_client_state(service::client_state::internal_tag())
    {
        // FIXME: don't read entire items - read only the parts we need.
        // We must read the key columns (to be able to delete) and also
        // the requested attribute. If the requested attribute is a map's
        // member we may be forced to read the entire map - but it would
        // be good if we could read only the single member of the map - it
        // should be possible (and a must for issue #7751!).
        lw_shared_ptr<service::pager::paging_state> paging_state = nullptr;
        auto regular_columns = boost::copy_range<query::column_id_vector>(
                s->regular_columns() | boost::adaptors::transformed([] (const column_definition& cdef) { return cdef.id; }));
        selection = cql3::selection::selection::wildcard(s);
        query::partition_slice::option_set opts = selection->get_query_options();
        opts.set<query::partition_slice::option::allow_short_read>();
        // It is important that the scan bypass the cache, to avoid polluting it:
opts.set<query::partition_slice::option::bypass_cache>();
|
||||
std::vector<query::clustering_range> ck_bounds{query::clustering_range::make_open_ended_both_sides()};
|
||||
auto partition_slice = query::partition_slice(std::move(ck_bounds), {}, std::move(regular_columns), opts);
|
||||
command = ::make_lw_shared<query::read_command>(s->id(), s->version(), partition_slice, proxy.get_max_result_size(partition_slice), query::tombstone_limit(proxy.get_tombstone_limit()));
|
||||
tracing::trace_state_ptr trace_state;
|
||||
// NOTICE: empty_service_permit is used because the TTL service has fixed parallelism
|
||||
query_state_ptr = std::make_unique<service::query_state>(internal_client_state, trace_state, empty_service_permit());
|
||||
// FIXME: What should we do on multi-DC? Will we run the expiration on the same ranges on all
|
||||
// DCs or only once for each range? If the latter, we need to change the CLs in the
|
||||
// scanner and deleter.
|
||||
db::consistency_level cl = db::consistency_level::LOCAL_QUORUM;
|
||||
query_options = std::make_unique<cql3::query_options>(cl, std::vector<cql3::raw_value>{});
|
||||
query_options = std::make_unique<cql3::query_options>(std::move(query_options), std::move(paging_state));
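        // (Added note) the query options are built in two steps here: first
        // with the consistency level and empty bind values, and then wrapped
        // by the constructor that attaches a paging state - nullptr in this
        // case, since every scan starts from a fresh page.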
    }
};

// Scan data in a list of token ranges in one table, looking for expired
// items and deleting them.
// Because of issue #9167, partition_ranges must have a single partition
// range for this code to work correctly.
static future<> scan_table_ranges(
        service::storage_proxy& proxy,
        const scan_ranges_context& scan_ctx,
        dht::partition_range_vector&& partition_ranges,
        abort_source& abort_source,
        named_semaphore& page_sem,
        expiration_service::stats& expiration_stats)
{
    const schema_ptr& s = scan_ctx.s;
    assert(partition_ranges.size() == 1); // otherwise issue #9167 will cause incorrect results.
    auto p = service::pager::query_pagers::pager(proxy, s, scan_ctx.selection, *scan_ctx.query_state_ptr,
            *scan_ctx.query_options, scan_ctx.command, std::move(partition_ranges), nullptr);
    while (!p->is_exhausted()) {
        if (abort_source.abort_requested()) {
            co_return;
        }
        auto units = co_await get_units(page_sem, 1);
        // We don't need to limit the page size in number of rows because there is
        // a builtin limit on the page's size in bytes. Setting this limit to
        // 1 is useful for debugging the paging code with moderate-size data.
        uint32_t limit = std::numeric_limits<uint32_t>::max();
        // Read a page, and if that times out, try again after a small sleep.
        // If we didn't catch the timeout exception, it would cause the scan
        // to be aborted and only restarted at the next scanning period.
        // If we retry too many times, give up and restart the scan later.
        std::unique_ptr<cql3::result_set> rs;
        for (int retries = 0; ; retries++) {
            try {
                // FIXME: which timeout?
                rs = co_await p->fetch_page(limit, gc_clock::now(), executor::default_timeout());
                break;
            } catch (exceptions::read_timeout_exception&) {
                tlogger.warn("expiration scanner read timed out, will retry: {}",
                        std::current_exception());
            }
            // If we didn't break out of this loop, add a minimal sleep
            if (retries >= 10) {
                // Don't get stuck forever asking for the same page; maybe there's
                // a bug or a real problem in several replicas. Give up on
                // this scan and retry the scan from a random position later,
                // in the next scan period.
                throw std::runtime_error("scanner thread failed after too many timeouts for the same page");
            }
            co_await sleep_abortable(std::chrono::seconds(1), abort_source);
        }
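        // (Added note) With the cap of 10 retries and a 1-second sleep after
        // each failed attempt, a persistently timing-out page stalls here for
        // roughly ten seconds before the whole table scan is abandoned and
        // restarted in a later scan period.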
        auto rows = rs->rows();
        auto meta = rs->get_metadata().get_names();
        std::optional<unsigned> expiration_column;
        for (unsigned i = 0; i < meta.size(); i++) {
            const cql3::column_specification& col = *meta[i];
            if (col.name->name() == scan_ctx.column_name) {
                expiration_column = i;
                break;
            }
        }
        if (!expiration_column) {
            continue;
        }
        for (const auto& row : rows) {
            const managed_bytes_opt& cell = row[*expiration_column];
            if (!cell) {
                continue;
            }
            auto v = meta[*expiration_column]->type->deserialize(*cell);
            bool expired = false;
            // FIXME: don't recalculate "now" all the time
            auto now = gc_clock::now();
            if (scan_ctx.member) {
                // In this case, the expiration-time attribute we're
                // looking for is a member in a map, saved serialized
                // into bytes using Alternator's serialization (basically
                // JSON serialized into bytes).
                // FIXME: is it possible to find a specific member of a map
                // without iterating through it like we do here and comparing
                // the keys?
                for (const auto& entry : value_cast<map_type_impl::native_type>(v)) {
                    std::string attr_name = value_cast<sstring>(entry.first);
                    if (attr_name == *scan_ctx.member) {
                        bytes value = value_cast<bytes>(entry.second);
                        rjson::value json = deserialize_item(value);
                        expired = is_expired(json, now);
                        break;
                    }
                }
            } else {
                // For a real column to contain an expiration time, it
                // must be of a numeric type.
                // FIXME: Currently we only support decimal_type (which is
                // what Alternator uses), but other numeric types could be
                // supported as well to make this feature more useful in CQL.
                // Note that kind::decimal is also checked above.
                big_decimal n = value_cast<big_decimal>(v);
                expired = is_expired(n, now);
            }
            if (expired) {
                expiration_stats.items_deleted++;
                // FIXME: maybe don't recalculate new_timestamp() all the time
                // FIXME: if expire_item() throws on timeout, we need to retry it.
                auto ts = api::new_timestamp();
                co_await expire_item(proxy, *scan_ctx.query_state_ptr, row, s, ts);
            }
        }
        // FIXME: once in a while, persist p->state(), so that on reboot
        // we don't start from scratch.
    }
}

// scan_table() scans, in one table, data "owned" by this shard, looking for
// expired items and deleting them.
// We consider each node to "own" its primary token ranges, i.e., the token
// ranges for which this node is the first replica in the ring. Inside the
// node, each shard "owns" subranges of the node's token ranges - according
// to the node's sharding algorithm.
// When a node goes down, the token ranges owned by it will not be scanned
// and items in those token ranges will not expire, so in the future (FIXME)
// this function should additionally work on token ranges whose primary owner
// is down and this node is the range's secondary owner.
// If the TTL (expiration-time scanning) feature is not enabled for this
// table, scan_table() returns false without doing anything. Remember that the
// TTL feature may be enabled later, so this function will need to be called
// again when the feature is enabled.
// Currently this function scans the entire table (or, rather, the parts owned
// by this shard) at full rate, once. In the future (FIXME) we should consider
// how to pace this scan, how and when to repeat it, how to interleave or
// parallelize scanning of multiple tables, and how to continue scans after a
// reboot.
static future<bool> scan_table(
        service::storage_proxy& proxy,
        data_dictionary::database db,
        gms::gossiper& gossiper,
        schema_ptr s,
        abort_source& abort_source,
        named_semaphore& page_sem,
        expiration_service::stats& expiration_stats)
{
    // Check if an expiration-time attribute is enabled for this table.
    // If not, just return false immediately.
    // FIXME: the setting of the TTL may change in the middle of a long scan!
    std::optional<std::string> attribute_name = db::find_tag(*s, TTL_TAG_KEY);
    if (!attribute_name) {
        co_return false;
    }
    // attribute_name may be one of the schema's columns (in Alternator, this
    // means it's a key column), or an element in Alternator's attrs map
    // encoded in Alternator's JSON encoding.
    // FIXME: To make this less Alternator-specific, we should encode in the
    // single key's value three things:
    // 1. The name of a column
    // 2. Optionally, if the column is a map, a member in the map
    // 3. The deserializer for the value: CQL or Alternator (JSON).
    // The deserializer can be guessed: If the given column or map item is
    // numeric, it can be used directly. If it is a "bytes" type, it needs to
    // be deserialized using Alternator's deserializer.
    bytes column_name = to_bytes(*attribute_name);
    const column_definition *cd = s->get_column_definition(column_name);
    std::optional<std::string> member;
    if (!cd) {
        member = std::move(attribute_name);
        column_name = bytes(executor::ATTRS_COLUMN_NAME);
        cd = s->get_column_definition(column_name);
        tlogger.info("table {} TTL enabled with attribute {} in {}", s->cf_name(), *member, executor::ATTRS_COLUMN_NAME);
    } else {
        tlogger.info("table {} TTL enabled with attribute {}", s->cf_name(), *attribute_name);
    }
    if (!cd) {
        tlogger.info("table {} TTL column is missing, not scanning", s->cf_name());
        co_return false;
    }
    data_type column_type = cd->type;
    // Verify that the column has the right type: If "member" exists
    // the column must be a map, and if it doesn't, the column must
    // (currently) be a decimal_type. If the column has the wrong type,
    // nothing can get expired in this table, and it's pointless to
    // scan it.
    if ((member && column_type->get_kind() != abstract_type::kind::map) ||
        (!member && column_type->get_kind() != abstract_type::kind::decimal)) {
        tlogger.info("table {} TTL column has unsupported type, not scanning", s->cf_name());
        co_return false;
    }
    expiration_stats.scan_table++;
    // FIXME: need to pace the scan, not do it all at once.
    scan_ranges_context scan_ctx{s, proxy, std::move(column_name), std::move(member)};
    auto erm = db.real_database().find_keyspace(s->ks_name()).get_vnode_effective_replication_map();
    auto my_address = erm->get_topology().my_address();
    token_ranges_owned_by_this_shard my_ranges(s, co_await ranges_holder_primary::make(erm, my_address));
    while (std::optional<dht::partition_range> range = my_ranges.next_partition_range()) {
        // Note that because of issue #9167 we need to run a separate
        // query on each partition range, and can't pass several of
        // them in one partition_range_vector.
        dht::partition_range_vector partition_ranges;
        partition_ranges.push_back(std::move(*range));
        // FIXME: if scanning a single range fails, including on network errors,
        // we fail the entire scan (and rescan from the beginning). Need to
        // reconsider this. Saving the scan position might be a good enough
        // solution for this problem.
        co_await scan_table_ranges(proxy, scan_ctx, std::move(partition_ranges), abort_source, page_sem, expiration_stats);
    }
    // If each node only scans its own primary ranges, then when any node is
    // down part of the token range will not get scanned. This can be viewed
    // as acceptable (when the node comes back online, it will resume its scan),
    // but as noted in issue #9787, we can allow more prompt expiration
    // by tasking another node to take over scanning of the dead node's primary
    // ranges. What we do here is that this node will also check expiration
    // on its *secondary* ranges - but only those whose primary owner is down.
    token_ranges_owned_by_this_shard my_secondary_ranges(s, co_await ranges_holder_secondary::make(erm, my_address, gossiper));
    while (std::optional<dht::partition_range> range = my_secondary_ranges.next_partition_range()) {
        expiration_stats.secondary_ranges_scanned++;
        dht::partition_range_vector partition_ranges;
        partition_ranges.push_back(std::move(*range));
        co_await scan_table_ranges(proxy, scan_ctx, std::move(partition_ranges), abort_source, page_sem, expiration_stats);
    }
    co_return true;
}


future<> expiration_service::run() {
    // FIXME: don't just tight-loop; think about timing, pace, and
    // storing the position in durable storage, etc.
    // FIXME: think about working on different tables in parallel.
    // We also need to notice when a new table is added, a table is
    // deleted, or when TTL is enabled or disabled for a table!
    for (;;) {
        auto start = lowres_clock::now();
        // _db.get_tables() may change under our feet during a
        // long-lived loop, so we must keep our own copy of the list of
        // schemas.
        std::vector<schema_ptr> schemas;
        for (auto cf : _db.get_tables()) {
            schemas.push_back(cf.schema());
        }
        for (schema_ptr s : schemas) {
            co_await coroutine::maybe_yield();
            if (shutting_down()) {
                co_return;
            }
            try {
                co_await scan_table(_proxy, _db, _gossiper, s, _abort_source, _page_sem, _expiration_stats);
            } catch (...) {
                // The scan of a table may fail in the middle for many
                // reasons, including network failure and even the table
                // being removed. We'll continue scanning this table later
                // (if it still exists). In any case it's important to catch
                // the exception and not let the scanning service die for
                // good.
                // If the table has been deleted, it is expected that the scan
                // will fail at some point, and even a warning is excessive.
                if (_db.has_schema(s->ks_name(), s->cf_name())) {
                    tlogger.warn("table {}.{} expiration scan failed: {}",
                            s->ks_name(), s->cf_name(), std::current_exception());
                } else {
                    tlogger.info("expiration scan failed when table {}.{} was deleted",
                            s->ks_name(), s->cf_name());
                }
            }
        }
        _expiration_stats.scan_passes++;
        // The TTL scanner above runs once over all tables, at full steam.
        // After completing such a scan, we sleep until it's time to start
        // another scan. TODO: If the scan went too fast, we can slow it down
        // in the next iteration by reducing the scanner's scheduling-group
        // share (if using a separate scheduling group), or by introducing
        // finer-grained sleeps into the scanning code.
        std::chrono::milliseconds scan_duration(std::chrono::duration_cast<std::chrono::milliseconds>(lowres_clock::now() - start));
        std::chrono::milliseconds period(long(_db.get_config().alternator_ttl_period_in_seconds() * 1000));
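        // (Added note; the numbers are illustrative, not defaults) if
        // alternator_ttl_period_in_seconds is set to 86400 (one day) and the
        // pass above took two hours, the sleep below lasts the remaining
        // twenty-two hours before the next pass starts.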
        if (scan_duration < period) {
            try {
                tlogger.info("sleeping {} seconds until next period", (period - scan_duration).count() / 1000.0);
                co_await seastar::sleep_abortable(period - scan_duration, _abort_source);
            } catch (seastar::sleep_aborted&) {}
        } else {
            tlogger.warn("scan took {} seconds, longer than period - not sleeping", scan_duration.count() / 1000.0);
        }
    }
}

future<> expiration_service::start() {
    // Called by main() on each shard to start the expiration-service
    // thread. Just runs run() in the background and allows stop().
    if (_db.features().alternator_ttl) {
        if (!shutting_down()) {
            _end = run().handle_exception([] (std::exception_ptr ep) {
                tlogger.error("expiration_service failed: {}", ep);
            });
        }
    }
    return make_ready_future<>();
}

future<> expiration_service::stop() {
    if (_abort_source.abort_requested()) {
        throw std::logic_error("expiration_service::stop() called a second time");
    }
    _abort_source.request_abort();
    if (!_end) {
        // if _end was not set, start() was never called
        return make_ready_future<>();
    }
    return std::move(*_end);
}

expiration_service::stats::stats() {
    _metrics.add_group("expiration", {
        seastar::metrics::make_total_operations("scan_passes", scan_passes,
            seastar::metrics::description("number of passes over the database")),
        seastar::metrics::make_total_operations("scan_table", scan_table,
            seastar::metrics::description("number of table scans (counting each scan of each table that enabled expiration)")),
        seastar::metrics::make_total_operations("items_deleted", items_deleted,
            seastar::metrics::description("number of items deleted after expiration")),
        seastar::metrics::make_total_operations("secondary_ranges_scanned", secondary_ranges_scanned,
            seastar::metrics::description("number of token ranges scanned by this node while their primary owner was down")),
    });
}


} // namespace alternator
@@ -1,80 +0,0 @@
/*
 * Copyright 2021-present ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 */

#pragma once

#include "seastarx.hh"
#include <seastar/core/sharded.hh>
#include <seastar/core/abort_source.hh>
#include <seastar/core/semaphore.hh>
#include "data_dictionary/data_dictionary.hh"

namespace gms {
class gossiper;
}

namespace replica {
class database;
}

namespace service {
class storage_proxy;
}

namespace alternator {

// expiration_service is a sharded service responsible for cleaning up expired
// items in all tables with per-item expiration enabled. Currently, this means
// Alternator tables with TTL configured via an UpdateTimeToLive request.
class expiration_service final : public seastar::peering_sharded_service<expiration_service> {
public:
    // Object holding per-shard statistics related to the expiration service.
    // While this object is alive, these metrics are also registered to be
    // visible by the metrics REST API, with the "expiration_" prefix.
    class stats {
    public:
        stats();
        uint64_t scan_passes = 0;
        uint64_t scan_table = 0;
        uint64_t items_deleted = 0;
        uint64_t secondary_ranges_scanned = 0;
    private:
        // The metric_groups object holds this stats object's metrics registered
        // as long as the stats object is alive.
        seastar::metrics::metric_groups _metrics;
    };
private:
    data_dictionary::database _db;
    service::storage_proxy& _proxy;
    gms::gossiper& _gossiper;
    // _end is set by start(), and resolves when the background service
    // started by it ends. To ask the background service to end, _abort_source
    // should be triggered. stop() below uses both _abort_source and _end.
    std::optional<future<>> _end;
    abort_source _abort_source;
    // Ensures that at most 1 page of scan results at a time is processed by the TTL service
    named_semaphore _page_sem{1, named_semaphore_exception_factory{"alternator_ttl"}};
    bool shutting_down() { return _abort_source.abort_requested(); }
    stats _expiration_stats;
public:
    // sharded_service<expiration_service>::start() creates this object on
    // all shards, so it calls this constructor on each shard. Later, the
    // additional start() function should be invoked on all shards.
    expiration_service(data_dictionary::database, service::storage_proxy&, gms::gossiper&);
    future<> start();
    future<> run();
    // sharded_service<expiration_service>::stop() calls the following stop()
    // method on each shard. This stop() asks the service on this shard to
    // shut down as quickly as it can. The returned future indicates when the
    // service is no longer running.
    // stop() may be called even before start(), but may only be called once -
    // calling it twice will result in an exception.
    future<> stop();
};
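
// Typical per-shard lifecycle of this service (a sketch under the usual
// seastar::sharded<> conventions; the variable names are illustrative and
// the exact call site lives in the server startup code, not shown here):
//
//     seastar::sharded<alternator::expiration_service> expiration;
//     co_await expiration.start(std::ref(db), std::ref(proxy), std::ref(gossiper));
//     co_await expiration.invoke_on_all(&alternator::expiration_service::start);
//     ...
//     co_await expiration.stop();  // invokes stop() on every shard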

} // namespace alternator

15 amplify.yml
@@ -1,15 +0,0 @@
version: 1
applications:
  - frontend:
      phases:
        build:
          commands:
            - make setupenv
            - make dirhtml
      artifacts:
        baseDirectory: _build/dirhtml
        files:
          - '**/*'
      cache:
        paths: []
    appRoot: docs
@@ -1,79 +0,0 @@
# Generate C++ sources from Swagger definitions
set(swagger_files
  api-doc/authorization_cache.json
  api-doc/cache_service.json
  api-doc/collectd.json
  api-doc/column_family.json
  api-doc/commitlog.json
  api-doc/compaction_manager.json
  api-doc/config.json
  api-doc/endpoint_snitch_info.json
  api-doc/error_injection.json
  api-doc/failure_detector.json
  api-doc/gossiper.json
  api-doc/hinted_handoff.json
  api-doc/lsa.json
  api-doc/messaging_service.json
  api-doc/metrics.json
  api-doc/raft.json
  api-doc/storage_proxy.json
  api-doc/storage_service.json
  api-doc/stream_manager.json
  api-doc/system.json
  api-doc/tasks.json
  api-doc/task_manager.json
  api-doc/task_manager_test.json
  api-doc/utils.json)

foreach(f ${swagger_files})
  get_filename_component(fname "${f}" NAME_WE)
  get_filename_component(dir "${f}" DIRECTORY)
  seastar_generate_swagger(
    TARGET scylla_swagger_gen_${fname}
    VAR scylla_swagger_gen_${fname}_files
    IN_FILE "${CMAKE_CURRENT_SOURCE_DIR}/${f}"
    OUT_DIR "${scylla_gen_build_dir}/api/${dir}")
  list(APPEND swagger_gen_files "${scylla_swagger_gen_${fname}_files}")
endforeach()

add_library(api)
target_sources(api
  PRIVATE
    api.cc
    cache_service.cc
    collectd.cc
    column_family.cc
    commitlog.cc
    compaction_manager.cc
    config.cc
    endpoint_snitch.cc
    error_injection.cc
    authorization_cache.cc
    failure_detector.cc
    gossiper.cc
    hinted_handoff.cc
    lsa.cc
    messaging_service.cc
    raft.cc
    storage_proxy.cc
    storage_service.cc
    stream_manager.cc
    system.cc
    tasks.cc
    task_manager.cc
    task_manager_test.cc
    token_metadata.cc
    ${swagger_gen_files})
target_include_directories(api
  PUBLIC
    ${CMAKE_SOURCE_DIR}
    ${scylla_gen_build_dir})
target_link_libraries(api
  idl
  wasmtime_bindings
  Seastar::seastar
  xxHash::xxhash
  absl::headers)

check_headers(check-headers api
  GLOB_RECURSE ${CMAKE_CURRENT_SOURCE_DIR}/*.hh)
@@ -1,29 +0,0 @@
{
   "apiVersion":"0.0.1",
   "swaggerVersion":"1.2",
   "basePath":"{{Protocol}}://{{Host}}",
   "resourcePath":"/authorization_cache",
   "produces":[
      "application/json"
   ],
   "apis":[
      {
         "path":"/authorization_cache/reset",
         "operations":[
            {
               "method":"POST",
               "summary":"Reset cache",
               "type":"void",
               "nickname":"authorization_cache_reset",
               "produces":[
                  "application/json"
               ],
               "parameters":[
               ]
            }
         ]
      }
   ],
   "models":{
   }
}
@@ -13,7 +13,7 @@
      {
         "method":"GET",
         "summary":"get row cache save period in seconds",
         "type": "long",
         "type":"int",
         "nickname":"get_row_cache_save_period_in_seconds",
         "produces":[
            "application/json"
@@ -35,7 +35,7 @@
         "description":"row cache save period in seconds",
         "required":true,
         "allowMultiple":false,
         "type": "long",
         "type":"int",
         "paramType":"query"
      }
   ]
@@ -48,7 +48,7 @@
      {
         "method":"GET",
         "summary":"get key cache save period in seconds",
         "type": "long",
         "type":"int",
         "nickname":"get_key_cache_save_period_in_seconds",
         "produces":[
            "application/json"
@@ -70,7 +70,7 @@
         "description":"key cache save period in seconds",
         "required":true,
         "allowMultiple":false,
         "type": "long",
         "type":"int",
         "paramType":"query"
      }
   ]
@@ -83,7 +83,7 @@
      {
         "method":"GET",
         "summary":"get counter cache save period in seconds",
         "type": "long",
         "type":"int",
         "nickname":"get_counter_cache_save_period_in_seconds",
         "produces":[
            "application/json"
@@ -105,7 +105,7 @@
         "description":"counter cache save period in seconds",
         "required":true,
         "allowMultiple":false,
         "type": "long",
         "type":"int",
         "paramType":"query"
      }
   ]
@@ -118,7 +118,7 @@
      {
         "method":"GET",
         "summary":"get row cache keys to save",
         "type": "long",
         "type":"int",
         "nickname":"get_row_cache_keys_to_save",
         "produces":[
            "application/json"
@@ -140,7 +140,7 @@
         "description":"row cache keys to save",
         "required":true,
         "allowMultiple":false,
         "type": "long",
         "type":"int",
         "paramType":"query"
      }
   ]
@@ -153,7 +153,7 @@
      {
         "method":"GET",
         "summary":"get key cache keys to save",
         "type": "long",
         "type":"int",
         "nickname":"get_key_cache_keys_to_save",
         "produces":[
            "application/json"
@@ -175,7 +175,7 @@
         "description":"key cache keys to save",
         "required":true,
         "allowMultiple":false,
         "type": "long",
         "type":"int",
         "paramType":"query"
      }
   ]
@@ -188,7 +188,7 @@
      {
         "method":"GET",
         "summary":"get counter cache keys to save",
         "type": "long",
         "type":"int",
         "nickname":"get_counter_cache_keys_to_save",
         "produces":[
            "application/json"
@@ -210,7 +210,7 @@
         "description":"counter cache keys to save",
         "required":true,
         "allowMultiple":false,
         "type": "long",
         "type":"int",
         "paramType":"query"
      }
   ]
@@ -448,7 +448,7 @@
      {
         "method": "GET",
         "summary": "Get key entries",
         "type": "long",
         "type": "int",
         "nickname": "get_key_entries",
         "produces": [
            "application/json"
@@ -568,7 +568,7 @@
      {
         "method": "GET",
         "summary": "Get row entries",
         "type": "long",
         "type": "int",
         "nickname": "get_row_entries",
         "produces": [
            "application/json"
@@ -688,7 +688,7 @@
      {
         "method": "GET",
         "summary": "Get counter entries",
         "type": "long",
         "type": "int",
         "nickname": "get_counter_entries",
         "produces": [
            "application/json"

@@ -67,7 +67,7 @@
   "parameters":[
      {
         "name":"pluginid",
         "description":"The plugin ID, describe the component the metric belongs to. Examples are cache and alternator, etc'. Regex are supported.",
         "description":"The plugin ID, describe the component the metric belongs to. Examples are cache, thrift, etc'. Regex are supported.The plugin ID, describe the component the metric belong to. Examples are: cache, thrift etc'. regex are supported",
         "required":true,
         "allowMultiple":false,
         "type":"string",
@@ -199,4 +199,4 @@
            }
         }
      }
   }
}
@@ -70,7 +70,7 @@
      {
         "method":"POST",
         "summary":"Force a major compaction of this column family",
         "type":"void",
         "type":"string",
         "nickname":"force_major_compaction",
         "produces":[
            "application/json"
@@ -84,20 +84,12 @@
         "type":"string",
         "paramType":"path"
      },
      {
         "name":"flush_memtables",
         "description":"Controls flushing of memtables before compaction (true by default). Set to \"false\" to skip automatic flushing of memtables before compaction, e.g. when the table is flushed explicitly before invoking the compaction api.",
         "required":false,
         "allowMultiple":false,
         "type":"boolean",
         "paramType":"query"
      },
      {
         "name":"split_output",
         "description":"true if the output of the major compaction should be split in several sstables",
         "required":false,
         "allowMultiple":false,
         "type":"boolean",
         "type":"bool",
         "paramType":"query"
      }
   ]
@@ -129,7 +121,7 @@
         "description":"The minimum number of sstables in queue before compaction kicks off",
         "required":true,
         "allowMultiple":false,
         "type": "long",
         "type":"int",
         "paramType":"query"
      }
   ]
@@ -180,7 +172,7 @@
         "description":"The maximum number of sstables in queue before compaction kicks off",
         "required":true,
         "allowMultiple":false,
         "type": "long",
         "type":"int",
         "paramType":"query"
      }
   ]
@@ -211,7 +203,7 @@
   "operations":[
      {
         "method":"POST",
         "summary":"Sets the minimum and maximum number of sstables in queue before compaction kicks off",
         "summary":"Sets the minumum and maximum number of sstables in queue before compaction kicks off",
         "type":"string",
         "nickname":"set_compaction_threshold",
         "produces":[
@@ -231,7 +223,7 @@
         "description":"The maximum number of sstables in queue before compaction kicks off",
         "required":true,
         "allowMultiple":false,
         "type": "long",
         "type":"int",
         "paramType":"query"
      },
      {
@@ -239,7 +231,7 @@
         "description":"The minimum number of sstables in queue before compaction kicks off",
         "required":true,
         "allowMultiple":false,
         "type": "long",
         "type":"int",
         "paramType":"query"
      }
   ]
@@ -388,116 +380,16 @@
   "operations":[
      {
         "method":"GET",
         "summary":"check if the auto_compaction property is enabled for a given table",
         "summary":"check if the auto compaction disabled",
         "type":"boolean",
         "nickname":"get_auto_compaction",
         "nickname":"is_auto_compaction_disabled",
         "produces":[
            "application/json"
         ],
         "parameters":[
            {
               "name":"name",
               "description":"The table name in keyspace:name format",
               "required":true,
               "allowMultiple":false,
               "type":"string",
               "paramType":"path"
            }
         ]
      },
      {
         "method":"POST",
         "summary":"Enable table auto compaction",
         "type":"void",
         "nickname":"enable_auto_compaction",
         "produces":[
            "application/json"
         ],
         "parameters":[
            {
               "name":"name",
               "description":"The table name in keyspace:name format",
               "required":true,
               "allowMultiple":false,
               "type":"string",
               "paramType":"path"
            }
         ]
      },
      {
         "method":"DELETE",
         "summary":"Disable table auto compaction",
         "type":"void",
         "nickname":"disable_auto_compaction",
         "produces":[
            "application/json"
         ],
         "parameters":[
            {
               "name":"name",
               "description":"The table name in keyspace:name format",
               "required":true,
               "allowMultiple":false,
               "type":"string",
               "paramType":"path"
            }
         ]
      }
   ]
},
{
   "path":"/column_family/tombstone_gc/{name}",
   "operations":[
      {
         "method":"GET",
         "summary":"Check if tombstone GC is enabled for a given table",
         "type":"boolean",
         "nickname":"get_tombstone_gc",
         "produces":[
            "application/json"
         ],
         "parameters":[
            {
               "name":"name",
               "description":"The table name in keyspace:name format",
               "required":true,
               "allowMultiple":false,
               "type":"string",
               "paramType":"path"
            }
         ]
      },
      {
         "method":"POST",
         "summary":"Enable tombstone GC for a given table",
         "type":"void",
         "nickname":"enable_tombstone_gc",
         "produces":[
            "application/json"
         ],
         "parameters":[
            {
               "name":"name",
               "description":"The table name in keyspace:name format",
               "required":true,
               "allowMultiple":false,
               "type":"string",
               "paramType":"path"
            }
         ]
      },
      {
         "method":"DELETE",
         "summary":"Disable tombstone GC for a given table",
         "type":"void",
         "nickname":"disable_tombstone_gc",
         "produces":[
            "application/json"
         ],
         "parameters":[
            {
               "name":"name",
               "description":"The table name in keyspace:name format",
               "description":"The column family name in keyspace:name format",
               "required":true,
               "allowMultiple":false,
               "type":"string",
@@ -563,7 +455,7 @@
   "operations":[
      {
         "method":"GET",
         "summary":"Returns a list of sstable filenames that contain the given partition key on this node",
         "summary":"Returns a list of filenames that contain the given key on this node",
         "type":"array",
         "items":{
            "type":"string"
@@ -583,7 +475,7 @@
      },
      {
         "name":"key",
         "description":"The partition key. In a composite-key scenario, use ':' to separate the columns in the key.",
         "description":"The key",
         "required":true,
         "allowMultiple":false,
         "type":"string",
@@ -652,7 +544,7 @@
         "summary":"sstable count for each level. empty unless leveled compaction is used",
         "type":"array",
         "items":{
            "type": "long"
            "type":"int"
         },
         "nickname":"get_sstable_count_per_level",
         "produces":[
@@ -719,54 +611,6 @@
            }
         ]
      },
      {
         "path":"/column_family/toppartitions/{name}",
         "operations":[
            {
               "method":"GET",
               "summary":"Toppartitions query",
               "type":"toppartitions_query_results",
               "nickname":"toppartitions",
               "produces":[
                  "application/json"
               ],
               "parameters":[
                  {
                     "name":"name",
                     "description":"The column family name in keyspace:name format",
                     "required":true,
                     "allowMultiple":false,
                     "type":"string",
                     "paramType":"path"
                  },
                  {
                     "name":"duration",
                     "description":"Duration (in milliseconds) of monitoring operation",
                     "required":true,
                     "allowMultiple":false,
                     "type": "long",
                     "paramType":"query"
                  },
                  {
                     "name":"list_size",
                     "description":"number of the top partitions to list",
                     "required":false,
                     "allowMultiple":false,
                     "type": "long",
                     "paramType":"query"
                  },
                  {
                     "name":"capacity",
                     "description":"capacity of stream summary: determines amount of resources used in query processing",
                     "required":false,
                     "allowMultiple":false,
                     "type": "long",
                     "paramType":"query"
                  }
               ]
            }
         ]
      },
      {
         "path":"/column_family/metrics/memtable_columns_count/",
         "operations":[
@@ -1029,7 +873,7 @@
      {
         "method":"GET",
         "summary":"Get memtable switch count",
         "type": "long",
         "type":"int",
         "nickname":"get_memtable_switch_count",
         "produces":[
            "application/json"
@@ -1053,7 +897,7 @@
      {
         "method":"GET",
         "summary":"Get all memtable switch count",
         "type": "long",
         "type":"int",
         "nickname":"get_all_memtable_switch_count",
         "produces":[
            "application/json"
@@ -1190,7 +1034,7 @@
      {
         "method":"GET",
         "summary":"Get read latency",
         "type": "long",
         "type":"int",
         "nickname":"get_read_latency",
         "produces":[
            "application/json"
@@ -1343,7 +1187,7 @@
      {
         "method":"GET",
         "summary":"Get all read latency",
         "type": "long",
         "type":"int",
         "nickname":"get_all_read_latency",
         "produces":[
            "application/json"
@@ -1359,7 +1203,7 @@
      {
         "method":"GET",
         "summary":"Get range latency",
         "type": "long",
         "type":"int",
         "nickname":"get_range_latency",
         "produces":[
            "application/json"
@@ -1383,7 +1227,7 @@
      {
         "method":"GET",
         "summary":"Get all range latency",
         "type": "long",
         "type":"int",
         "nickname":"get_all_range_latency",
         "produces":[
            "application/json"
@@ -1399,7 +1243,7 @@
      {
         "method":"GET",
         "summary":"Get write latency",
         "type": "long",
         "type":"int",
         "nickname":"get_write_latency",
         "produces":[
            "application/json"
@@ -1552,7 +1396,7 @@
      {
         "method":"GET",
         "summary":"Get all write latency",
         "type": "long",
         "type":"int",
         "nickname":"get_all_write_latency",
         "produces":[
            "application/json"
@@ -1568,7 +1412,7 @@
      {
         "method":"GET",
         "summary":"Get pending flushes",
         "type": "long",
         "type":"int",
         "nickname":"get_pending_flushes",
         "produces":[
            "application/json"
@@ -1592,7 +1436,7 @@
      {
         "method":"GET",
         "summary":"Get all pending flushes",
         "type": "long",
         "type":"int",
         "nickname":"get_all_pending_flushes",
         "produces":[
            "application/json"
@@ -1608,7 +1452,7 @@
      {
         "method":"GET",
         "summary":"Get pending compactions",
         "type": "long",
         "type":"int",
         "nickname":"get_pending_compactions",
         "produces":[
            "application/json"
@@ -1632,7 +1476,7 @@
      {
         "method":"GET",
         "summary":"Get all pending compactions",
         "type": "long",
         "type":"int",
         "nickname":"get_all_pending_compactions",
         "produces":[
            "application/json"
@@ -1648,7 +1492,7 @@
      {
         "method":"GET",
         "summary":"Get live ss table count",
         "type": "long",
         "type":"int",
         "nickname":"get_live_ss_table_count",
         "produces":[
            "application/json"
@@ -1672,7 +1516,7 @@
      {
         "method":"GET",
         "summary":"Get all live ss table count",
         "type": "long",
         "type":"int",
         "nickname":"get_all_live_ss_table_count",
         "produces":[
            "application/json"
@@ -1688,7 +1532,7 @@
      {
         "method":"GET",
         "summary":"Get live disk space used",
         "type": "long",
         "type":"int",
         "nickname":"get_live_disk_space_used",
         "produces":[
            "application/json"
@@ -1712,7 +1556,7 @@
      {
         "method":"GET",
         "summary":"Get all live disk space used",
         "type": "long",
         "type":"int",
         "nickname":"get_all_live_disk_space_used",
         "produces":[
            "application/json"
@@ -1728,7 +1572,7 @@
      {
         "method":"GET",
         "summary":"Get total disk space used",
         "type": "long",
         "type":"int",
         "nickname":"get_total_disk_space_used",
         "produces":[
            "application/json"
@@ -1752,7 +1596,7 @@
      {
         "method":"GET",
         "summary":"Get all total disk space used",
         "type": "long",
         "type":"int",
         "nickname":"get_all_total_disk_space_used",
         "produces":[
            "application/json"
@@ -2208,7 +2052,7 @@
      {
         "method":"GET",
         "summary":"Get speculative retries",
         "type": "long",
         "type":"int",
         "nickname":"get_speculative_retries",
         "produces":[
            "application/json"
@@ -2232,7 +2076,7 @@
      {
         "method":"GET",
         "summary":"Get all speculative retries",
         "type": "long",
         "type":"int",
         "nickname":"get_all_speculative_retries",
         "produces":[
            "application/json"
@@ -2312,7 +2156,7 @@
      {
         "method":"GET",
         "summary":"Get row cache hit out of range",
         "type": "long",
         "type":"int",
         "nickname":"get_row_cache_hit_out_of_range",
         "produces":[
            "application/json"
@@ -2336,7 +2180,7 @@
      {
         "method":"GET",
         "summary":"Get all row cache hit out of range",
         "type": "long",
         "type":"int",
         "nickname":"get_all_row_cache_hit_out_of_range",
         "produces":[
            "application/json"
@@ -2352,7 +2196,7 @@
      {
         "method":"GET",
         "summary":"Get row cache hit",
         "type": "long",
         "type":"int",
         "nickname":"get_row_cache_hit",
         "produces":[
            "application/json"
@@ -2376,7 +2220,7 @@
      {
         "method":"GET",
         "summary":"Get all row cache hit",
         "type": "long",
         "type":"int",
         "nickname":"get_all_row_cache_hit",
         "produces":[
            "application/json"
@@ -2392,7 +2236,7 @@
      {
         "method":"GET",
         "summary":"Get row cache miss",
         "type": "long",
         "type":"int",
         "nickname":"get_row_cache_miss",
         "produces":[
            "application/json"
@@ -2416,7 +2260,7 @@
      {
         "method":"GET",
         "summary":"Get all row cache miss",
         "type": "long",
         "type":"int",
         "nickname":"get_all_row_cache_miss",
         "produces":[
            "application/json"
@@ -2432,7 +2276,7 @@
      {
         "method":"GET",
         "summary":"Get cas prepare",
         "type": "long",
         "type":"int",
         "nickname":"get_cas_prepare",
         "produces":[
            "application/json"
@@ -2456,7 +2300,7 @@
      {
         "method":"GET",
         "summary":"Get cas propose",
         "type": "long",
         "type":"int",
         "nickname":"get_cas_propose",
         "produces":[
            "application/json"
@@ -2480,7 +2324,7 @@
      {
         "method":"GET",
         "summary":"Get cas commit",
         "type": "long",
         "type":"int",
         "nickname":"get_cas_commit",
         "produces":[
            "application/json"
@@ -2972,52 +2816,6 @@
            "description":"The column family type"
         }
      }
   },
   "toppartitions_record":{
      "id":"toppartitions_record",
      "description":"nodetool toppartitions query record",
      "properties":{
         "partition":{
            "type":"string",
            "description":"Partition key"
         },
         "count":{
            "type":"long",
            "description":"Number of read/write operations"
         },
         "error":{
            "type":"long",
            "description":"Indication of inaccuracy in counting PKs"
         }
      }
   },
   "toppartitions_query_results":{
      "id":"toppartitions_query_results",
      "description":"nodetool toppartitions query results",
      "properties":{
         "read_cardinality":{
            "type":"long",
            "description":"Number of the unique operations in the sample set"
         },
         "read":{
            "type":"array",
            "items":{
               "type":"toppartitions_record"
            },
            "description":"Read results"
         },
         "write_cardinality":{
            "type":"long",
            "description":"Number of the unique operations in the sample set"
         },
         "write":{
            "type":"array",
            "items":{
               "type":"toppartitions_record"
            },
            "description":"Write results"
         }
      }
   }
}
}

@@ -144,21 +144,6 @@
            "parameters": []
         }
      ]
   },
   {
      "path": "/commitlog/metrics/max_disk_size",
      "operations": [
         {
            "method": "GET",
            "summary": "Get max disk size",
            "type": "long",
            "nickname": "get_max_disk_size",
            "produces": [
               "application/json"
            ],
            "parameters": []
         }
      ]
   }
]
}

@@ -102,47 +102,7 @@
         "parameters":[
            {
               "name":"type",
               "description":"The type of compaction to stop. Can be one of: COMPACTION | CLEANUP | SCRUB | UPGRADE | RESHAPE",
               "required":true,
               "allowMultiple":false,
               "type":"string",
               "paramType":"query"
            }
         ]
      }
   ]
},
{
   "path":"/compaction_manager/stop_keyspace_compaction/{keyspace}",
   "operations":[
      {
         "method":"POST",
         "summary":"Stop all running compaction-like tasks in the given keyspace and tables having the provided type.",
         "type":"void",
         "nickname":"stop_keyspace_compaction",
         "produces":[
            "application/json"
         ],
         "parameters":[
            {
               "name":"keyspace",
               "description":"The keyspace to stop compaction in",
               "required":true,
               "allowMultiple":false,
               "type":"string",
               "paramType":"path"
            },
            {
               "name":"tables",
               "description":"Comma-separated tables to stop compaction in",
               "required":false,
               "allowMultiple":false,
               "type":"string",
               "paramType":"query"
            },
            {
               "name":"type",
               "description":"The type of compaction to stop. Can be one of: COMPACTION | CLEANUP | SCRUB | UPGRADE | RESHAPE",
               "description":"the type of compaction to stop. Can be one of: - COMPACTION - VALIDATION - CLEANUP - SCRUB - INDEX_BUILD",
               "required":true,
               "allowMultiple":false,
               "type":"string",
@@ -158,7 +118,7 @@
      {
         "method": "GET",
         "summary": "Get pending tasks",
         "type": "long",
         "type": "int",
         "nickname": "get_pending_tasks",
         "produces": [
            "application/json"
@@ -167,24 +127,6 @@
         }
      ]
   },
   {
      "path": "/compaction_manager/metrics/pending_tasks_by_table",
      "operations": [
         {
            "method": "GET",
            "summary": "Get pending tasks by table name",
            "type": "array",
            "items": {
               "type": "pending_compaction"
            },
            "nickname": "get_pending_tasks_by_table",
            "produces": [
               "application/json"
            ],
            "parameters": []
         }
      ]
   },
   {
      "path": "/compaction_manager/metrics/completed_tasks",
      "operations": [
@@ -221,7 +163,7 @@
      {
         "method": "GET",
         "summary": "Get bytes compacted",
         "type": "long",
         "type": "int",
         "nickname": "get_bytes_compacted",
         "produces": [
            "application/json"
@@ -237,7 +179,7 @@
      "description":"A row merged information",
      "properties":{
         "key":{
            "type": "long",
            "type":"int",
            "description":"The number of sstable"
         },
         "value":{
@@ -302,23 +244,6 @@
         }
      }
   },
   "pending_compaction": {
      "id": "pending_compaction",
      "properties": {
         "cf": {
            "type": "string",
            "description": "The column family name"
         },
         "ks": {
            "type":"string",
            "description": "The keyspace name"
         },
         "task": {
            "type":"long",
            "description": "The number of pending tasks"
         }
      }
   },
   "history": {
      "id":"history",
      "description":"Compaction history information",

@@ -1,30 +0,0 @@
"/v2/config/{id}": {
   "get": {
      "description": "Return a config value",
      "operationId": "find_config_id",
      "produces": [
         "application/json"
      ],
      "tags": ["config"],
      "parameters": [
         {
            "name": "id",
            "in": "path",
            "description": "ID of config to return",
            "required": true,
            "type": "string"
         }
      ],
      "responses": {
         "200": {
            "description": "Config value"
         },
         "default": {
            "description": "unexpected error",
            "schema": {
               "$ref": "#/definitions/ErrorModel"
            }
         }
      }
   }
}
@@ -1,212 +0,0 @@
{
   "apiVersion":"0.0.1",
   "swaggerVersion":"1.2",
   "basePath":"{{Protocol}}://{{Host}}",
   "resourcePath":"/error_injection",
   "produces":[
      "application/json"
   ],
   "apis":[
      {
         "path":"/v2/error_injection/injection/{injection}",
         "operations":[
            {
               "method":"POST",
               "summary":"Activate an injection that triggers an error in code",
               "type":"void",
               "nickname":"enable_injection",
               "produces":[
                  "application/json"
               ],
               "parameters":[
                  {
                     "name":"injection",
                     "description":"injection name, should correspond to an injection added in code",
                     "required":true,
                     "allowMultiple":false,
                     "type":"string",
                     "paramType":"path"
                  },
                  {
                     "name":"one_shot",
                     "description":"boolean flag indicating whether the injection should be enabled to trigger only once",
                     "required":false,
                     "allowMultiple":false,
                     "type":"boolean",
                     "paramType":"query"
                  },
                  {
                     "name":"parameters",
                     "description":"dict of parameters to pass to the injection (json format)",
                     "required":false,
                     "allowMultiple":false,
                     "type":"dict",
                     "paramType":"body"
                  }
               ]
            },
            {
               "method":"DELETE",
               "summary":"Deactivate an injection previously activated by the API",
               "type":"void",
               "nickname":"disable_injection",
               "produces":[
                  "application/json"
               ],
               "parameters":[
                  {
                     "name":"injection",
                     "description":"injection name",
                     "required":true,
                     "allowMultiple":false,
                     "type":"string",
                     "paramType":"path"
                  }
               ]
            },
            {
               "method":"GET",
               "summary":"Read the state of an injection from all shards",
               "type":"array",
               "items":{
                  "type":"error_injection_info"
               },
               "nickname":"read_injection",
               "produces":[
                  "application/json"
               ],
               "parameters":[
                  {
                     "name":"injection",
                     "description":"injection name",
                     "required":true,
                     "allowMultiple":false,
                     "type":"string",
                     "paramType":"path"
                  }
               ]
            }
         ]
      },
      {
         "path":"/v2/error_injection/injection/{injection}/message",
         "operations":[
            {
               "method":"POST",
               "summary":"Send message to trigger an event in injection's code",
               "type":"void",
               "nickname":"message_injection",
               "produces":[
                  "application/json"
               ],
               "parameters":[
                  {
                     "name":"injection",
                     "description":"injection name, should correspond to an injection added in code",
                     "required":true,
                     "allowMultiple":false,
                     "type":"string",
                     "paramType":"path"
                  }
               ]
            }
         ]
      },
      {
         "path":"/v2/error_injection/disconnect/{ip}",
         "operations":[
            {
               "method":"POST",
               "summary":"Drop connection to a given IP",
               "type":"void",
               "nickname":"inject_disconnect",
               "produces":[
                  "application/json"
               ],
               "parameters":[
                  {
                     "name":"ip",
                     "description":"IP address to disconnect from",
                     "required":true,
                     "allowMultiple":false,
                     "type":"string",
                     "paramType":"path"
                  }
               ]
            }
         ]
      },
      {
         "path":"/v2/error_injection/injection",
         "operations":[
            {
               "method":"GET",
               "summary":"List all enabled injections on all shards, i.e. injections that will trigger an error in the code",
               "type":"array",
               "items":{
                  "type":"string"
               },
               "nickname":"get_enabled_injections_on_all",
               "produces":[
                  "application/json"
               ],
               "parameters":[]
            },
            {
               "method":"DELETE",
               "summary":"Deactivate all injections previously activated on all shards by the API",
               "type":"void",
               "nickname":"disable_on_all",
               "produces":[
                  "application/json"
               ],
               "parameters":[]
            }
         ]
      }
   ],
   "components":{
      "schemas": {
         "dict": {
            "type": "object",
            "additionalProperties": {
               "type": "string"
            }
         }
      }
   },
   "models":{
      "mapper":{
         "id":"mapper",
         "description":"A key value mapping",
         "properties":{
            "key":{
               "type":"string",
               "description":"The key"
            },
            "value":{
               "type":"string",
               "description":"The value"
            }
         }
      },
      "error_injection_info":{
         "id":"error_injection_info",
         "description":"Information about an error injection",
         "properties":{
            "enabled":{
               "type":"boolean",
               "description":"Is the error injection enabled"
            },
            "parameters":{
               "type":"array",
               "items":{
                  "type":"mapper"
               },
               "description":"The parameter values"
            }
         },
         "required":["enabled"]
      }
   }
}
@@ -110,7 +110,7 @@
      {
         "method":"GET",
         "summary":"Get count down endpoint",
         "type": "long",
         "type":"int",
         "nickname":"get_down_endpoint_count",
         "produces":[
            "application/json"
@@ -126,7 +126,7 @@
      {
         "method":"GET",
         "summary":"Get count up endpoint",
         "type": "long",
         "type":"int",
         "nickname":"get_up_endpoint_count",
         "produces":[
            "application/json"
@@ -180,11 +180,11 @@
         "description": "The endpoint address"
      },
      "generation": {
         "type": "long",
         "type": "int",
         "description": "The heart beat generation"
      },
      "version": {
         "type": "long",
         "type": "int",
         "description": "The heart beat version"
      },
      "update_time": {
@@ -209,7 +209,7 @@
      "description": "Holds a version value for an application state",
      "properties": {
         "application_state": {
            "type": "long",
            "type": "int",
            "description": "The application state enum index"
         },
         "value": {
@@ -217,7 +217,7 @@
            "description": "The version value"
         },
         "version": {
            "type": "long",
            "type": "int",
            "description": "The application state version"
         }
      }

@@ -12,7 +12,7 @@
   "operations":[
      {
         "method":"GET",
         "summary":"Get the addresses of the down endpoints",
         "summary":"Get the addreses of the down endpoints",
         "type":"array",
         "items":{
            "type":"string"
@@ -31,7 +31,7 @@
   "operations":[
      {
         "method":"GET",
         "summary":"Get the addresses of live endpoints",
         "summary":"Get the addreses of live endpoints",
         "type":"array",
         "items":{
            "type":"string"
@@ -75,7 +75,7 @@
      {
         "method":"GET",
         "summary":"Returns files which are pending for archival attempt. Does NOT include failed archive attempts",
         "type": "long",
         "type":"int",
         "nickname":"get_current_generation_number",
         "produces":[
            "application/json"
@@ -99,7 +99,7 @@
      {
         "method":"GET",
         "summary":"Get heart beat version for a node",
         "type": "long",
         "type":"int",
         "nickname":"get_current_heart_beat_version",
         "produces":[
            "application/json"
@@ -148,30 +148,6 @@
         ]
      }
   ]
},
{
   "path":"/gossiper/force_remove_endpoint/{addr}",
   "operations":[
      {
         "method":"POST",
         "summary":"Force remove an endpoint from gossip",
         "type":"void",
         "nickname":"force_remove_endpoint",
         "produces":[
            "application/json"
         ],
         "parameters":[
            {
               "name":"addr",
               "description":"The endpoint address",
               "required":true,
               "allowMultiple":false,
               "type":"string",
               "paramType":"path"
            }
         ]
      }
   ]
}
]
}
@@ -7,61 +7,6 @@
|
||||
"application/json"
|
||||
],
|
||||
"apis":[
|
||||
{
|
||||
"path":"/hinted_handoff/sync_point",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Creates a hints sync point. It can be used to wait until hints between given nodes are replayed. A sync point allows you to wait for hints accumulated at the moment of its creation - it won't wait for hints generated later. A sync point is described entirely by its ID - there is no state kept server-side, so there is no need to delete it.",
|
||||
"type":"string",
|
||||
"nickname":"create_hints_sync_point",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"target_hosts",
|
||||
"description":"A list of nodes towards which hints should be replayed. Multiple hosts can be listed by separating them with commas. If not provided or empty, the point will resolve when current hints towards all nodes in the cluster are sent.",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get the status of a hints sync point, possibly waiting for it to be reached.",
|
||||
"type":"string",
|
||||
"enum":[
|
||||
"DONE",
|
||||
"IN_PROGRESS"
|
||||
],
|
||||
"nickname":"get_hints_sync_point",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"id",
|
||||
"description":"The ID of the hint sync point which should be checked or waited on",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"timeout",
|
||||
"description":"Timeout in seconds after which the query returns even if hints are still being replayed. No value or 0 will cause the query to return immediately. A negative value will cause the query to wait until the sync point is reached",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"long",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/hinted_handoff/hints",
|
||||
"operations":[
|
||||
@@ -154,7 +99,7 @@
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get create hint count",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_create_hint_count",
|
||||
"produces": [
|
||||
"application/json"
|
||||
@@ -178,7 +123,7 @@
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get not stored hints count",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_not_stored_hints_count",
|
||||
"produces": [
|
||||
"application/json"
|
||||
|
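The sync point flow defined above (POST to create, then GET with the returned ID) can be driven by any HTTP client. A minimal sketch using libcurl follows; the host and port (localhost:10000), the quote-stripping of the response, and the absence of error handling are assumptions of the sketch, not part of the API definition.

#include <curl/curl.h>
#include <iostream>
#include <string>

// Collect the HTTP response body into a std::string.
static size_t collect(char* data, size_t sz, size_t n, void* out) {
    static_cast<std::string*>(out)->append(data, sz * n);
    return sz * n;
}

// Perform one request and return the body; method is "GET" or "POST".
static std::string call(const std::string& url, const char* method) {
    std::string body;
    CURL* c = curl_easy_init();
    curl_easy_setopt(c, CURLOPT_URL, url.c_str());
    curl_easy_setopt(c, CURLOPT_CUSTOMREQUEST, method);
    curl_easy_setopt(c, CURLOPT_WRITEFUNCTION, collect);
    curl_easy_setopt(c, CURLOPT_WRITEDATA, &body);
    curl_easy_perform(c);
    curl_easy_cleanup(c);
    return body;
}

int main() {
    const std::string base = "http://localhost:10000";  // assumed API address
    // Create a sync point for hints accumulated so far (all target hosts,
    // since target_hosts is omitted). The body is the sync point ID.
    std::string id = call(base + "/hinted_handoff/sync_point", "POST");
    // The body is a JSON-quoted string; strip the quotes before reuse.
    if (id.size() >= 2 && id.front() == '"') { id = id.substr(1, id.size() - 2); }
    // Poll the sync point, waiting up to 30 seconds; prints DONE or IN_PROGRESS.
    std::cout << call(base + "/hinted_handoff/sync_point?id=" + id + "&timeout=30", "GET") << "\n";
}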
||||
@@ -76,7 +76,7 @@
|
||||
"items":{
|
||||
"type":"message_counter"
|
||||
},
|
||||
"nickname":"get_replied_messages",
|
||||
"nickname":"get_completed_messages",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
@@ -191,7 +191,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get the version number",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_version",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -245,14 +245,14 @@
|
||||
"GOSSIP_SHUTDOWN",
|
||||
"DEFINITIONS_UPDATE",
|
||||
"TRUNCATE",
|
||||
"UNUSED__REPLICATION_FINISHED",
|
||||
"REPLICATION_FINISHED",
|
||||
"MIGRATION_REQUEST",
|
||||
"PREPARE_MESSAGE",
|
||||
"PREPARE_DONE_MESSAGE",
|
||||
"UNUSED__STREAM_MUTATION",
|
||||
"STREAM_MUTATION",
|
||||
"STREAM_MUTATION_DONE",
|
||||
"COMPLETE_MESSAGE",
|
||||
"UNUSED__REPAIR_CHECKSUM_RANGE",
|
||||
"REPAIR_CHECKSUM_RANGE",
|
||||
"GET_SCHEMA_VERSION"
|
||||
]
|
||||
}
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
"metrics_config": {
|
||||
"id": "metrics_config",
|
||||
"summary": "An entry in the metrics configuration",
|
||||
"properties": {
|
||||
"source_labels": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"description": "The source labels, a match is based on concatenation of the labels"
|
||||
},
|
||||
"action": {
|
||||
"type": "string",
|
||||
"description": "The action to perform on match",
|
||||
"enum": ["skip_when_empty", "report_when_empty", "replace", "keep", "drop", "drop_label"]
|
||||
},
|
||||
"target_label": {
|
||||
"type": "string",
|
||||
"description": "The application state version"
|
||||
},
|
||||
"replacement": {
|
||||
"type": "string",
|
||||
"description": "The replacement string to use when replacing a value"
|
||||
},
|
||||
"regex": {
|
||||
"type": "string",
|
||||
"description": "The regex string to use when replacing a value"
|
||||
},
|
||||
"separator": {
|
||||
"type": "string",
|
||||
"description": "The separator string to use when concatenating the labels"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,66 +0,0 @@
|
||||
"/v2/metrics-config/":{
|
||||
"get":{
|
||||
"description":"Return the metrics layer configuration",
|
||||
"operationId":"get_metrics_config",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"tags":[
|
||||
"metrics"
|
||||
],
|
||||
"parameters":[
|
||||
],
|
||||
"responses":{
|
||||
"200":{
|
||||
"schema": {
|
||||
"type":"array",
|
||||
"items":{
|
||||
"$ref":"#/definitions/metrics_config",
|
||||
"description":"metrics Config value"
|
||||
}
|
||||
}
|
||||
},
|
||||
"default":{
|
||||
"description":"unexpected error",
|
||||
"schema":{
|
||||
"$ref":"#/definitions/ErrorModel"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"post": {
|
||||
"description":"Set the metrics layer relabel configuration",
|
||||
"operationId":"set_metrics_config",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"tags":[
|
||||
"metrics"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"in":"body",
|
||||
"name":"conf",
|
||||
"description":"An array of relabel_config objects",
|
||||
"schema": {
|
||||
"type":"array",
|
||||
"items":{
|
||||
"$ref":"#/definitions/metrics_config",
|
||||
"description":"metrics Config value"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses":{
|
||||
"200":{
|
||||
"description": "OK"
|
||||
},
|
||||
"default":{
|
||||
"description":"unexpected error",
|
||||
"schema":{
|
||||
"$ref":"#/definitions/ErrorModel"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
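To make the shape of a relabel entry concrete, here is a sketch that POSTs a single rule to set_metrics_config: concatenate two source labels and drop the series when the result matches a regex. The metric name, label names, regex, and API address are all made up for the example.

#include <curl/curl.h>

int main() {
    // One metrics_config entry; every value here is illustrative,
    // not taken from a real deployment.
    const char* conf = R"([{
        "source_labels": ["__name__", "shard"],
        "separator": ";",
        "regex": "scylla_reactor_utilization;0",
        "action": "drop"
    }])";
    CURL* c = curl_easy_init();
    curl_slist* hdrs = curl_slist_append(nullptr, "Content-Type: application/json");
    curl_easy_setopt(c, CURLOPT_URL, "http://localhost:10000/v2/metrics-config/");
    curl_easy_setopt(c, CURLOPT_HTTPHEADER, hdrs);
    curl_easy_setopt(c, CURLOPT_POSTFIELDS, conf);  // implies POST
    curl_easy_perform(c);
    curl_slist_free_all(hdrs);
    curl_easy_cleanup(c);
}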
||||
@@ -1,99 +0,0 @@
|
||||
{
|
||||
"apiVersion":"0.0.1",
|
||||
"swaggerVersion":"1.2",
|
||||
"basePath":"{{Protocol}}://{{Host}}",
|
||||
"resourcePath":"/raft",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"apis":[
|
||||
{
|
||||
"path":"/raft/trigger_snapshot/{group_id}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Triggers snapshot creation and log truncation for the given Raft group",
|
||||
"type":"string",
|
||||
"nickname":"trigger_snapshot",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"group_id",
|
||||
"description":"The ID of the group which should get snapshotted",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
},
|
||||
{
|
||||
"name":"timeout",
|
||||
"description":"Timeout in seconds after which the endpoint returns a failure. If not provided, 60s is used.",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"long",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/raft/leader_host",
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Returns host ID of the current leader of the given Raft group",
|
||||
"type":"string",
|
||||
"nickname":"get_leader_host",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"group_id",
|
||||
"description":"The ID of the group. When absent, group0 is used.",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path": "/raft/read_barrier",
|
||||
"operations": [
|
||||
{
|
||||
"method": "POST",
|
||||
"summary": "Triggers read barrier for the given Raft group to wait for previously committed commands in this group to be applied locally. For example, can be used on group 0 to wait for the node to obtain latest schema changes.",
|
||||
"type": "string",
|
||||
"nickname": "read_barrier",
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"parameters": [
|
||||
{
|
||||
"name": "group_id",
|
||||
"description": "The ID of the group. When absent, group0 is used.",
|
||||
"required": false,
|
||||
"allowMultiple": false,
|
||||
"type": "string",
|
||||
"paramType": "query"
|
||||
},
|
||||
{
|
||||
"name": "timeout",
|
||||
"description": "Timeout in seconds after which the endpoint returns a failure. If not provided, 60s is used.",
|
||||
"required": false,
|
||||
"allowMultiple": false,
|
||||
"type": "long",
|
||||
"paramType": "query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
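As the summary above notes, a read barrier on group 0 is a way to wait until this node has applied the latest schema changes. A minimal sketch of issuing one; the API address and the 30-second timeout are assumptions of the example.

#include <curl/curl.h>

int main() {
    CURL* c = curl_easy_init();
    // group_id is omitted, so group 0 is used; wait at most 30 seconds.
    curl_easy_setopt(c, CURLOPT_URL,
        "http://localhost:10000/raft/read_barrier?timeout=30");
    curl_easy_setopt(c, CURLOPT_POSTFIELDS, "");  // empty-body POST
    curl_easy_perform(c);
    curl_easy_cleanup(c);
}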
||||
@@ -68,7 +68,7 @@
|
||||
"summary":"Get the hinted handoff enabled by dc",
|
||||
"type":"array",
|
||||
"items":{
|
||||
"type":"array"
|
||||
"type":"mapper_list"
|
||||
},
|
||||
"nickname":"get_hinted_handoff_enabled_by_dc",
|
||||
"produces":[
|
||||
@@ -105,7 +105,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get the max hint window",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_max_hint_window",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -128,7 +128,7 @@
|
||||
"description":"max hint window in ms",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
@@ -141,7 +141,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get max hints in progress",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_max_hints_in_progress",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -164,7 +164,7 @@
|
||||
"description":"max hints in progress",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
@@ -177,7 +177,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"get hints in progress",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_hints_in_progress",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -602,7 +602,7 @@
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get cas write metrics",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_cas_write_metrics_unfinished_commit",
|
||||
"produces": [
|
||||
"application/json"
|
||||
@@ -632,7 +632,7 @@
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get cas write metrics",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_cas_write_metrics_condition_not_met",
|
||||
"produces": [
|
||||
"application/json"
|
||||
@@ -641,28 +641,13 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path": "/storage_proxy/metrics/cas_write/failed_read_round_optimization",
|
||||
"operations": [
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get cas write metrics",
|
||||
"type": "long",
|
||||
"nickname": "get_cas_write_metrics_failed_read_round_optimization",
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"parameters": []
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path": "/storage_proxy/metrics/cas_read/unfinished_commit",
|
||||
"operations": [
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get cas read metrics",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_cas_read_metrics_unfinished_commit",
|
||||
"produces": [
|
||||
"application/json"
|
||||
@@ -686,13 +671,28 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path": "/storage_proxy/metrics/cas_read/condition_not_met",
|
||||
"operations": [
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get cas read metrics",
|
||||
"type": "int",
|
||||
"nickname": "get_cas_read_metrics_condition_not_met",
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"parameters": []
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path": "/storage_proxy/metrics/read/timeouts",
|
||||
"operations": [
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get read metrics",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_read_metrics_timeouts",
|
||||
"produces": [
|
||||
"application/json"
|
||||
@@ -707,7 +707,7 @@
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get read metrics",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_read_metrics_unavailables",
|
||||
"produces": [
|
||||
"application/json"
|
||||
@@ -791,36 +791,6 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path": "/storage_proxy/metrics/cas_read/moving_average_histogram",
|
||||
"operations": [
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get CAS read rate and latency histogram",
|
||||
"$ref": "#/utils/rate_moving_average_and_histogram",
|
||||
"nickname": "get_cas_read_metrics_latency_histogram",
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"parameters": []
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path": "/storage_proxy/metrics/view_write/moving_average_histogram",
|
||||
"operations": [
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get view write rate and latency histogram",
|
||||
"$ref": "#/utils/rate_moving_average_and_histogram",
|
||||
"nickname": "get_view_write_metrics_latency_histogram",
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"parameters": []
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path": "/storage_proxy/metrics/range/moving_average_histogram",
|
||||
"operations": [
|
||||
@@ -842,7 +812,7 @@
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get range metrics",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_range_metrics_timeouts",
|
||||
"produces": [
|
||||
"application/json"
|
||||
@@ -857,7 +827,7 @@
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get range metrics",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_range_metrics_unavailables",
|
||||
"produces": [
|
||||
"application/json"
|
||||
@@ -902,7 +872,7 @@
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get write metrics",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_write_metrics_timeouts",
|
||||
"produces": [
|
||||
"application/json"
|
||||
@@ -917,7 +887,7 @@
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get write metrics",
|
||||
"type": "long",
|
||||
"type": "int",
|
||||
"nickname": "get_write_metrics_unavailables",
|
||||
"produces": [
|
||||
"application/json"
|
||||
@@ -986,21 +956,6 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path": "/storage_proxy/metrics/cas_write/moving_average_histogram",
|
||||
"operations": [
|
||||
{
|
||||
"method": "GET",
|
||||
"summary": "Get CAS write rate and latency histogram",
|
||||
"$ref": "#/utils/rate_moving_average_and_histogram",
|
||||
"nickname": "get_cas_write_metrics_latency_histogram",
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"parameters": []
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/storage_proxy/metrics/read/estimated_histogram/",
|
||||
"operations":[
|
||||
@@ -1023,7 +978,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get read latency",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_read_latency",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -1055,7 +1010,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get write latency",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_write_latency",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -1087,7 +1042,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get range latency",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_range_latency",
|
||||
"produces":[
|
||||
"application/json"
|
||||
|
||||
File diff suppressed because it is too large
@@ -32,7 +32,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get number of active outbound streams",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_all_active_streams_outbound",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -48,7 +48,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get total incoming bytes",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_total_incoming_bytes",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -72,7 +72,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get all total incoming bytes",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_all_total_incoming_bytes",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -88,7 +88,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get total outgoing bytes",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_total_outgoing_bytes",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -112,7 +112,7 @@
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get all total outgoing bytes",
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"nickname":"get_all_total_outgoing_bytes",
|
||||
"produces":[
|
||||
"application/json"
|
||||
@@ -154,7 +154,7 @@
|
||||
"description":"The peer"
|
||||
},
|
||||
"session_index":{
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"description":"The session index"
|
||||
},
|
||||
"connecting":{
|
||||
@@ -211,7 +211,7 @@
|
||||
"description":"The ID"
|
||||
},
|
||||
"files":{
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"description":"Number of files to transfer. Can be 0 if nothing to transfer for some streaming request."
|
||||
},
|
||||
"total_size":{
|
||||
@@ -242,7 +242,7 @@
|
||||
"description":"The peer address"
|
||||
},
|
||||
"session_index":{
|
||||
"type": "long",
|
||||
"type":"int",
|
||||
"description":"The session index"
|
||||
},
|
||||
"file_name":{
|
||||
|
||||
@@ -1,29 +0,0 @@
|
||||
{
|
||||
"swagger": "2.0",
|
||||
"info": {
|
||||
"version": "1.0.0",
|
||||
"title": "Scylla API",
|
||||
"description": "The scylla API version 2.0",
|
||||
"termsOfService": "http://www.scylladb.com/tos/",
|
||||
"contact": {
|
||||
"name": "Scylla Team",
|
||||
"email": "info@scylladb.com",
|
||||
"url": "http://scylladb.com"
|
||||
},
|
||||
"license": {
|
||||
"name": "AGPL",
|
||||
"url": "https://github.com/scylladb/scylla/blob/master/LICENSE.AGPL"
|
||||
}
|
||||
},
|
||||
"host": "{{Host}}",
|
||||
"basePath": "/",
|
||||
"schemes": [
|
||||
"http"
|
||||
],
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"paths": {
|
||||
@@ -52,76 +52,6 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/system/log",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Write a message to the Scylla log",
|
||||
"type":"void",
|
||||
"nickname":"write_log_message",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"message",
|
||||
"description":"The message to write to the log",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"level",
|
||||
"description":"The logging level to use",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"enum":[
|
||||
"error",
|
||||
"warn",
|
||||
"info",
|
||||
"debug",
|
||||
"trace"
|
||||
],
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/system/drop_sstable_caches",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Drop in-memory caches for data which is in sstables",
|
||||
"type":"void",
|
||||
"nickname":"drop_sstable_caches",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/system/uptime_ms",
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get system uptime, in milliseconds",
|
||||
"type":"long",
|
||||
"nickname":"get_system_uptime",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/system/logger/{name}",
|
||||
"operations":[
|
||||
@@ -179,36 +109,6 @@
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/system/dump_llvm_profile",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Dump llvm profile data (raw profile data) that can later be used for coverage reporting or PGO (no-op if the current binary is not instrumented)",
|
||||
"type":"void",
|
||||
"nickname":"dump_profile",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/system/highest_supported_sstable_version",
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get highest supported sstable version",
|
||||
"type":"string",
|
||||
"nickname":"get_highest_supported_sstable_version",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
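Both parameters of the log-writing endpoint above travel in the query string, and level must be one of the five enum values. A sketch; the message text, host, and port are illustrative.

#include <curl/curl.h>

int main() {
    CURL* c = curl_easy_init();
    // Write "manual checkpoint" to the Scylla log at level warn.
    curl_easy_setopt(c, CURLOPT_URL,
        "http://localhost:10000/system/log?message=manual%20checkpoint&level=warn");
    curl_easy_setopt(c, CURLOPT_POSTFIELDS, "");  // empty-body POST
    curl_easy_perform(c);
    curl_easy_cleanup(c);
}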
||||
|
||||
@@ -1,337 +0,0 @@
|
||||
{
|
||||
"apiVersion":"0.0.1",
|
||||
"swaggerVersion":"1.2",
|
||||
"basePath":"{{Protocol}}://{{Host}}",
|
||||
"resourcePath":"/task_manager",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"apis":[
|
||||
{
|
||||
"path":"/task_manager/list_modules",
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get all modules names",
|
||||
"type":"array",
|
||||
"items":{
|
||||
"type":"string"
|
||||
},
|
||||
"nickname":"get_modules",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/task_manager/list_module_tasks/{module}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get a list of tasks",
|
||||
"type":"array",
|
||||
"items":{
|
||||
"type":"task_stats"
|
||||
},
|
||||
"nickname":"get_tasks",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"module",
|
||||
"description":"The module to query about",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
},
|
||||
{
|
||||
"name":"internal",
|
||||
"description":"Boolean flag indicating whether internal tasks should be shown (false by default)",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"boolean",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"keyspace",
|
||||
"description":"The keyspace to query about",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"table",
|
||||
"description":"The table to query about",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/task_manager/task_status/{task_id}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get task status",
|
||||
"type":"task_status",
|
||||
"nickname":"get_task_status",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"task_id",
|
||||
"description":"The uuid of a task to query about",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/task_manager/abort_task/{task_id}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Abort running task and its descendants",
|
||||
"type":"void",
|
||||
"nickname":"abort_task",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"task_id",
|
||||
"description":"The uuid of a task to abort",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/task_manager/wait_task/{task_id}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Wait for a task to complete",
|
||||
"type":"task_status",
|
||||
"nickname":"wait_task",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"task_id",
|
||||
"description":"The uuid of a task to wait for",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/task_manager/task_status_recursive/{task_id}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get statuses of the task and all its descendants",
|
||||
"type":"array",
|
||||
"items":{
|
||||
"type":"task_status"
|
||||
},
|
||||
"nickname":"get_task_status_recursively",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"task_id",
|
||||
"description":"The uuid of a task to query about",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/task_manager/ttl",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Set ttl in seconds and get last value",
|
||||
"type":"long",
|
||||
"nickname":"get_and_update_ttl",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"ttl",
|
||||
"description":"The number of seconds for which the tasks will be kept in memory after it finishes",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"long",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"models":{
|
||||
"task_stats" :{
|
||||
"id": "task_stats",
|
||||
"description":"A task statistics object",
|
||||
"properties":{
|
||||
"task_id":{
|
||||
"type":"string",
|
||||
"description":"The uuid of a task"
|
||||
},
|
||||
"state":{
|
||||
"type":"string",
|
||||
"enum":[
|
||||
"created",
|
||||
"running",
|
||||
"done",
|
||||
"failed"
|
||||
],
|
||||
"description":"The state of a task"
|
||||
},
|
||||
"type":{
|
||||
"type":"string",
|
||||
"description":"The description of the task"
|
||||
},
|
||||
"scope":{
|
||||
"type":"string",
|
||||
"description":"The scope of the task"
|
||||
},
|
||||
"keyspace":{
|
||||
"type":"string",
|
||||
"description":"The keyspace the task is working on (if applicable)"
|
||||
},
|
||||
"table":{
|
||||
"type":"string",
|
||||
"description":"The table the task is working on (if applicable)"
|
||||
},
|
||||
"entity":{
|
||||
"type":"string",
|
||||
"description":"Task-specific entity description"
|
||||
},
|
||||
"sequence_number":{
|
||||
"type":"long",
|
||||
"description":"The running sequence number of the task"
|
||||
}
|
||||
}
|
||||
},
|
||||
"task_status":{
|
||||
"id":"task_status",
|
||||
"description":"A task status object",
|
||||
"properties":{
|
||||
"id":{
|
||||
"type":"string",
|
||||
"description":"The uuid of the task"
|
||||
},
|
||||
"type":{
|
||||
"type":"string",
|
||||
"description":"The description of the task"
|
||||
},
|
||||
"scope":{
|
||||
"type":"string",
|
||||
"description":"The scope of the task"
|
||||
},
|
||||
"state":{
|
||||
"type":"string",
|
||||
"enum":[
|
||||
"created",
|
||||
"running",
|
||||
"done",
|
||||
"failed"
|
||||
],
|
||||
"description":"The state of the task"
|
||||
},
|
||||
"is_abortable":{
|
||||
"type":"boolean",
|
||||
"description":"Boolean flag indicating whether the task can be aborted"
|
||||
},
|
||||
"start_time":{
|
||||
"type":"datetime",
|
||||
"description":"The start time of the task"
|
||||
},
|
||||
"end_time":{
|
||||
"type":"datetime",
|
||||
"description":"The end time of the task (unspecified when the task is not completed)"
|
||||
},
|
||||
"error":{
|
||||
"type":"string",
|
||||
"description":"Error string, if the task failed"
|
||||
},
|
||||
"parent_id":{
|
||||
"type":"string",
|
||||
"description":"The uuid of the parent task"
|
||||
},
|
||||
"sequence_number":{
|
||||
"type":"long",
|
||||
"description":"The running sequence number of the task"
|
||||
},
|
||||
"shard":{
|
||||
"type":"long",
|
||||
"description":"The number of a shard the task is running on"
|
||||
},
|
||||
"keyspace":{
|
||||
"type":"string",
|
||||
"description":"The keyspace the task is working on (if applicable)"
|
||||
},
|
||||
"table":{
|
||||
"type":"string",
|
||||
"description":"The table the task is working on (if applicable)"
|
||||
},
|
||||
"entity":{
|
||||
"type":"string",
|
||||
"description":"Task-specific entity description"
|
||||
},
|
||||
"progress_units":{
|
||||
"type":"string",
|
||||
"description":"A description of the progress units"
|
||||
},
|
||||
"progress_total":{
|
||||
"type":"double",
|
||||
"description":"The total number of units to complete for the task"
|
||||
},
|
||||
"progress_completed":{
|
||||
"type":"double",
|
||||
"description":"The number of units completed so far"
|
||||
},
|
||||
"children_ids":{
|
||||
"type":"array",
|
||||
"items":{
|
||||
"type":"string"
|
||||
},
|
||||
"description":"Task IDs of children of this task"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
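Client-side, the task_status model above reduces progress reporting to two doubles measured in progress_units. A self-contained sketch; the hard-coded values stand in for fields a real client would parse out of the JSON response.

#include <cstdio>

int main() {
    // Stand-ins for task_status.progress_total / progress_completed.
    const char* state = "running";   // one of created/running/done/failed
    double progress_total = 128.0;   // expressed in progress_units
    double progress_completed = 96.0;
    double pct = progress_total > 0 ? 100.0 * progress_completed / progress_total : 0.0;
    std::printf("task is %s, %.1f%% complete\n", state, pct);
}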
||||
@@ -1,153 +0,0 @@
|
||||
{
|
||||
"apiVersion":"0.0.1",
|
||||
"swaggerVersion":"1.2",
|
||||
"basePath":"{{Protocol}}://{{Host}}",
|
||||
"resourcePath":"/task_manager_test",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"apis":[
|
||||
{
|
||||
"path":"/task_manager_test/test_module",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Register test module in task manager",
|
||||
"type":"void",
|
||||
"nickname":"register_test_module",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
]
|
||||
},
|
||||
{
|
||||
"method":"DELETE",
|
||||
"summary":"Unregister test module in task manager",
|
||||
"type":"void",
|
||||
"nickname":"unregister_test_module",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/task_manager_test/test_task",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Register test task",
|
||||
"type":"string",
|
||||
"nickname":"register_test_task",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"task_id",
|
||||
"description":"The uuid of a task to register",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"shard",
|
||||
"description":"The shard of the task",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"long",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"parent_id",
|
||||
"description":"The uuid of a parent task",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"keyspace",
|
||||
"description":"The keyspace the task is working on",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"table",
|
||||
"description":"The table the task is working on",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"entity",
|
||||
"description":"Task-specific entity description",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"method":"DELETE",
|
||||
"summary":"Unregister test task",
|
||||
"type":"void",
|
||||
"nickname":"unregister_test_task",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"task_id",
|
||||
"description":"The uuid of a task to register",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/task_manager_test/finish_test_task/{task_id}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Finish test task",
|
||||
"type":"void",
|
||||
"nickname":"finish_test_task",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"task_id",
|
||||
"description":"The uuid of a task to finish",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
},
|
||||
{
|
||||
"name":"error",
|
||||
"description":"The error with which task fails (if it does)",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,230 +0,0 @@
|
||||
{
|
||||
"apiVersion":"0.0.1",
|
||||
"swaggerVersion":"1.2",
|
||||
"basePath":"{{Protocol}}://{{Host}}",
|
||||
"resourcePath":"/tasks",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"apis":[
|
||||
{
|
||||
"path":"/tasks/compaction/keyspace_compaction/{keyspace}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Forces major compaction of a single keyspace asynchronously, returns uuid which can be used to check progress with task manager",
|
||||
"type":"string",
|
||||
"nickname":"force_keyspace_compaction_async",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"keyspace",
|
||||
"description":"The keyspace to query about",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated table (column family) names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"flush_memtables",
|
||||
"description":"Controls flushing of memtables before compaction (true by default). Set to \"false\" to skip automatic flushing of memtables before compaction, e.g. when tables were flushed explicitly before invoking the compaction api.",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"boolean",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/tasks/compaction/keyspace_cleanup/{keyspace}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Trigger a cleanup of keys on a single keyspace asynchronously, returns uuid which can be used to check progress with task manager",
|
||||
"type": "string",
|
||||
"nickname":"force_keyspace_cleanup_async",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"keyspace",
|
||||
"description":"The keyspace to query about",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated table (column family) names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/tasks/compaction/keyspace_offstrategy_compaction/{keyspace}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Perform offstrategy compaction, if needed, in a single keyspace asynchronously, returns uuid which can be used to check progress with task manager",
|
||||
"type":"string",
|
||||
"nickname":"perform_keyspace_offstrategy_compaction_async",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"keyspace",
|
||||
"description":"The keyspace to operate on",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated table (column family) names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/tasks/compaction/keyspace_scrub/{keyspace}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Scrub (deserialize + reserialize at the latest version, resolving corruptions if any) the given keyspace asynchronously, returns uuid which can be used to check progress with task manager. If columnFamilies array is empty, all CFs are scrubbed. Scrubbed CFs will be snapshotted first, if disableSnapshot is false. Scrub has the following modes: Abort (default) - abort scrub if corruption is detected; Skip (same as `skip_corrupted=true`) skip over corrupt data, omitting them from the output; Segregate - segregate data into multiple sstables if needed, such that each sstable contains data with valid order; Validate - read (no rewrite) and validate data, logging any problems found.",
|
||||
"type": "string",
|
||||
"nickname":"scrub_async",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"disable_snapshot",
|
||||
"description":"When set to true, disable snapshot",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"boolean",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"skip_corrupted",
|
||||
"description":"When set to true, skip corrupted",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"boolean",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"scrub_mode",
|
||||
"description":"How to handle corrupt data (overrides 'skip_corrupted'); ",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"enum":[
|
||||
"ABORT",
|
||||
"SKIP",
|
||||
"SEGREGATE",
|
||||
"VALIDATE"
|
||||
],
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"quarantine_mode",
|
||||
"description":"Controls whether to scrub quarantined sstables (default INCLUDE)",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"enum":[
|
||||
"INCLUDE",
|
||||
"EXCLUDE",
|
||||
"ONLY"
|
||||
],
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"keyspace",
|
||||
"description":"The keyspace to query about",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated table (column family) names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/tasks/compaction/keyspace_upgrade_sstables/{keyspace}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Rewrite all sstables to the latest version. Unlike scrub, it doesn't skip bad rows and do not snapshot sstables first asynchronously, returns uuid which can be used to check progress with task manager.",
|
||||
"type": "string",
|
||||
"nickname":"upgrade_sstables_async",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"keyspace",
|
||||
"description":"The keyspace",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
},
|
||||
{
|
||||
"name":"exclude_current_version",
|
||||
"description":"When set to true exclude current version",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"boolean",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Comma-separated table (column family) names",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
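These async compaction endpoints are meant to be combined with the task manager API earlier in the diff: capture the uuid from the POST body, then pass it to /task_manager/wait_task (or poll task_status). A sketch of the two-step flow, reusing the same body-collecting pattern as the hints sync point example; the keyspace name, host, and port are placeholders.

#include <curl/curl.h>
#include <iostream>
#include <string>

// Collect the HTTP response body into a std::string.
static size_t collect(char* data, size_t sz, size_t n, void* out) {
    static_cast<std::string*>(out)->append(data, sz * n);
    return sz * n;
}

// Perform one request and return the body; method is "GET" or "POST".
static std::string call(const std::string& url, const char* method) {
    std::string body;
    CURL* c = curl_easy_init();
    curl_easy_setopt(c, CURLOPT_URL, url.c_str());
    curl_easy_setopt(c, CURLOPT_CUSTOMREQUEST, method);
    curl_easy_setopt(c, CURLOPT_WRITEFUNCTION, collect);
    curl_easy_setopt(c, CURLOPT_WRITEDATA, &body);
    curl_easy_perform(c);
    curl_easy_cleanup(c);
    return body;
}

int main() {
    const std::string base = "http://localhost:10000";  // assumed API address
    // Step 1: kick off a major compaction; the body is the task uuid
    // (a JSON-quoted string, so strip the quotes before reuse).
    std::string uuid = call(base + "/tasks/compaction/keyspace_compaction/my_keyspace", "POST");
    if (uuid.size() >= 2 && uuid.front() == '"') { uuid = uuid.substr(1, uuid.size() - 2); }
    // Step 2: block until the compaction task finishes; prints its final status.
    std::cout << call(base + "/task_manager/wait_task/" + uuid, "GET") << "\n";
}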
||||
@@ -75,7 +75,7 @@
|
||||
"items":{
|
||||
"type":"double"
|
||||
},
|
||||
"description":"One, five and fifteen minutes rates"
|
||||
"description":"One, five and fifteen mintues rates"
|
||||
},
|
||||
"mean_rate": {
|
||||
"type":"double",
|
||||
|
||||
api/api.cc (331 changed lines)
@@ -1,17 +1,29 @@
|
||||
/*
|
||||
* Copyright 2015-present ScyllaDB
|
||||
* Copyright 2015 ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
* This file is part of Scylla.
|
||||
*
|
||||
* Scylla is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* Scylla is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "api.hh"
|
||||
#include <seastar/http/file_handler.hh>
|
||||
#include <seastar/http/transformers.hh>
|
||||
#include <seastar/http/api_docs.hh>
|
||||
#include "http/file_handler.hh"
|
||||
#include "http/transformers.hh"
|
||||
#include "http/api_docs.hh"
|
||||
#include "storage_service.hh"
|
||||
#include "token_metadata.hh"
|
||||
#include "commitlog.hh"
|
||||
#include "gossiper.hh"
|
||||
#include "failure_detector.hh"
|
||||
@@ -24,26 +36,16 @@
|
||||
#include "endpoint_snitch.hh"
|
||||
#include "compaction_manager.hh"
|
||||
#include "hinted_handoff.hh"
|
||||
#include "error_injection.hh"
|
||||
#include "authorization_cache.hh"
|
||||
#include <seastar/http/exception.hh>
|
||||
#include "http/exception.hh"
|
||||
#include "stream_manager.hh"
|
||||
#include "system.hh"
|
||||
#include "api/config.hh"
|
||||
#include "task_manager.hh"
|
||||
#include "task_manager_test.hh"
|
||||
#include "tasks.hh"
|
||||
#include "raft.hh"
|
||||
|
||||
logging::logger apilog("api");
|
||||
|
||||
namespace api {
|
||||
using namespace seastar::httpd;
|
||||
|
||||
static std::unique_ptr<reply> exception_reply(std::exception_ptr eptr) {
|
||||
try {
|
||||
std::rethrow_exception(eptr);
|
||||
} catch (const replica::no_such_keyspace& ex) {
|
||||
} catch (const no_such_keyspace& ex) {
|
||||
throw bad_param_exception(ex.what());
|
||||
}
|
||||
// We never going to get here
|
||||
@@ -52,41 +54,20 @@ static std::unique_ptr<reply> exception_reply(std::exception_ptr eptr) {
|
||||
|
||||
future<> set_server_init(http_context& ctx) {
|
||||
auto rb = std::make_shared < api_registry_builder > (ctx.api_doc);
|
||||
auto rb02 = std::make_shared < api_registry_builder20 > (ctx.api_doc, "/v2");
|
||||
|
||||
return ctx.http_server.set_routes([rb, &ctx, rb02](routes& r) {
|
||||
return ctx.http_server.set_routes([rb, &ctx](routes& r) {
|
||||
r.register_exeption_handler(exception_reply);
|
||||
r.put(GET, "/ui", new httpd::file_handler(ctx.api_dir + "/index.html",
|
||||
new content_replace("html")));
|
||||
r.add(GET, url("/ui").remainder("path"), new httpd::directory_handler(ctx.api_dir,
|
||||
new content_replace("html")));
|
||||
rb->set_api_doc(r);
|
||||
rb02->set_api_doc(r);
|
||||
rb02->register_api_file(r, "swagger20_header");
|
||||
rb02->register_api_file(r, "metrics");
|
||||
rb->register_function(r, "system",
|
||||
"The system related API");
|
||||
rb02->add_definitions_file(r, "metrics");
|
||||
set_system(ctx, r);
|
||||
rb->register_function(r, "error_injection",
|
||||
"The error injection API");
|
||||
set_error_injection(ctx, r);
|
||||
rb->register_function(r, "storage_proxy",
|
||||
"The storage proxy API");
|
||||
});
|
||||
}
|
||||
|
||||
future<> set_server_config(http_context& ctx, const db::config& cfg) {
|
||||
auto rb02 = std::make_shared < api_registry_builder20 > (ctx.api_doc, "/v2");
|
||||
return ctx.http_server.set_routes([&ctx, &cfg, rb02](routes& r) {
|
||||
set_config(rb02, ctx, r, cfg, false);
|
||||
});
|
||||
}
|
||||
|
||||
future<> unset_server_config(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_config(ctx, r); });
|
||||
}
|
||||
|
||||
static future<> register_api(http_context& ctx, const sstring& api_name,
|
||||
const sstring api_desc,
|
||||
std::function<void(http_context& ctx, routes& r)> f) {
|
||||
@@ -98,182 +79,49 @@ static future<> register_api(http_context& ctx, const sstring& api_name,
|
||||
});
|
||||
}
|
||||
|
||||
future<> set_transport_controller(http_context& ctx, cql_transport::controller& ctl) {
|
||||
return ctx.http_server.set_routes([&ctx, &ctl] (routes& r) { set_transport_controller(ctx, r, ctl); });
|
||||
future<> set_server_storage_service(http_context& ctx) {
|
||||
return register_api(ctx, "storage_service", "The storage service API", set_storage_service);
|
||||
}
|
||||
|
||||
future<> unset_transport_controller(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_transport_controller(ctx, r); });
|
||||
future<> set_server_snitch(http_context& ctx) {
|
||||
return register_api(ctx, "endpoint_snitch_info", "The endpoint snitch info API", set_endpoint_snitch);
|
||||
}
|
||||
|
||||
future<> set_thrift_controller(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { set_thrift_controller(ctx, r); });
|
||||
future<> set_server_gossip(http_context& ctx) {
|
||||
return register_api(ctx, "gossiper",
|
||||
"The gossiper API", set_gossiper);
|
||||
}
|
||||
|
||||
future<> unset_thrift_controller(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_thrift_controller(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_storage_service(http_context& ctx, sharded<service::storage_service>& ss, service::raft_group0_client& group0_client) {
|
||||
return register_api(ctx, "storage_service", "The storage service API", [&ss, &group0_client] (http_context& ctx, routes& r) {
|
||||
set_storage_service(ctx, r, ss, group0_client);
|
||||
});
|
||||
}
|
||||
|
||||
future<> unset_server_storage_service(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_storage_service(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_load_meter(http_context& ctx, service::load_meter& lm) {
|
||||
return ctx.http_server.set_routes([&ctx, &lm] (routes& r) { set_load_meter(ctx, r, lm); });
|
||||
}
|
||||
|
||||
future<> unset_load_meter(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_load_meter(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_sstables_loader(http_context& ctx, sharded<sstables_loader>& sst_loader) {
|
||||
return ctx.http_server.set_routes([&ctx, &sst_loader] (routes& r) { set_sstables_loader(ctx, r, sst_loader); });
|
||||
}
|
||||
|
||||
future<> unset_server_sstables_loader(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_sstables_loader(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_view_builder(http_context& ctx, sharded<db::view::view_builder>& vb) {
|
||||
return ctx.http_server.set_routes([&ctx, &vb] (routes& r) { set_view_builder(ctx, r, vb); });
|
||||
}
|
||||
|
||||
future<> unset_server_view_builder(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_view_builder(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_repair(http_context& ctx, sharded<repair_service>& repair) {
|
||||
return ctx.http_server.set_routes([&ctx, &repair] (routes& r) { set_repair(ctx, r, repair); });
|
||||
}
|
||||
|
||||
future<> unset_server_repair(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_repair(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_authorization_cache(http_context &ctx, sharded<auth::service> &auth_service) {
|
||||
return register_api(ctx, "authorization_cache",
|
||||
"The authorization cache API", [&auth_service] (http_context &ctx, routes &r) {
|
||||
set_authorization_cache(ctx, r, auth_service);
|
||||
});
|
||||
}
|
||||
|
||||
future<> unset_server_authorization_cache(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_authorization_cache(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_snapshot(http_context& ctx, sharded<db::snapshot_ctl>& snap_ctl) {
|
||||
return ctx.http_server.set_routes([&ctx, &snap_ctl] (routes& r) { set_snapshot(ctx, r, snap_ctl); });
|
||||
}
|
||||
|
||||
future<> unset_server_snapshot(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_snapshot(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_token_metadata(http_context& ctx, sharded<locator::shared_token_metadata>& tm) {
|
||||
return ctx.http_server.set_routes([&ctx, &tm] (routes& r) { set_token_metadata(ctx, r, tm); });
|
||||
}
|
||||
|
||||
future<> unset_server_token_metadata(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_token_metadata(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_snitch(http_context& ctx, sharded<locator::snitch_ptr>& snitch) {
|
||||
return register_api(ctx, "endpoint_snitch_info", "The endpoint snitch info API", [&snitch] (http_context& ctx, routes& r) {
|
||||
set_endpoint_snitch(ctx, r, snitch);
|
||||
});
|
||||
}
|
||||
|
||||
future<> unset_server_snitch(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_endpoint_snitch(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_gossip(http_context& ctx, sharded<gms::gossiper>& g) {
|
||||
co_await register_api(ctx, "gossiper",
|
||||
"The gossiper API", [&g] (http_context& ctx, routes& r) {
|
||||
set_gossiper(ctx, r, g.local());
|
||||
});
|
||||
co_await register_api(ctx, "failure_detector",
|
||||
"The failure detector API", [&g] (http_context& ctx, routes& r) {
|
||||
set_failure_detector(ctx, r, g.local());
|
||||
});
|
||||
}
|
||||
|
||||
future<> unset_server_gossip(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) {
|
||||
unset_gossiper(ctx, r);
|
||||
unset_failure_detector(ctx, r);
|
||||
});
|
||||
}
|
||||
|
||||
future<> set_server_column_family(http_context& ctx, sharded<db::system_keyspace>& sys_ks) {
|
||||
future<> set_server_load_sstable(http_context& ctx) {
|
||||
return register_api(ctx, "column_family",
|
||||
"The column family API", [&sys_ks] (http_context& ctx, routes& r) {
|
||||
set_column_family(ctx, r, sys_ks);
|
||||
});
|
||||
"The column family API", set_column_family);
|
||||
}
|
||||
|
||||
future<> unset_server_column_family(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_column_family(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_messaging_service(http_context& ctx, sharded<netw::messaging_service>& ms) {
|
||||
future<> set_server_messaging_service(http_context& ctx) {
|
||||
return register_api(ctx, "messaging_service",
|
||||
"The messaging service API", [&ms] (http_context& ctx, routes& r) {
|
||||
set_messaging_service(ctx, r, ms);
|
||||
});
|
||||
}
|
||||
future<> unset_server_messaging_service(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_messaging_service(ctx, r); });
|
||||
"The messaging service API", set_messaging_service);
|
||||
}
|
||||
|
||||
future<> set_server_storage_proxy(http_context& ctx, sharded<service::storage_proxy>& proxy) {
|
||||
return ctx.http_server.set_routes([&ctx, &proxy] (routes& r) { set_storage_proxy(ctx, r, proxy); });
|
||||
future<> set_server_storage_proxy(http_context& ctx) {
|
||||
return register_api(ctx, "storage_proxy",
|
||||
"The storage proxy API", set_storage_proxy);
|
||||
}
|
||||
|
||||
future<> unset_server_storage_proxy(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_storage_proxy(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_stream_manager(http_context& ctx, sharded<streaming::stream_manager>& sm) {
|
||||
future<> set_server_stream_manager(http_context& ctx) {
|
||||
return register_api(ctx, "stream_manager",
|
||||
"The stream manager API", [&sm] (http_context& ctx, routes& r) {
|
||||
set_stream_manager(ctx, r, sm);
|
||||
});
|
||||
"The stream manager API", set_stream_manager);
|
||||
}
|
||||
|
||||
future<> unset_server_stream_manager(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_stream_manager(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_cache(http_context& ctx) {
|
||||
return register_api(ctx, "cache_service",
|
||||
"The cache service API", set_cache_service);
|
||||
}
|
||||
|
||||
future<> set_hinted_handoff(http_context& ctx, sharded<service::storage_proxy>& proxy) {
|
||||
return register_api(ctx, "hinted_handoff",
|
||||
"The hinted handoff API", [&proxy] (http_context& ctx, routes& r) {
|
||||
set_hinted_handoff(ctx, r, proxy);
|
||||
});
|
||||
}
|
||||
|
||||
future<> unset_hinted_handoff(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_hinted_handoff(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_compaction_manager(http_context& ctx) {
|
||||
future<> set_server_gossip_settle(http_context& ctx) {
|
||||
auto rb = std::make_shared < api_registry_builder > (ctx.api_doc);
|
||||
|
||||
return ctx.http_server.set_routes([rb, &ctx](routes& r) {
|
||||
rb->register_function(r, "compaction_manager",
|
||||
"The Compaction manager API");
|
||||
set_compaction_manager(ctx, r);
|
||||
rb->register_function(r, "failure_detector",
|
||||
"The failure detector API");
|
||||
set_failure_detector(ctx,r);
|
||||
rb->register_function(r, "cache_service",
|
||||
"The cache service API");
|
||||
set_cache_service(ctx,r);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -281,102 +129,23 @@ future<> set_server_done(http_context& ctx) {
|
||||
auto rb = std::make_shared < api_registry_builder > (ctx.api_doc);
|
||||
|
||||
return ctx.http_server.set_routes([rb, &ctx](routes& r) {
|
||||
rb->register_function(r, "compaction_manager",
|
||||
"The Compaction manager API");
|
||||
set_compaction_manager(ctx, r);
|
||||
rb->register_function(r, "lsa", "Log-structured allocator API");
|
||||
set_lsa(ctx, r);
|
||||
|
||||
rb->register_function(r, "commitlog",
|
||||
"The commit log API");
|
||||
set_commitlog(ctx,r);
|
||||
rb->register_function(r, "hinted_handoff",
|
||||
"The hinted handoff API");
|
||||
set_hinted_handoff(ctx, r);
|
||||
rb->register_function(r, "collectd",
|
||||
"The collectd API");
|
||||
set_collectd(ctx, r);
|
||||
});
|
||||
}
|
||||
|
||||
future<> set_server_task_manager(http_context& ctx, sharded<tasks::task_manager>& tm, lw_shared_ptr<db::config> cfg) {
|
||||
auto rb = std::make_shared < api_registry_builder > (ctx.api_doc);
|
||||
|
||||
return ctx.http_server.set_routes([rb, &ctx, &tm, &cfg = *cfg](routes& r) {
|
||||
rb->register_function(r, "task_manager",
|
||||
"The task manager API");
|
||||
set_task_manager(ctx, r, tm, cfg);
|
||||
});
|
||||
}
|
||||
|
||||
future<> unset_server_task_manager(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_task_manager(ctx, r); });
|
||||
}
|
||||
|
||||
#ifndef SCYLLA_BUILD_MODE_RELEASE
|
||||
|
||||
future<> set_server_task_manager_test(http_context& ctx, sharded<tasks::task_manager>& tm) {
|
||||
auto rb = std::make_shared < api_registry_builder > (ctx.api_doc);
|
||||
|
||||
return ctx.http_server.set_routes([rb, &ctx, &tm](routes& r) mutable {
|
||||
rb->register_function(r, "task_manager_test",
|
||||
"The task manager test API");
|
||||
set_task_manager_test(ctx, r, tm);
|
||||
});
|
||||
}
|
||||
|
||||
future<> unset_server_task_manager_test(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_task_manager_test(ctx, r); });
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
future<> set_server_tasks_compaction_module(http_context& ctx, sharded<service::storage_service>& ss, sharded<db::snapshot_ctl>& snap_ctl) {
|
||||
auto rb = std::make_shared < api_registry_builder > (ctx.api_doc);
|
||||
|
||||
return ctx.http_server.set_routes([rb, &ctx, &ss, &snap_ctl](routes& r) {
|
||||
rb->register_function(r, "tasks",
|
||||
"The tasks API");
|
||||
set_tasks_compaction_module(ctx, r, ss, snap_ctl);
|
||||
});
|
||||
}
|
||||
|
||||
future<> unset_server_tasks_compaction_module(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_tasks_compaction_module(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_raft(http_context& ctx, sharded<service::raft_group_registry>& raft_gr) {
|
||||
auto rb = std::make_shared<api_registry_builder>(ctx.api_doc);
|
||||
return ctx.http_server.set_routes([rb, &ctx, &raft_gr] (routes& r) {
|
||||
rb->register_function(r, "raft", "The Raft API");
|
||||
set_raft(ctx, r, raft_gr);
|
||||
});
|
||||
}
|
||||
|
||||
future<> unset_server_raft(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_raft(ctx, r); });
|
||||
}

void req_params::process(const request& req) {
    // Process mandatory parameters
    for (auto& [name, ent] : params) {
        if (!ent.is_mandatory) {
            continue;
        }
        try {
            ent.value = req.get_path_param(name);
        } catch (std::out_of_range&) {
            throw httpd::bad_param_exception(fmt::format("Mandatory parameter '{}' was not provided", name));
        }
    }

    // Process optional parameters
    for (auto& [name, value] : req.query_parameters) {
        try {
            auto& ent = params.at(name);
            if (ent.is_mandatory) {
                throw httpd::bad_param_exception(fmt::format("Parameter '{}' is expected to be provided as part of the request url", name));
            }
            ent.value = value;
        } catch (std::out_of_range&) {
            throw httpd::bad_param_exception(fmt::format("Unsupported optional parameter '{}'", name));
        }
    }
}

}

api/api.hh

@@ -1,24 +1,34 @@
/*
 * Copyright 2015-present ScyllaDB
 * Copyright 2015 ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

#pragma once

#include <seastar/json/json_elements.hh>
#include <type_traits>
#include "json/json_elements.hh"
#include <boost/lexical_cast.hpp>
#include <boost/algorithm/string/split.hpp>
#include <boost/algorithm/string/classification.hpp>
#include <boost/units/detail/utility.hpp>
#include "api/api_init.hh"
#include "api/api-doc/utils.json.hh"
#include "utils/histogram.hh"
#include "utils/estimated_histogram.hh"
#include <seastar/http/exception.hh>
#include "http/exception.hh"
#include "api_init.hh"
#include "seastarx.hh"

namespace api {

@@ -26,10 +36,8 @@ namespace api {
template<class T>
std::vector<sstring> container_to_vec(const T& container) {
    std::vector<sstring> res;
    res.reserve(std::size(container));

    for (const auto& i : container) {
        res.push_back(fmt::to_string(i));
    for (auto i : container) {
        res.push_back(boost::lexical_cast<std::string>(i));
    }
    return res;
}
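A quick hedged illustration of the helper above (the input is made up): both variants stringify each element, the new one via fmt::to_string and the old one via boost::lexical_cast.

    std::vector<int> shards{1, 2, 3};
    std::vector<sstring> strings = api::container_to_vec(shards);  // {"1", "2", "3"}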

@@ -37,43 +45,37 @@ std::vector<sstring> container_to_vec(const T& container) {
template<class T>
std::vector<T> map_to_key_value(const std::map<sstring, sstring>& map) {
    std::vector<T> res;
    res.reserve(map.size());

    for (const auto& [key, value] : map) {
    for (auto i : map) {
        res.push_back(T());
        res.back().key = key;
        res.back().value = value;
        res.back().key = i.first;
        res.back().value = i.second;
    }
    return res;
}

template<class T, class MAP>
std::vector<T>& map_to_key_value(const MAP& map, std::vector<T>& res) {
    res.reserve(res.size() + std::size(map));

    for (const auto& [key, value] : map) {
    for (auto i : map) {
        T val;
        val.key = fmt::to_string(key);
        val.value = fmt::to_string(value);
        val.key = boost::lexical_cast<std::string>(i.first);
        val.value = boost::lexical_cast<std::string>(i.second);
        res.push_back(val);
    }
    return res;
}
template <typename T, typename S = T>
T map_sum(T&& dest, const S& src) {
    for (const auto& i : src) {
    for (auto i : src) {
        dest[i.first] += i.second;
    }
    return std::move(dest);
    return dest;
}

template <typename MAP>
std::vector<sstring> map_keys(const MAP& map) {
    std::vector<sstring> res;
    res.reserve(std::size(map));

    for (const auto& i : map) {
        res.push_back(fmt::to_string(i.first));
        res.push_back(boost::lexical_cast<std::string>(i.first));
    }
    return res;
}
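These map helpers feed JSON result types that expose key and value members. A hedged sketch of how they compose; the kv struct is a stand-in for one of the generated api-doc types, not a real name from this diff:

    struct kv { sstring key; sstring value; };  // hypothetical JSON element type

    std::map<sstring, sstring> opts{{"class", "SizeTieredCompactionStrategy"}};
    auto pairs = api::map_to_key_value<kv>(opts);  // [{key: "class", value: "SizeTiered..."}]
    auto keys = api::map_keys(opts);               // {"class"}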

@@ -89,6 +91,13 @@ inline std::vector<sstring> split(const sstring& text, const char* separator) {
    return boost::split(tokens, text, boost::is_any_of(separator));
}

/**
 * Split a column family parameter
 */
inline std::vector<sstring> split_cf(const sstring& cf) {
    return split(cf, ",");
}

/**
 * A helper function to sum values on a distributed object that
 * has a get_stats method.
@@ -145,14 +154,6 @@ future<json::json_return_type> sum_timer_stats(distributed<T>& d, utils::timed_
    });
}

template<class T, class F>
future<json::json_return_type> sum_timer_stats(distributed<T>& d, utils::timed_rate_moving_average_summary_and_histogram F::*f) {
    return d.map_reduce0([f](const T& p) {return (p.get_stats().*f).rate();}, utils::rate_moving_average_and_histogram(),
            std::plus<utils::rate_moving_average_and_histogram>()).then([](const utils::rate_moving_average_and_histogram& val) {
        return make_ready_future<json::json_return_type>(timer_to_json(val));
    });
}

inline int64_t min_int64(int64_t a, int64_t b) {
    return std::min(a,b);
}

@@ -197,7 +198,7 @@ struct basic_ratio_holder : public json::jsonable {
typedef basic_ratio_holder<double> ratio_holder;
typedef basic_ratio_holder<int64_t> integral_ratio_holder;

class unimplemented_exception : public httpd::base_exception {
class unimplemented_exception : public base_exception {
public:
    unimplemented_exception()
        : base_exception("API call is not supported yet", reply::status_type::internal_server_error) {
@@ -215,105 +216,4 @@ std::vector<T> concat(std::vector<T> a, std::vector<T>&& b) {
    return a;
}

template <class T, class Base = T>
class req_param {
public:
    sstring name;
    sstring param;
    T value;

    req_param(const request& req, sstring name, T default_val) : name(name) {
        param = req.get_query_param(name);
        if (param.empty()) {
            value = default_val;
            return;
        }
        try {
            // boost::lexical_cast does not use boolalpha. Converting a
            // true/false throws exceptions. We don't want that.
            if constexpr (std::is_same_v<Base, bool>) {
                // Cannot use boolalpha because we (probably) want to
                // accept 1 and 0 as well as true and false. And True. And fAlse.
                std::transform(param.begin(), param.end(), param.begin(), ::tolower);
                if (param == "true" || param == "1") {
                    value = T(true);
                } else if (param == "false" || param == "0") {
                    value = T(false);
                } else {
                    throw boost::bad_lexical_cast{};
                }
            } else {
                value = T{boost::lexical_cast<Base>(param)};
            }
        } catch (boost::bad_lexical_cast&) {
            throw httpd::bad_param_exception(format("{} ({}): type error - should be {}", name, param, boost::units::detail::demangle(typeid(Base).name())));
        }
    }

    operator T() const { return value; }
};
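A hedged usage sketch of req_param; the parameter names are invented for illustration. The constructor parses the query string eagerly, so by the time the handler body runs the member holds either the default or a validated, converted value, and a malformed input has already raised bad_param_exception with a demangled type name.

    // Inside a route handler, given a std::unique_ptr<http::request> `req`:
    req_param<int64_t> timeout(*req, "timeout", 60);  // ?timeout=30 -> 30; absent -> 60
    req_param<bool> verbose(*req, "verbose", false);  // accepts true/false and 1/0, any case
    int64_t t = timeout;                              // implicit conversion via operator T()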

using mandatory = bool_class<struct mandatory_tag>;

class req_params {
public:
    struct def {
        std::optional<sstring> value;
        mandatory is_mandatory = mandatory::no;

        def(std::optional<sstring> value_ = std::nullopt, mandatory is_mandatory_ = mandatory::no)
            : value(std::move(value_))
            , is_mandatory(is_mandatory_)
        { }

        def(mandatory is_mandatory_)
            : is_mandatory(is_mandatory_)
        { }
    };

private:
    std::unordered_map<sstring, def> params;

public:
    req_params(std::initializer_list<std::pair<sstring, def>> l) {
        for (const auto& [name, ent] : l) {
            add(std::move(name), std::move(ent));
        }
    }

    void add(sstring name, def ent) {
        params.emplace(std::move(name), std::move(ent));
    }

    void process(const request& req);

    const std::optional<sstring>& get(const char* name) const {
        return params.at(name).value;
    }

    template <typename T = sstring>
    const std::optional<T> get_as(const char* name) const {
        return get(name);
    }

    template <typename T = sstring>
    requires std::same_as<T, bool>
    const std::optional<bool> get_as(const char* name) const {
        auto value = get(name);
        if (!value) {
            return std::nullopt;
        }
        std::transform(value->begin(), value->end(), value->begin(), ::tolower);
        if (value == "true" || value == "yes" || value == "1") {
            return true;
        }
        if (value == "false" || value == "no" || value == "0") {
            return false;
        }
        throw boost::bad_lexical_cast{};
    }
};
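Tying req_params to the process() implementation shown earlier in api.cc: mandatory entries are filled from path parameters, optional entries from the query string, and anything missing, misplaced, or unknown raises bad_param_exception. A hedged sketch with invented parameter names:

    // e.g. for a route like /some_module/{keyspace}?cf=t1&verbose=yes
    req_params params({
        {"keyspace", {mandatory::yes}},  // must come from the request path
        {"cf", {std::nullopt}},          // optional query parameter
        {"verbose", {std::nullopt}},
    });
    params.process(*req);                           // validates and fills values
    auto ks = params.get("keyspace");               // std::optional<sstring>
    auto verbose = params.get_as<bool>("verbose");  // "yes" -> true, per get_as above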

httpd::utils_json::estimated_histogram time_to_json_histogram(const utils::time_estimated_histogram& val);

}

api/api_init.hh

@@ -3,71 +3,25 @@
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once

#include <seastar/http/httpd.hh>
#include <seastar/core/future.hh>

#include "replica/database_fwd.hh"
#include "tasks/task_manager.hh"
#include "seastarx.hh"

using request = http::request;
using reply = http::reply;

namespace service {

class load_meter;
class storage_proxy;
class storage_service;
class raft_group0_client;
class raft_group_registry;

} // namespace service

class sstables_loader;

namespace streaming {
class stream_manager;
}

namespace gms {
class inet_address;
}

namespace locator {

class token_metadata;
class shared_token_metadata;
class snitch_ptr;

} // namespace locator

namespace cql_transport { class controller; }
namespace db {
class snapshot_ctl;
class config;
namespace view {
class view_builder;
}
class system_keyspace;
}
namespace netw { class messaging_service; }
class repair_service;

namespace gms {

class gossiper;

}

namespace auth { class service; }

namespace tasks {
class task_manager;
}
#include "database.hh"
#include "service/storage_proxy.hh"
#include "http/httpd.hh"

namespace api {

@@ -75,61 +29,24 @@ struct http_context {
    sstring api_dir;
    sstring api_doc;
    httpd::http_server_control http_server;
    distributed<replica::database>& db;

    http_context(distributed<replica::database>& _db)
        : db(_db)
    {
    distributed<database>& db;
    distributed<service::storage_proxy>& sp;
    http_context(distributed<database>& _db,
            distributed<service::storage_proxy>& _sp)
        : db(_db), sp(_sp) {
    }
};

future<> set_server_init(http_context& ctx);
future<> set_server_config(http_context& ctx, const db::config& cfg);
future<> unset_server_config(http_context& ctx);
future<> set_server_snitch(http_context& ctx, sharded<locator::snitch_ptr>& snitch);
future<> unset_server_snitch(http_context& ctx);
future<> set_server_storage_service(http_context& ctx, sharded<service::storage_service>& ss, service::raft_group0_client&);
future<> unset_server_storage_service(http_context& ctx);
future<> set_server_sstables_loader(http_context& ctx, sharded<sstables_loader>& sst_loader);
future<> unset_server_sstables_loader(http_context& ctx);
future<> set_server_view_builder(http_context& ctx, sharded<db::view::view_builder>& vb);
future<> unset_server_view_builder(http_context& ctx);
future<> set_server_repair(http_context& ctx, sharded<repair_service>& repair);
future<> unset_server_repair(http_context& ctx);
future<> set_transport_controller(http_context& ctx, cql_transport::controller& ctl);
future<> unset_transport_controller(http_context& ctx);
future<> set_thrift_controller(http_context& ctx);
future<> unset_thrift_controller(http_context& ctx);
future<> set_server_authorization_cache(http_context& ctx, sharded<auth::service> &auth_service);
future<> unset_server_authorization_cache(http_context& ctx);
future<> set_server_snapshot(http_context& ctx, sharded<db::snapshot_ctl>& snap_ctl);
future<> unset_server_snapshot(http_context& ctx);
future<> set_server_token_metadata(http_context& ctx, sharded<locator::shared_token_metadata>& tm);
future<> unset_server_token_metadata(http_context& ctx);
future<> set_server_gossip(http_context& ctx, sharded<gms::gossiper>& g);
future<> unset_server_gossip(http_context& ctx);
future<> set_server_column_family(http_context& ctx, sharded<db::system_keyspace>& sys_ks);
future<> unset_server_column_family(http_context& ctx);
future<> set_server_messaging_service(http_context& ctx, sharded<netw::messaging_service>& ms);
future<> unset_server_messaging_service(http_context& ctx);
future<> set_server_storage_proxy(http_context& ctx, sharded<service::storage_proxy>& proxy);
future<> unset_server_storage_proxy(http_context& ctx);
future<> set_server_stream_manager(http_context& ctx, sharded<streaming::stream_manager>& sm);
future<> unset_server_stream_manager(http_context& ctx);
future<> set_hinted_handoff(http_context& ctx, sharded<service::storage_proxy>& p);
future<> unset_hinted_handoff(http_context& ctx);
future<> set_server_cache(http_context& ctx);
future<> set_server_compaction_manager(http_context& ctx);
future<> set_server_snitch(http_context& ctx);
future<> set_server_storage_service(http_context& ctx);
future<> set_server_gossip(http_context& ctx);
future<> set_server_load_sstable(http_context& ctx);
future<> set_server_messaging_service(http_context& ctx);
future<> set_server_storage_proxy(http_context& ctx);
future<> set_server_stream_manager(http_context& ctx);
future<> set_server_gossip_settle(http_context& ctx);
future<> set_server_done(http_context& ctx);
future<> set_server_task_manager(http_context& ctx, sharded<tasks::task_manager>& tm, lw_shared_ptr<db::config> cfg);
future<> unset_server_task_manager(http_context& ctx);
future<> set_server_task_manager_test(http_context& ctx, sharded<tasks::task_manager>& tm);
future<> unset_server_task_manager_test(http_context& ctx);
future<> set_server_tasks_compaction_module(http_context& ctx, sharded<service::storage_service>& ss, sharded<db::snapshot_ctl>& snap_ctl);
future<> unset_server_tasks_compaction_module(http_context& ctx);
future<> set_server_raft(http_context&, sharded<service::raft_group_registry>&);
future<> unset_server_raft(http_context&);
future<> set_load_meter(http_context& ctx, service::load_meter& lm);
future<> unset_load_meter(http_context& ctx);

}

@@ -1,33 +0,0 @@
/*
 * Copyright (C) 2022-present ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 */

#include "api/api-doc/authorization_cache.json.hh"

#include "api/authorization_cache.hh"
#include "auth/service.hh"

namespace api {
using namespace json;
using namespace seastar::httpd;

void set_authorization_cache(http_context& ctx, routes& r, sharded<auth::service> &auth_service) {
    httpd::authorization_cache_json::authorization_cache_reset.set(r, [&auth_service] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
        co_await auth_service.invoke_on_all([] (auth::service& auth) -> future<> {
            auth.reset_authorization_cache();
            return make_ready_future<>();
        });

        co_return json_void();
    });
}

void unset_authorization_cache(http_context& ctx, routes& r) {
    httpd::authorization_cache_json::authorization_cache_reset.unset(r);
}

}
@@ -1,27 +0,0 @@
/*
 * Copyright (C) 2022-present ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 */

#pragma once

#include <seastar/core/sharded.hh>

namespace seastar::httpd {
class routes;
}

namespace auth {
class service;
}

namespace api {

struct http_context;
void set_authorization_cache(http_context& ctx, seastar::httpd::routes& r, seastar::sharded<auth::service> &auth_service);
void unset_authorization_cache(http_context& ctx, seastar::httpd::routes& r);

}

@@ -1,140 +1,151 @@
/*
 * Copyright (C) 2015-present ScyllaDB
 * Copyright (C) 2015 ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

#include "cache_service.hh"
#include "api/api.hh"
#include "api/api-doc/cache_service.json.hh"
#include "column_family.hh"

namespace api {
using namespace json;
using namespace seastar::httpd;
namespace cs = httpd::cache_service_json;

void set_cache_service(http_context& ctx, routes& r) {
    cs::get_row_cache_save_period_in_seconds.set(r, [](std::unique_ptr<http::request> req) {
    cs::get_row_cache_save_period_in_seconds.set(r, [](std::unique_ptr<request> req) {
        // We never save the cache
        // Origin uses 0 for never
        return make_ready_future<json::json_return_type>(0);
    });

    cs::set_row_cache_save_period_in_seconds.set(r, [](std::unique_ptr<http::request> req) {
    cs::set_row_cache_save_period_in_seconds.set(r, [](std::unique_ptr<request> req) {
        // TBD
        unimplemented();
        auto period = req->get_query_param("period");
        return make_ready_future<json::json_return_type>(json_void());
    });

    cs::get_key_cache_save_period_in_seconds.set(r, [](std::unique_ptr<http::request> req) {
    cs::get_key_cache_save_period_in_seconds.set(r, [](std::unique_ptr<request> req) {
        // We never save the cache
        // Origin uses 0 for never
        return make_ready_future<json::json_return_type>(0);
    });

    cs::set_key_cache_save_period_in_seconds.set(r, [](std::unique_ptr<http::request> req) {
    cs::set_key_cache_save_period_in_seconds.set(r, [](std::unique_ptr<request> req) {
        // TBD
        unimplemented();
        auto period = req->get_query_param("period");
        return make_ready_future<json::json_return_type>(json_void());
    });

    cs::get_counter_cache_save_period_in_seconds.set(r, [](std::unique_ptr<http::request> req) {
    cs::get_counter_cache_save_period_in_seconds.set(r, [](std::unique_ptr<request> req) {
        // We never save the cache
        // Origin uses 0 for never
        return make_ready_future<json::json_return_type>(0);
    });

    cs::set_counter_cache_save_period_in_seconds.set(r, [](std::unique_ptr<http::request> req) {
    cs::set_counter_cache_save_period_in_seconds.set(r, [](std::unique_ptr<request> req) {
        // TBD
        unimplemented();
        auto ccspis = req->get_query_param("ccspis");
        return make_ready_future<json::json_return_type>(json_void());
    });

    cs::get_row_cache_keys_to_save.set(r, [](std::unique_ptr<http::request> req) {
    cs::get_row_cache_keys_to_save.set(r, [](std::unique_ptr<request> req) {
        // TBD
        unimplemented();
        return make_ready_future<json::json_return_type>(0);
    });

    cs::set_row_cache_keys_to_save.set(r, [](std::unique_ptr<http::request> req) {
    cs::set_row_cache_keys_to_save.set(r, [](std::unique_ptr<request> req) {
        // TBD
        unimplemented();
        auto rckts = req->get_query_param("rckts");
        return make_ready_future<json::json_return_type>(json_void());
    });

    cs::get_key_cache_keys_to_save.set(r, [](std::unique_ptr<http::request> req) {
    cs::get_key_cache_keys_to_save.set(r, [](std::unique_ptr<request> req) {
        // TBD
        unimplemented();
        return make_ready_future<json::json_return_type>(0);
    });

    cs::set_key_cache_keys_to_save.set(r, [](std::unique_ptr<http::request> req) {
    cs::set_key_cache_keys_to_save.set(r, [](std::unique_ptr<request> req) {
        // TBD
        unimplemented();
        auto kckts = req->get_query_param("kckts");
        return make_ready_future<json::json_return_type>(json_void());
    });

    cs::get_counter_cache_keys_to_save.set(r, [](std::unique_ptr<http::request> req) {
    cs::get_counter_cache_keys_to_save.set(r, [](std::unique_ptr<request> req) {
        // TBD
        unimplemented();
        return make_ready_future<json::json_return_type>(0);
    });

    cs::set_counter_cache_keys_to_save.set(r, [](std::unique_ptr<http::request> req) {
    cs::set_counter_cache_keys_to_save.set(r, [](std::unique_ptr<request> req) {
        // TBD
        unimplemented();
        auto cckts = req->get_query_param("cckts");
        return make_ready_future<json::json_return_type>(json_void());
    });

    cs::invalidate_key_cache.set(r, [](std::unique_ptr<http::request> req) {
    cs::invalidate_key_cache.set(r, [](std::unique_ptr<request> req) {
        // TBD
        unimplemented();
        return make_ready_future<json::json_return_type>(json_void());
    });

    cs::invalidate_counter_cache.set(r, [](std::unique_ptr<http::request> req) {
    cs::invalidate_counter_cache.set(r, [](std::unique_ptr<request> req) {
        // TBD
        unimplemented();
        return make_ready_future<json::json_return_type>(json_void());
    });

    cs::set_row_cache_capacity_in_mb.set(r, [](std::unique_ptr<http::request> req) {
    cs::set_row_cache_capacity_in_mb.set(r, [](std::unique_ptr<request> req) {
        // TBD
        unimplemented();
        auto capacity = req->get_query_param("capacity");
        return make_ready_future<json::json_return_type>(json_void());
    });

    cs::set_key_cache_capacity_in_mb.set(r, [](std::unique_ptr<http::request> req) {
    cs::set_key_cache_capacity_in_mb.set(r, [](std::unique_ptr<request> req) {
        // TBD
        unimplemented();
        auto period = req->get_query_param("period");
        return make_ready_future<json::json_return_type>(json_void());
    });

    cs::set_counter_cache_capacity_in_mb.set(r, [](std::unique_ptr<http::request> req) {
    cs::set_counter_cache_capacity_in_mb.set(r, [](std::unique_ptr<request> req) {
        // TBD
        unimplemented();
        auto capacity = req->get_query_param("capacity");
        return make_ready_future<json::json_return_type>(json_void());
    });

    cs::save_caches.set(r, [](std::unique_ptr<http::request> req) {
    cs::save_caches.set(r, [](std::unique_ptr<request> req) {
        // TBD
        unimplemented();
        return make_ready_future<json::json_return_type>(json_void());
    });

    cs::get_key_capacity.set(r, [] (std::unique_ptr<http::request> req) {
    cs::get_key_capacity.set(r, [] (std::unique_ptr<request> req) {
        // TBD
        // FIXME
        // we don't support keys cache,
@@ -142,7 +153,7 @@ void set_cache_service(http_context& ctx, routes& r) {
        return make_ready_future<json::json_return_type>(0);
    });

    cs::get_key_hits.set(r, [] (std::unique_ptr<http::request> req) {
    cs::get_key_hits.set(r, [] (std::unique_ptr<request> req) {
        // TBD
        // FIXME
        // we don't support keys cache,
@@ -150,7 +161,7 @@ void set_cache_service(http_context& ctx, routes& r) {
        return make_ready_future<json::json_return_type>(0);
    });

    cs::get_key_requests.set(r, [] (std::unique_ptr<http::request> req) {
    cs::get_key_requests.set(r, [] (std::unique_ptr<request> req) {
        // TBD
        // FIXME
        // we don't support keys cache,
@@ -158,7 +169,7 @@ void set_cache_service(http_context& ctx, routes& r) {
        return make_ready_future<json::json_return_type>(0);
    });

    cs::get_key_hit_rate.set(r, [] (std::unique_ptr<http::request> req) {
    cs::get_key_hit_rate.set(r, [] (std::unique_ptr<request> req) {
        // TBD
        // FIXME
        // we don't support keys cache,
@@ -166,21 +177,21 @@ void set_cache_service(http_context& ctx, routes& r) {
        return make_ready_future<json::json_return_type>(0);
    });

    cs::get_key_hits_moving_avrage.set(r, [] (std::unique_ptr<http::request> req) {
    cs::get_key_hits_moving_avrage.set(r, [&ctx] (std::unique_ptr<request> req) {
        // TBD
        // FIXME
        // See above
        return make_ready_future<json::json_return_type>(meter_to_json(utils::rate_moving_average()));
    });

    cs::get_key_requests_moving_avrage.set(r, [] (std::unique_ptr<http::request> req) {
    cs::get_key_requests_moving_avrage.set(r, [&ctx] (std::unique_ptr<request> req) {
        // TBD
        // FIXME
        // See above
        return make_ready_future<json::json_return_type>(meter_to_json(utils::rate_moving_average()));
    });

    cs::get_key_size.set(r, [] (std::unique_ptr<http::request> req) {
    cs::get_key_size.set(r, [] (std::unique_ptr<request> req) {
        // TBD
        // FIXME
        // we don't support keys cache,
@@ -188,7 +199,7 @@ void set_cache_service(http_context& ctx, routes& r) {
        return make_ready_future<json::json_return_type>(0);
    });

    cs::get_key_entries.set(r, [] (std::unique_ptr<http::request> req) {
    cs::get_key_entries.set(r, [] (std::unique_ptr<request> req) {
        // TBD
        // FIXME
        // we don't support keys cache,
@@ -196,68 +207,62 @@ void set_cache_service(http_context& ctx, routes& r) {
        return make_ready_future<json::json_return_type>(0);
    });

    cs::get_row_capacity.set(r, [] (std::unique_ptr<http::request> req) {
        return seastar::map_reduce(smp::all_cpus(), [] (int cpu) {
            return make_ready_future<uint64_t>(memory::stats().total_memory());
        }, uint64_t(0), std::plus<uint64_t>()).then([](const int64_t& res) {
            return make_ready_future<json::json_return_type>(res);
        });
    cs::get_row_capacity.set(r, [&ctx] (std::unique_ptr<request> req) {
        return map_reduce_cf(ctx, uint64_t(0), [](const column_family& cf) {
            return cf.get_row_cache().get_cache_tracker().region().occupancy().used_space();
        }, std::plus<uint64_t>());
    });

    cs::get_row_hits.set(r, [&ctx] (std::unique_ptr<http::request> req) {
        return map_reduce_cf(ctx, uint64_t(0), [](const replica::column_family& cf) {
    cs::get_row_hits.set(r, [&ctx] (std::unique_ptr<request> req) {
        return map_reduce_cf(ctx, uint64_t(0), [](const column_family& cf) {
            return cf.get_row_cache().stats().hits.count();
        }, std::plus<uint64_t>());
    });

    cs::get_row_requests.set(r, [&ctx] (std::unique_ptr<http::request> req) {
        return map_reduce_cf(ctx, uint64_t(0), [](const replica::column_family& cf) {
    cs::get_row_requests.set(r, [&ctx] (std::unique_ptr<request> req) {
        return map_reduce_cf(ctx, uint64_t(0), [](const column_family& cf) {
            return cf.get_row_cache().stats().hits.count() + cf.get_row_cache().stats().misses.count();
        }, std::plus<uint64_t>());
    });

    cs::get_row_hit_rate.set(r, [&ctx] (std::unique_ptr<http::request> req) {
        return map_reduce_cf(ctx, ratio_holder(), [](const replica::column_family& cf) {
    cs::get_row_hit_rate.set(r, [&ctx] (std::unique_ptr<request> req) {
        return map_reduce_cf(ctx, ratio_holder(), [](const column_family& cf) {
            return ratio_holder(cf.get_row_cache().stats().hits.count() + cf.get_row_cache().stats().misses.count(),
                    cf.get_row_cache().stats().hits.count());
        }, std::plus<ratio_holder>());
    });

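One subtlety in get_row_hit_rate above: the reducer sums ratio_holder values, and assuming basic_ratio_holder keeps its numerator and denominator separately (which its two-argument construction suggests), that addition accumulates hits and total requests independently. The reported figure is then the request-weighted hit rate across all tables rather than an average of per-table ratios:

    // Hypothetical numbers: table A serves 90 hits / 100 requests,
    // table B serves 0 hits / 900 requests.
    // Summed separately: 90 / 1000 = 0.09 overall hit rate,
    // not the misleading per-table average (0.90 + 0.00) / 2 = 0.45.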
    cs::get_row_hits_moving_avrage.set(r, [&ctx] (std::unique_ptr<http::request> req) {
        return map_reduce_cf_raw(ctx, utils::rate_moving_average(), [](const replica::column_family& cf) {
    cs::get_row_hits_moving_avrage.set(r, [&ctx] (std::unique_ptr<request> req) {
        return map_reduce_cf_raw(ctx, utils::rate_moving_average(), [](const column_family& cf) {
            return cf.get_row_cache().stats().hits.rate();
        }, std::plus<utils::rate_moving_average>()).then([](const utils::rate_moving_average& m) {
            return make_ready_future<json::json_return_type>(meter_to_json(m));
        });
    });

    cs::get_row_requests_moving_avrage.set(r, [&ctx] (std::unique_ptr<http::request> req) {
        return map_reduce_cf_raw(ctx, utils::rate_moving_average(), [](const replica::column_family& cf) {
    cs::get_row_requests_moving_avrage.set(r, [&ctx] (std::unique_ptr<request> req) {
        return map_reduce_cf_raw(ctx, utils::rate_moving_average(), [](const column_family& cf) {
            return cf.get_row_cache().stats().hits.rate() + cf.get_row_cache().stats().misses.rate();
        }, std::plus<utils::rate_moving_average>()).then([](const utils::rate_moving_average& m) {
            return make_ready_future<json::json_return_type>(meter_to_json(m));
        });
    });

    cs::get_row_size.set(r, [&ctx] (std::unique_ptr<http::request> req) {
    cs::get_row_size.set(r, [&ctx] (std::unique_ptr<request> req) {
        // In origin row size is the weighted size.
        // We currently do not support weights, so we use raw size in bytes instead
        return ctx.db.map_reduce0([](replica::database& db) -> uint64_t {
            return db.row_cache_tracker().region().occupancy().used_space();
        }, uint64_t(0), std::plus<uint64_t>()).then([](const int64_t& res) {
            return make_ready_future<json::json_return_type>(res);
        });
        // We currently do not support weights, so we use num entries instead
        return map_reduce_cf(ctx, 0, [](const column_family& cf) {
            return cf.get_row_cache().partitions();
        }, std::plus<uint64_t>());
    });

    cs::get_row_entries.set(r, [&ctx] (std::unique_ptr<http::request> req) {
        return ctx.db.map_reduce0([](replica::database& db) -> uint64_t {
            return db.row_cache_tracker().partitions();
        }, uint64_t(0), std::plus<uint64_t>()).then([](const int64_t& res) {
            return make_ready_future<json::json_return_type>(res);
        });
    cs::get_row_entries.set(r, [&ctx] (std::unique_ptr<request> req) {
        return map_reduce_cf(ctx, 0, [](const column_family& cf) {
            return cf.get_row_cache().partitions();
        }, std::plus<uint64_t>());
    });

    cs::get_counter_capacity.set(r, [] (std::unique_ptr<http::request> req) {
    cs::get_counter_capacity.set(r, [] (std::unique_ptr<request> req) {
        // TBD
        // FIXME
        // we don't support counter cache,
@@ -265,7 +270,7 @@ void set_cache_service(http_context& ctx, routes& r) {
        return make_ready_future<json::json_return_type>(0);
    });

    cs::get_counter_hits.set(r, [] (std::unique_ptr<http::request> req) {
    cs::get_counter_hits.set(r, [] (std::unique_ptr<request> req) {
        // TBD
        // FIXME
        // we don't support counter cache,
@@ -273,7 +278,7 @@ void set_cache_service(http_context& ctx, routes& r) {
        return make_ready_future<json::json_return_type>(0);
    });

    cs::get_counter_requests.set(r, [] (std::unique_ptr<http::request> req) {
    cs::get_counter_requests.set(r, [] (std::unique_ptr<request> req) {
        // TBD
        // FIXME
        // we don't support counter cache,
@@ -281,7 +286,7 @@ void set_cache_service(http_context& ctx, routes& r) {
        return make_ready_future<json::json_return_type>(0);
    });

    cs::get_counter_hit_rate.set(r, [] (std::unique_ptr<http::request> req) {
    cs::get_counter_hit_rate.set(r, [] (std::unique_ptr<request> req) {
        // TBD
        // FIXME
        // we don't support counter cache,
@@ -289,21 +294,21 @@ void set_cache_service(http_context& ctx, routes& r) {
        return make_ready_future<json::json_return_type>(0);
    });

    cs::get_counter_hits_moving_avrage.set(r, [] (std::unique_ptr<http::request> req) {
    cs::get_counter_hits_moving_avrage.set(r, [&ctx] (std::unique_ptr<request> req) {
        // TBD
        // FIXME
        // See above
        return make_ready_future<json::json_return_type>(meter_to_json(utils::rate_moving_average()));
    });

    cs::get_counter_requests_moving_avrage.set(r, [] (std::unique_ptr<http::request> req) {
    cs::get_counter_requests_moving_avrage.set(r, [&ctx] (std::unique_ptr<request> req) {
        // TBD
        // FIXME
        // See above
        return make_ready_future<json::json_return_type>(meter_to_json(utils::rate_moving_average()));
    });

    cs::get_counter_size.set(r, [] (std::unique_ptr<http::request> req) {
    cs::get_counter_size.set(r, [] (std::unique_ptr<request> req) {
        // TBD
        // FIXME
        // we don't support counter cache,
@@ -311,7 +316,7 @@ void set_cache_service(http_context& ctx, routes& r) {
        return make_ready_future<json::json_return_type>(0);
    });

    cs::get_counter_entries.set(r, [] (std::unique_ptr<http::request> req) {
    cs::get_counter_entries.set(r, [] (std::unique_ptr<request> req) {
        // TBD
        // FIXME
        // we don't support counter cache,

@@ -1,20 +1,30 @@
/*
 * Copyright (C) 2015-present ScyllaDB
 * Copyright (C) 2015 ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

#pragma once

namespace seastar::httpd {
class routes;
}
#include "api.hh"

namespace api {

struct http_context;
void set_cache_service(http_context& ctx, seastar::httpd::routes& r);
void set_cache_service(http_context& ctx, routes& r);

}

@@ -1,18 +1,31 @@
/*
 * Copyright (C) 2015-present ScyllaDB
 * Copyright (C) 2015 ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

#include "collectd.hh"
#include "api/api-doc/collectd.json.hh"
#include <seastar/core/scollectd.hh>
#include <seastar/core/scollectd_api.hh>
#include "core/scollectd.hh"
#include "core/scollectd_api.hh"
#include "endian.h"
#include <boost/range/irange.hpp>
#include <regex>
#include "api/api_init.hh"

namespace api {

@@ -29,11 +42,8 @@ static auto transformer(const std::vector<collectd_value>& values) {
        case scollectd::data_type::GAUGE:
            collected_value.values.push(v.d());
            break;
        case scollectd::data_type::COUNTER:
            collected_value.values.push(v.ui());
            break;
        case scollectd::data_type::REAL_COUNTER:
            collected_value.values.push(v.d());
        case scollectd::data_type::DERIVE:
            collected_value.values.push(v.i());
            break;
        default:
            collected_value.values.push(v.ui());
@@ -52,9 +62,9 @@ static const char* str_to_regex(const sstring& v) {
}

void set_collectd(http_context& ctx, routes& r) {
    cd::get_collectd.set(r, [](std::unique_ptr<request> req) {
    cd::get_collectd.set(r, [&ctx](std::unique_ptr<request> req) {

        auto id = ::make_shared<scollectd::type_instance_id>(req->get_path_param("pluginid"),
        auto id = make_shared<scollectd::type_instance_id>(req->param["pluginid"],
                req->get_query_param("instance"), req->get_query_param("type"),
                req->get_query_param("type_instance"));

@@ -91,7 +101,7 @@ void set_collectd(http_context& ctx, routes& r) {
    });

    cd::enable_collectd.set(r, [](std::unique_ptr<request> req) -> future<json::json_return_type> {
        std::regex plugin(req->get_path_param("pluginid").c_str());
        std::regex plugin(req->param["pluginid"].c_str());
        std::regex instance(str_to_regex(req->get_query_param("instance")));
        std::regex type(str_to_regex(req->get_query_param("type")));
        std::regex type_instance(str_to_regex(req->get_query_param("type_instance")));

@@ -1,20 +1,30 @@
/*
 * Copyright (C) 2015-present ScyllaDB
 * Copyright (C) 2015 ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

#pragma once

namespace seastar::httpd {
class routes;
}
#include "api.hh"

namespace api {

struct http_context;
void set_collectd(http_context& ctx, seastar::httpd::routes& r);
void set_collectd(http_context& ctx, routes& r);

}

api/column_family.cc

File diff suppressed because it is too large.

@@ -1,44 +1,45 @@
/*
 * Copyright (C) 2015-present ScyllaDB
 * Copyright (C) 2015 ScyllaDB
 */

/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

#pragma once

#include "replica/database.hh"
#include <seastar/json/json_elements.hh>
#include <any>
#include "api/api_init.hh"

namespace db {
class system_keyspace;
}
#include "api.hh"
#include "api/api-doc/column_family.json.hh"
#include "database.hh"

namespace api {

void set_column_family(http_context& ctx, httpd::routes& r, sharded<db::system_keyspace>& sys_ks);
void unset_column_family(http_context& ctx, httpd::routes& r);
void set_column_family(http_context& ctx, routes& r);

table_id get_uuid(const sstring& name, const replica::database& db);
future<> foreach_column_family(http_context& ctx, const sstring& name, std::function<void(replica::column_family&)> f);
const utils::UUID& get_uuid(const sstring& name, const database& db);
future<> foreach_column_family(http_context& ctx, const sstring& name, std::function<void(column_family&)> f);


template<class Mapper, class I, class Reducer>
future<I> map_reduce_cf_raw(http_context& ctx, const sstring& name, I init,
        Mapper mapper, Reducer reducer) {
    auto uuid = get_uuid(name, ctx.db.local());
    using mapper_type = std::function<std::unique_ptr<std::any>(replica::database&)>;
    using reducer_type = std::function<std::unique_ptr<std::any>(std::unique_ptr<std::any>, std::unique_ptr<std::any>)>;
    return ctx.db.map_reduce0(mapper_type([mapper, uuid](replica::database& db) {
        return std::make_unique<std::any>(I(mapper(db.find_column_family(uuid))));
    }), std::make_unique<std::any>(std::move(init)), reducer_type([reducer = std::move(reducer)] (std::unique_ptr<std::any> a, std::unique_ptr<std::any> b) mutable {
        return std::make_unique<std::any>(I(reducer(std::any_cast<I>(std::move(*a)), std::any_cast<I>(std::move(*b)))));
    })).then([] (std::unique_ptr<std::any> r) {
        return std::any_cast<I>(std::move(*r));
    });
    return ctx.db.map_reduce0([mapper, uuid](database& db) {
        return mapper(db.find_column_family(uuid));
    }, init, reducer);
}
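The newer implementation wraps every intermediate value in std::unique_ptr<std::any>, so map_reduce0 moves type-erased pointers between shards instead of copying I by value, and the caller unwraps the final result with std::any_cast. A hedged usage sketch; the metric mirrors cache_service.cc and the function name is invented:

    // Sum one table's row-cache hits across all shards.
    future<uint64_t> total_row_hits(http_context& ctx, const sstring& cf_name) {
        return map_reduce_cf_raw(ctx, cf_name, uint64_t(0),
            [](replica::column_family& cf) {
                return cf.get_row_cache().stats().hits.count();
            },
            std::plus<uint64_t>());
    }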

@@ -50,47 +51,35 @@ future<json::json_return_type> map_reduce_cf(http_context& ctx, const sstring& n
    });
}

template<class Mapper, class I, class Reducer, class Result>
future<I> map_reduce_cf_raw(http_context& ctx, const sstring& name, I init,
        Mapper mapper, Reducer reducer, Result result) {
    auto uuid = get_uuid(name, ctx.db.local());
    return ctx.db.map_reduce0([mapper, uuid](database& db) {
        return mapper(db.find_column_family(uuid));
    }, init, reducer);
}


template<class Mapper, class I, class Reducer, class Result>
future<json::json_return_type> map_reduce_cf(http_context& ctx, const sstring& name, I init,
        Mapper mapper, Reducer reducer, Result result) {
    return map_reduce_cf_raw(ctx, name, init, mapper, reducer).then([result](const I& res) mutable {
    return map_reduce_cf_raw(ctx, name, init, mapper, reducer, result).then([result](const I& res) mutable {
        result = res;
        return make_ready_future<json::json_return_type>(result);
    });
}

future<json::json_return_type> map_reduce_cf_time_histogram(http_context& ctx, const sstring& name, std::function<utils::time_estimated_histogram(const replica::column_family&)> f);

struct map_reduce_column_families_locally {
    std::any init;
    std::function<std::unique_ptr<std::any>(replica::column_family&)> mapper;
    std::function<std::unique_ptr<std::any>(std::unique_ptr<std::any>, std::unique_ptr<std::any>)> reducer;
    future<std::unique_ptr<std::any>> operator()(replica::database& db) const {
        auto res = seastar::make_lw_shared<std::unique_ptr<std::any>>(std::make_unique<std::any>(init));
        return db.get_tables_metadata().for_each_table_gently([res, this] (table_id, seastar::lw_shared_ptr<replica::table> table) {
            *res = reducer(std::move(*res), mapper(*table.get()));
            return make_ready_future();
        }).then([res] () {
            return std::move(*res);
        });
    }
};

template<class Mapper, class I, class Reducer>
future<I> map_reduce_cf_raw(http_context& ctx, I init,
        Mapper mapper, Reducer reducer) {
    using mapper_type = std::function<std::unique_ptr<std::any>(replica::column_family&)>;
    using reducer_type = std::function<std::unique_ptr<std::any>(std::unique_ptr<std::any>, std::unique_ptr<std::any>)>;
    auto wrapped_mapper = mapper_type([mapper = std::move(mapper)] (replica::column_family& cf) mutable {
        return std::make_unique<std::any>(I(mapper(cf)));
    });
    auto wrapped_reducer = reducer_type([reducer = std::move(reducer)] (std::unique_ptr<std::any> a, std::unique_ptr<std::any> b) mutable {
        return std::make_unique<std::any>(I(reducer(std::any_cast<I>(std::move(*a)), std::any_cast<I>(std::move(*b)))));
    });
    return ctx.db.map_reduce0(map_reduce_column_families_locally{init,
            std::move(wrapped_mapper), wrapped_reducer}, std::make_unique<std::any>(init), wrapped_reducer).then([] (std::unique_ptr<std::any> res) {
        return std::any_cast<I>(std::move(*res));
    });
    return ctx.db.map_reduce0([mapper, init, reducer](database& db) {
        auto res = init;
        for (auto i : db.get_column_families()) {
            res = reducer(res, mapper(*i.second.get()));
        }
        return res;
    }, init, reducer);
}
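This all-tables overload is a two-level reduction: map_reduce_column_families_locally folds over every table on one shard (yielding between tables via for_each_table_gently), and map_reduce0 then combines the per-shard partial results. A hedged usage sketch; the function name is invented and the metric mirrors cache_service.cc:

    // Total row-cache partitions across every table on every shard.
    future<uint64_t> total_cached_partitions(http_context& ctx) {
        return map_reduce_cf_raw(ctx, uint64_t(0),
            [](replica::column_family& cf) {
                return cf.get_row_cache().partitions();
            },
            std::plus<uint64_t>());
    }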

@@ -103,12 +92,9 @@ future<json::json_return_type> map_reduce_cf(http_context& ctx, I init,
}

future<json::json_return_type> get_cf_stats(http_context& ctx, const sstring& name,
        int64_t replica::column_family_stats::*f);
        int64_t column_family::stats::*f);

future<json::json_return_type> get_cf_stats(http_context& ctx,
        int64_t replica::column_family_stats::*f);

std::tuple<sstring, sstring> parse_fully_qualified_cf_name(sstring name);
        int64_t column_family::stats::*f);

}

Some files were not shown because too many files have changed in this diff.