Compare commits
1 Commits
auto-backp
...
dani-tweig
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ec85bf4e24 |
1
.gitattributes
vendored
1
.gitattributes
vendored
@@ -2,4 +2,3 @@
|
||||
*.hh diff=cpp
|
||||
*.svg binary
|
||||
docs/_static/api/js/* binary
|
||||
pgo/profiles/** filter=lfs diff=lfs merge=lfs -text
|
||||
|
||||
63
.github/scripts/auto-backport.py
vendored
63
.github/scripts/auto-backport.py
vendored
@@ -29,11 +29,10 @@ def parse_args():
|
||||
parser.add_argument('--commits', default=None, type=str, help='Range of promoted commits.')
|
||||
parser.add_argument('--pull-request', type=int, help='Pull request number to be backported')
|
||||
parser.add_argument('--head-commit', type=str, required=is_pull_request(), help='The HEAD of target branch after the pull request specified by --pull-request is merged')
|
||||
parser.add_argument('--github-event', type=str, help='Get GitHub event type')
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def create_pull_request(repo, new_branch_name, base_branch_name, pr, backport_pr_title, commits, is_draft, is_collaborator):
|
||||
def create_pull_request(repo, new_branch_name, base_branch_name, pr, backport_pr_title, commits, is_draft=False):
|
||||
pr_body = f'{pr.body}\n\n'
|
||||
for commit in commits:
|
||||
pr_body += f'- (cherry picked from commit {commit})\n\n'
|
||||
@@ -47,11 +46,10 @@ def create_pull_request(repo, new_branch_name, base_branch_name, pr, backport_pr
|
||||
draft=is_draft
|
||||
)
|
||||
logging.info(f"Pull request created: {backport_pr.html_url}")
|
||||
if is_collaborator:
|
||||
backport_pr.add_to_assignees(pr.user)
|
||||
backport_pr.add_to_assignees(pr.user)
|
||||
if is_draft:
|
||||
backport_pr.add_to_labels("conflicts")
|
||||
pr_comment = f"@{pr.user.login} - This PR was marked as draft because it has conflicts\n"
|
||||
pr_comment = f"@{pr.user} - This PR was marked as draft because it has conflicts\n"
|
||||
pr_comment += "Please resolve them and mark this PR as ready for review"
|
||||
backport_pr.create_issue_comment(pr_comment)
|
||||
logging.info(f"Assigned PR to original author: {pr.user}")
|
||||
@@ -68,8 +66,7 @@ def get_pr_commits(repo, pr, stable_branch, start_commit=None):
|
||||
if pr.merged:
|
||||
merge_commit = repo.get_commit(pr.merge_commit_sha)
|
||||
if len(merge_commit.parents) > 1: # Check if this merge commit includes multiple commits
|
||||
for commit in pr.get_commits():
|
||||
commits.append(commit.sha)
|
||||
commits.append(pr.merge_commit_sha)
|
||||
else:
|
||||
if start_commit:
|
||||
promoted_commits = repo.compare(start_commit, stable_branch).commits
|
||||
@@ -94,7 +91,18 @@ def get_pr_commits(repo, pr, stable_branch, start_commit=None):
|
||||
return commits
|
||||
|
||||
|
||||
def backport(repo, pr, version, commits, backport_base_branch, is_collaborator):
|
||||
def create_pr_comment_and_remove_label(pr, comment_body):
|
||||
labels = pr.get_labels()
|
||||
pattern = re.compile(r"backport/\d+\.\d+$")
|
||||
for label in labels:
|
||||
if pattern.match(label.name):
|
||||
print(f"Removing label: {label.name}")
|
||||
comment_body += f'- {label.name}\n'
|
||||
pr.remove_from_labels(label)
|
||||
pr.create_issue_comment(comment_body)
|
||||
|
||||
|
||||
def backport(repo, pr, version, commits, backport_base_branch):
|
||||
new_branch_name = f'backport/{pr.number}/to-{version}'
|
||||
backport_pr_title = f'[Backport {version}] {pr.title}'
|
||||
repo_url = f'https://scylladbbot:{github_token}@github.com/{repo.full_name}.git'
|
||||
@@ -106,7 +114,7 @@ def backport(repo, pr, version, commits, backport_base_branch, is_collaborator):
|
||||
is_draft = False
|
||||
for commit in commits:
|
||||
try:
|
||||
repo_local.git.cherry_pick(commit, '-x')
|
||||
repo_local.git.cherry_pick(commit, '-m1', '-x')
|
||||
except GitCommandError as e:
|
||||
logging.warning(f'Cherry-pick conflict on commit {commit}: {e}')
|
||||
is_draft = True
|
||||
@@ -114,7 +122,7 @@ def backport(repo, pr, version, commits, backport_base_branch, is_collaborator):
|
||||
repo_local.git.cherry_pick('--continue')
|
||||
repo_local.git.push(fork_repo, new_branch_name, force=True)
|
||||
create_pull_request(repo, new_branch_name, backport_base_branch, pr, backport_pr_title, commits,
|
||||
is_draft, is_collaborator)
|
||||
is_draft=is_draft)
|
||||
|
||||
except GitCommandError as e:
|
||||
logging.warning(f"GitCommandError: {e}")
|
||||
@@ -123,14 +131,12 @@ def backport(repo, pr, version, commits, backport_base_branch, is_collaborator):
|
||||
def with_github_keyword_prefix(repo, pr):
|
||||
pattern = rf"(?:fix(?:|es|ed))\s*:?\s*(?:(?:(?:{repo.full_name})?#)|https://github\.com/{repo.full_name}/issues/)(\d+)"
|
||||
match = re.findall(pattern, pr.body, re.IGNORECASE)
|
||||
if not match:
|
||||
for commit in pr.get_commits():
|
||||
match = re.findall(pattern, commit.commit.message, re.IGNORECASE)
|
||||
if match:
|
||||
print(f'{pr.number} has a valid close reference in commit message {commit.sha}')
|
||||
break
|
||||
if not match:
|
||||
print(f'No valid close reference for {pr.number}')
|
||||
comment = f':warning: @{pr.user.login} PR body does not contain a Fixes reference to an issue '
|
||||
comment += ' and can not be backported\n\n'
|
||||
comment += 'The following labels were removed:\n'
|
||||
create_pr_comment_and_remove_label(pr, comment)
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
@@ -155,7 +161,6 @@ def main():
|
||||
scylladbbot_repo = g.get_repo(fork_repo_name)
|
||||
closed_prs = []
|
||||
start_commit = None
|
||||
is_collaborator = True
|
||||
|
||||
if args.commits:
|
||||
start_commit, end_commit = args.commits.split('..')
|
||||
@@ -180,33 +185,21 @@ def main():
|
||||
if not backport_labels:
|
||||
print(f'no backport label: {pr.number}')
|
||||
continue
|
||||
if not with_github_keyword_prefix(repo, pr) and args.github_event != 'unlabeled':
|
||||
comment = f''':warning: @{pr.user.login} PR body or PR commits do not contain a Fixes reference to an issue and can not be backported
|
||||
please update PR body with a valid ref to an issue. Then remove `scylladbbot/backport_error` label to re-trigger the backport process
|
||||
'''
|
||||
pr.create_issue_comment(comment)
|
||||
pr.add_to_labels("scylladbbot/backport_error")
|
||||
if args.commits and not with_github_keyword_prefix(repo, pr):
|
||||
continue
|
||||
if not repo.private and not scylladbbot_repo.has_in_collaborators(pr.user.login):
|
||||
logging.info(f"Sending an invite to {pr.user.login} to become a collaborator to {scylladbbot_repo.full_name} ")
|
||||
scylladbbot_repo.add_to_collaborators(pr.user.login)
|
||||
comment = f''':warning: @{pr.user.login} you have been added as collaborator to scylladbbot fork
|
||||
Please check your inbox and approve the invitation, otherwise you will not be able to edit PR branch when needed
|
||||
'''
|
||||
# When a pull request is pending for backport but its author is not yet a collaborator of "scylladbbot",
|
||||
# we attach a "scylladbbot/backport_error" label to the PR.
|
||||
# This prevents the workflow from proceeding with the backport process
|
||||
# until the author has been granted proper permissions
|
||||
# the author should remove the label manually to re-trigger the backport workflow.
|
||||
pr.add_to_labels("scylladbbot/backport_error")
|
||||
pr.create_issue_comment(comment)
|
||||
is_collaborator = False
|
||||
comment = f':warning: @{pr.user.login} you have been added as collaborator to scylladbbot fork '
|
||||
comment += f'Please check your inbox and approve the invitation, once it is done, please add the backport labels again\n'
|
||||
create_pr_comment_and_remove_label(pr, comment)
|
||||
continue
|
||||
commits = get_pr_commits(repo, pr, stable_branch, start_commit)
|
||||
logging.info(f"Found PR #{pr.number} with commit {commits} and the following labels: {backport_labels}")
|
||||
for backport_label in backport_labels:
|
||||
version = backport_label.replace('backport/', '')
|
||||
backport_base_branch = backport_label.replace('backport/', backport_branch)
|
||||
backport(repo, pr, version, commits, backport_base_branch, is_collaborator)
|
||||
backport(repo, pr, version, commits, backport_base_branch)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
81
.github/scripts/check-license.py
vendored
81
.github/scripts/check-license.py
vendored
@@ -1,81 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (C) 2024-present ScyllaDB
|
||||
#
|
||||
#
|
||||
# SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
#
|
||||
|
||||
import argparse
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Set
|
||||
|
||||
|
||||
def parse_args() -> argparse.Namespace:
|
||||
"""Parses command-line arguments."""
|
||||
parser = argparse.ArgumentParser(description='Check license headers in files')
|
||||
parser.add_argument('--files', required=True, nargs="+", type=Path,
|
||||
help='List of files to check')
|
||||
parser.add_argument('--license', required=True,
|
||||
help='License to check for')
|
||||
parser.add_argument('--check-lines', type=int, default=10,
|
||||
help='Number of lines to check (default: %(default)s)')
|
||||
parser.add_argument('--extensions', required=True, nargs="+",
|
||||
help='List of file extensions to check')
|
||||
parser.add_argument('--verbose', action='store_true',
|
||||
help='Print verbose output (default: %(default)s)')
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def should_check_file(file_path: Path, allowed_extensions: Set[str]) -> bool:
|
||||
return file_path.suffix in allowed_extensions
|
||||
|
||||
|
||||
def check_license_header(file_path: Path, license_header: str, check_lines: int) -> bool:
|
||||
try:
|
||||
with open(file_path, 'r', encoding='utf-8') as f:
|
||||
for _ in range(check_lines):
|
||||
line = f.readline()
|
||||
if license_header in line:
|
||||
return True
|
||||
return False
|
||||
except (UnicodeDecodeError, StopIteration):
|
||||
# Handle files that can't be read as text or have fewer lines
|
||||
return False
|
||||
|
||||
|
||||
def main() -> int:
|
||||
args = parse_args()
|
||||
|
||||
if not args.files:
|
||||
print("No files to check")
|
||||
return 0
|
||||
|
||||
num_errors = 0
|
||||
|
||||
for file_path in args.files:
|
||||
# Skip non-existent files
|
||||
if not file_path.exists():
|
||||
continue
|
||||
|
||||
# Skip files with non-matching extensions
|
||||
if not should_check_file(file_path, args.extensions):
|
||||
print(f"ℹ️ Skipping file with unchecked extension: {file_path}")
|
||||
continue
|
||||
|
||||
# Check license header
|
||||
if check_license_header(file_path, args.license, args.check_lines):
|
||||
if args.verbose:
|
||||
print(f"✅ License header found in: {file_path}")
|
||||
else:
|
||||
print(f"❌ Missing license header in: {file_path}")
|
||||
num_errors += 1
|
||||
|
||||
if num_errors > 0:
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
31
.github/workflows/add-label-when-promoted.yaml
vendored
31
.github/workflows/add-label-when-promoted.yaml
vendored
@@ -7,7 +7,7 @@ on:
|
||||
- branch-*.*
|
||||
- enterprise
|
||||
pull_request_target:
|
||||
types: [labeled, unlabeled]
|
||||
types: [labeled]
|
||||
branches: [master, next, enterprise]
|
||||
|
||||
jobs:
|
||||
@@ -53,28 +53,19 @@ jobs:
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.AUTO_BACKPORT_TOKEN }}
|
||||
run: python .github/scripts/auto-backport.py --repo ${{ github.repository }} --base-branch ${{ github.ref }} --commits ${{ github.event.before }}..${{ github.sha }}
|
||||
- name: Check if a valid backport label exists and no backport_error
|
||||
- name: Check if label starts with 'backport/' and contains digits
|
||||
id: check_label
|
||||
run: |
|
||||
labels_json='${{ toJson(github.event.pull_request.labels) }}'
|
||||
echo "Checking labels: $(echo "$labels_json" | jq -r '.[].name')"
|
||||
|
||||
# Check if a valid backport label exists
|
||||
if echo "$labels_json" | jq -e 'any(.[] | .name; test("backport/[0-9]+\\.[0-9]+$"))' > /dev/null; then
|
||||
# Ensure scylladbbot/backport_error is NOT present
|
||||
if ! echo "$labels_json" | jq -e '.[] | select(.name == "scylladbbot/backport_error")' > /dev/null; then
|
||||
echo "A matching backport label was found and no backport_error label exists."
|
||||
echo "ready_for_backport=true" >> "$GITHUB_OUTPUT"
|
||||
exit 0
|
||||
else
|
||||
echo "The label 'scylladbbot/backport_error' is present, invalidating backport."
|
||||
fi
|
||||
label_name="${{ github.event.label.name }}"
|
||||
if [[ "$label_name" =~ ^backport/[0-9]+\.[0-9]+$ ]]; then
|
||||
echo "Label matches backport/X.X pattern."
|
||||
echo "backport_label=true" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "No matching backport label found."
|
||||
echo "Label does not match the required pattern."
|
||||
echo "backport_label=false" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
echo "ready_for_backport=false" >> "$GITHUB_OUTPUT"
|
||||
- name: Run auto-backport.py when PR is closed
|
||||
if: ${{ github.event_name == 'pull_request_target' && steps.check_label.outputs.ready_for_backport == 'true' && github.event.pull_request.state == 'closed' }}
|
||||
- name: Run auto-backport.py when label was added
|
||||
if: ${{ github.event_name == 'pull_request_target' && steps.check_label.outputs.backport_label == 'true' && github.event.pull_request.state == 'closed' }}
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.AUTO_BACKPORT_TOKEN }}
|
||||
run: python .github/scripts/auto-backport.py --repo ${{ github.repository }} --base-branch ${{ github.ref }} --pull-request ${{ github.event.pull_request.number }} --head-commit ${{ github.event.pull_request.base.sha }} --github-event ${{ github.event.action }}
|
||||
run: python .github/scripts/auto-backport.py --repo ${{ github.repository }} --base-branch ${{ github.ref }} --pull-request ${{ github.event.pull_request.number }} --head-commit ${{ github.event.pull_request.base.sha }}
|
||||
|
||||
52
.github/workflows/check-license-header.yaml
vendored
52
.github/workflows/check-license-header.yaml
vendored
@@ -1,52 +0,0 @@
|
||||
name: License Header Check
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, synchronize, reopened]
|
||||
branches: [master]
|
||||
|
||||
env:
|
||||
HEADER_CHECK_LINES: 10
|
||||
LICENSE: "LicenseRef-ScyllaDB-Source-Available-1.0"
|
||||
CHECKED_EXTENSIONS: ".cc .hh .py"
|
||||
|
||||
jobs:
|
||||
check-license-headers:
|
||||
name: Check License Headers
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
pull-requests: write
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
run: |
|
||||
# Get list of added files comparing with base branch
|
||||
echo "files=$(git diff --name-only --diff-filter=A ${{ github.event.pull_request.base.sha }} ${{ github.sha }} | tr '\n' ' ')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Check license headers
|
||||
if: steps.changed-files.outputs.files != ''
|
||||
run: |
|
||||
.github/scripts/check-license.py \
|
||||
--files ${{ steps.changed-files.outputs.files }} \
|
||||
--license "${{ env.LICENSE }}" \
|
||||
--check-lines "${{ env.HEADER_CHECK_LINES }}" \
|
||||
--extensions ${{ env.CHECKED_EXTENSIONS }}
|
||||
|
||||
- name: Comment on PR if check fails
|
||||
if: failure()
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
const license = '${{ env.LICENSE }}';
|
||||
await github.rest.issues.createComment({
|
||||
issue_number: context.issue.number,
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
body: `❌ License header check failed. Please ensure all new files include the header within the first ${{ env.HEADER_CHECK_LINES }} lines:\n\`\`\`\n${license}\n\`\`\`\nSee action logs for details.`
|
||||
});
|
||||
2
.github/workflows/clang-nightly.yaml
vendored
2
.github/workflows/clang-nightly.yaml
vendored
@@ -7,7 +7,7 @@ on:
|
||||
|
||||
env:
|
||||
# use the development branch explicitly
|
||||
CLANG_VERSION: 21
|
||||
CLANG_VERSION: 20
|
||||
BUILD_DIR: build
|
||||
|
||||
permissions: {}
|
||||
|
||||
1
.github/workflows/clang-tidy.yaml
vendored
1
.github/workflows/clang-tidy.yaml
vendored
@@ -34,7 +34,6 @@ jobs:
|
||||
name: Run clang-tidy
|
||||
needs:
|
||||
- read-toolchain
|
||||
if: "${{ needs.read-toolchain.result == 'success' }}"
|
||||
runs-on: ubuntu-latest
|
||||
container: ${{ needs.read-toolchain.outputs.image }}
|
||||
steps:
|
||||
|
||||
2
.github/workflows/iwyu.yaml
vendored
2
.github/workflows/iwyu.yaml
vendored
@@ -11,7 +11,7 @@ env:
|
||||
CLEANER_OUTPUT_PATH: build/clang-include-cleaner.log
|
||||
# the "idl" subdirectory does not contain C++ source code. the .hh files in it are
|
||||
# supposed to be processed by idl-compiler.py, so we don't check them using the cleaner
|
||||
CLEANER_DIRS: test/unit exceptions alternator api auth cdc compaction db dht gms index lang message mutation mutation_writer node_ops redis replica
|
||||
CLEANER_DIRS: test/unit exceptions alternator api auth cdc compaction db dht gms index lang message mutation
|
||||
|
||||
permissions: {}
|
||||
|
||||
|
||||
22
.github/workflows/make-pr-ready-for-review.yaml
vendored
22
.github/workflows/make-pr-ready-for-review.yaml
vendored
@@ -1,22 +0,0 @@
|
||||
name: Mark PR as Ready When Conflicts Label is Removed
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
types:
|
||||
- unlabeled
|
||||
|
||||
env:
|
||||
DEFAULT_BRANCH: 'master'
|
||||
|
||||
jobs:
|
||||
mark-ready:
|
||||
if: github.event.label.name == 'conflicts'
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
pull-requests: write
|
||||
|
||||
steps:
|
||||
- name: Mark pull request as ready for review
|
||||
run: gh pr ready "${{ github.event.pull_request.number }}"
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.AUTO_BACKPORT_TOKEN }}
|
||||
@@ -17,6 +17,6 @@ jobs:
|
||||
with:
|
||||
mode: minimum
|
||||
count: 1
|
||||
labels: "backport/none\nbackport/\\d{4}\\.\\d+\nbackport/\\d+\\.\\d+"
|
||||
labels: "backport/none\nbackport/\\d.\\d"
|
||||
use_regex: true
|
||||
add_comment: false
|
||||
|
||||
7
.github/workflows/seastar.yaml
vendored
7
.github/workflows/seastar.yaml
vendored
@@ -15,13 +15,10 @@ env:
|
||||
BUILD_DIR: build
|
||||
|
||||
jobs:
|
||||
read-toolchain:
|
||||
uses: ./.github/workflows/read-toolchain.yaml
|
||||
build-with-the-latest-seastar:
|
||||
needs:
|
||||
- read-toolchain
|
||||
runs-on: ubuntu-latest
|
||||
container: ${{ needs.read-toolchain.outputs.image }}
|
||||
# be consistent with tools/toolchain/image
|
||||
container: scylladb/scylla-toolchain:fedora-40-20240621
|
||||
strategy:
|
||||
matrix:
|
||||
build_type:
|
||||
|
||||
50
.github/workflows/trigger_jenkins.yaml
vendored
50
.github/workflows/trigger_jenkins.yaml
vendored
@@ -1,50 +0,0 @@
|
||||
name: Trigger next gating
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- next**
|
||||
|
||||
jobs:
|
||||
trigger-jenkins:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Determine Jenkins Job Name
|
||||
run: |
|
||||
if [[ "${{ github.ref_name }}" == "next" ]]; then
|
||||
FOLDER_NAME="scylla-master"
|
||||
elif [[ "${{ github.ref_name }}" == "next-enterprise" ]]; then
|
||||
FOLDER_NAME="scylla-enterprise"
|
||||
else
|
||||
VERSION=$(echo "${{ github.ref_name }}" | awk -F'-' '{print $2}')
|
||||
if [[ "$VERSION" =~ ^202[0-4]\.[0-9]+$ ]]; then
|
||||
FOLDER_NAME="enterprise-$VERSION"
|
||||
elif [[ "$VERSION" =~ ^[0-9]+\.[0-9]+$ ]]; then
|
||||
FOLDER_NAME="scylla-$VERSION"
|
||||
fi
|
||||
fi
|
||||
echo "JOB_NAME=${FOLDER_NAME}/job/next" >> $GITHUB_ENV
|
||||
|
||||
- name: Trigger Jenkins Job
|
||||
env:
|
||||
JENKINS_USER: ${{ secrets.JENKINS_USERNAME }}
|
||||
JENKINS_API_TOKEN: ${{ secrets.JENKINS_TOKEN }}
|
||||
JENKINS_URL: "https://jenkins.scylladb.com"
|
||||
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
|
||||
run: |
|
||||
echo "Triggering Jenkins Job: $JOB_NAME"
|
||||
if ! curl -X POST "$JENKINS_URL/job/$JOB_NAME/buildWithParameters" --fail --user "$JENKINS_USER:$JENKINS_API_TOKEN" -i -v; then
|
||||
echo "Error: Jenkins job trigger failed"
|
||||
|
||||
# Send Slack message
|
||||
curl -X POST -H 'Content-type: application/json' \
|
||||
-H "Authorization: Bearer $SLACK_BOT_TOKEN" \
|
||||
--data '{
|
||||
"channel": "#releng-team",
|
||||
"text": "🚨 @here '$JOB_NAME' failed to be triggered, please check https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }} for more details",
|
||||
"icon_emoji": ":warning:"
|
||||
}' \
|
||||
https://slack.com/api/chat.postMessage
|
||||
|
||||
exit 1
|
||||
fi
|
||||
@@ -22,8 +22,6 @@ if(DEFINED CMAKE_BUILD_TYPE)
|
||||
endif()
|
||||
endif(DEFINED CMAKE_BUILD_TYPE)
|
||||
|
||||
option(Scylla_ENABLE_LTO "Turn on link-time optimization for the 'release' mode." ON)
|
||||
|
||||
include(mode.common)
|
||||
get_property(is_multi_config GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
|
||||
if(is_multi_config)
|
||||
@@ -44,7 +42,6 @@ else()
|
||||
endif()
|
||||
|
||||
include(limit_jobs)
|
||||
|
||||
# Configure Seastar compile options to align with Scylla
|
||||
set(CMAKE_CXX_STANDARD "23" CACHE INTERNAL "")
|
||||
set(CMAKE_CXX_EXTENSIONS ON CACHE INTERNAL "")
|
||||
@@ -66,25 +63,24 @@ if(is_multi_config)
|
||||
# establishing proper dependencies between them
|
||||
include(ExternalProject)
|
||||
|
||||
# should be consistent with configure_seastar() in configure.py
|
||||
set(seastar_build_dir "${CMAKE_BINARY_DIR}/$<CONFIG>/seastar")
|
||||
ExternalProject_Add(Seastar
|
||||
SOURCE_DIR "${PROJECT_SOURCE_DIR}/seastar"
|
||||
BINARY_DIR "${CMAKE_BINARY_DIR}/$<CONFIG>/seastar"
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ${CMAKE_COMMAND} --build "${seastar_build_dir}"
|
||||
BUILD_COMMAND ${CMAKE_COMMAND} --build <BINARY_DIR>
|
||||
--target seastar
|
||||
--target seastar_testing
|
||||
--target seastar_perf_testing
|
||||
--target app_iotune
|
||||
BUILD_ALWAYS ON
|
||||
BUILD_BYPRODUCTS
|
||||
${seastar_build_dir}/libseastar.$<IF:$<CONFIG:Debug,Dev>,so,a>
|
||||
${seastar_build_dir}/libseastar_testing.$<IF:$<CONFIG:Debug,Dev>,so,a>
|
||||
${seastar_build_dir}/libseastar_perf_testing.$<IF:$<CONFIG:Debug,Dev>,so,a>
|
||||
${seastar_build_dir}/apps/iotune/iotune
|
||||
${seastar_build_dir}/gen/include/seastar/http/chunk_parsers.hh
|
||||
${seastar_build_dir}/gen/include/seastar/http/request_parser.hh
|
||||
${seastar_build_dir}/gen/include/seastar/http/response_parser.hh
|
||||
<BINARY_DIR>/libseastar.$<IF:$<CONFIG:Debug,Dev>,so,a>
|
||||
<BINARY_DIR>/libseastar_testing.$<IF:$<CONFIG:Debug,Dev>,so,a>
|
||||
<BINARY_DIR>/libseastar_perf_testing.$<IF:$<CONFIG:Debug,Dev>,so,a>
|
||||
<BINARY_DIR>/apps/iotune/iotune
|
||||
<BINARY_DIR>/gen/include/seastar/http/chunk_parsers.hh
|
||||
<BINARY_DIR>/gen/include/seastar/http/request_parser.hh
|
||||
<BINARY_DIR>/gen/include/seastar/http/response_parser.hh
|
||||
INSTALL_COMMAND "")
|
||||
add_dependencies(Seastar::seastar Seastar)
|
||||
add_dependencies(Seastar::seastar_testing Seastar)
|
||||
@@ -96,7 +92,7 @@ else()
|
||||
set(Seastar_EXCLUDE_APPS_FROM_ALL ON CACHE BOOL "" FORCE)
|
||||
set(Seastar_EXCLUDE_TESTS_FROM_ALL ON CACHE BOOL "" FORCE)
|
||||
set(Seastar_IO_URING ON CACHE BOOL "" FORCE)
|
||||
set(Seastar_SCHEDULING_GROUPS_COUNT 19 CACHE STRING "" FORCE)
|
||||
set(Seastar_SCHEDULING_GROUPS_COUNT 16 CACHE STRING "" FORCE)
|
||||
set(Seastar_UNUSED_RESULT_ERROR ON CACHE BOOL "" FORCE)
|
||||
add_subdirectory(seastar)
|
||||
target_compile_definitions (seastar
|
||||
@@ -106,17 +102,13 @@ endif()
|
||||
|
||||
set(ABSL_PROPAGATE_CXX_STD ON CACHE BOOL "" FORCE)
|
||||
|
||||
if(Scylla_ENABLE_LTO)
|
||||
list(APPEND absl_cxx_flags $<$<CONFIG:RelWithDebInfo>:${CMAKE_CXX_COMPILE_OPTIONS_IPO};-ffat-lto-objects>)
|
||||
endif()
|
||||
|
||||
find_package(Sanitizers QUIET)
|
||||
list(APPEND absl_cxx_flags
|
||||
set(sanitizer_cxx_flags
|
||||
$<$<CONFIG:Debug,Sanitize>:$<TARGET_PROPERTY:Sanitizers::address,INTERFACE_COMPILE_OPTIONS>;$<TARGET_PROPERTY:Sanitizers::undefined_behavior,INTERFACE_COMPILE_OPTIONS>>)
|
||||
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
|
||||
list(APPEND ABSL_GCC_FLAGS ${absl_cxx_flags})
|
||||
set(ABSL_GCC_FLAGS ${sanitizer_cxx_flags})
|
||||
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
|
||||
list(APPEND ABSL_LLVM_FLAGS ${absl_cxx_flags})
|
||||
set(ABSL_LLVM_FLAGS ${sanitizer_cxx_flags})
|
||||
endif()
|
||||
set(ABSL_DEFAULT_LINKOPTS
|
||||
$<$<CONFIG:Debug,Sanitize>:$<TARGET_PROPERTY:Sanitizers::address,INTERFACE_LINK_LIBRARIES>;$<TARGET_PROPERTY:Sanitizers::undefined_behavior,INTERFACE_LINK_LIBRARIES>>)
|
||||
@@ -149,13 +141,11 @@ find_package(ICU COMPONENTS uc i18n REQUIRED)
|
||||
find_package(fmt 10.0.0 REQUIRED)
|
||||
find_package(libdeflate REQUIRED)
|
||||
find_package(libxcrypt REQUIRED)
|
||||
find_package(p11-kit REQUIRED)
|
||||
find_package(Snappy REQUIRED)
|
||||
find_package(RapidJSON REQUIRED)
|
||||
find_package(xxHash REQUIRED)
|
||||
find_package(yaml-cpp REQUIRED)
|
||||
find_package(zstd REQUIRED)
|
||||
find_package(lz4 REQUIRED)
|
||||
|
||||
set(scylla_gen_build_dir "${CMAKE_BINARY_DIR}/gen")
|
||||
file(MAKE_DIRECTORY "${scylla_gen_build_dir}")
|
||||
@@ -201,7 +191,7 @@ target_sources(scylla-main
|
||||
tombstone_gc_options.cc
|
||||
tombstone_gc.cc
|
||||
reader_concurrency_semaphore.cc
|
||||
reader_concurrency_semaphore_group.cc
|
||||
row_cache.cc
|
||||
schema_mutations.cc
|
||||
serializer.cc
|
||||
sstables_loader.cc
|
||||
@@ -223,10 +213,7 @@ target_link_libraries(scylla-main
|
||||
Seastar::seastar
|
||||
Snappy::snappy
|
||||
systemd
|
||||
ZLIB::ZLIB
|
||||
lz4::lz4_static
|
||||
zstd::zstd_static
|
||||
)
|
||||
ZLIB::ZLIB)
|
||||
|
||||
option(Scylla_CHECK_HEADERS
|
||||
"Add check-headers target for checking the self-containness of headers")
|
||||
@@ -261,7 +248,6 @@ add_custom_target(compiler-training)
|
||||
|
||||
add_subdirectory(api)
|
||||
add_subdirectory(alternator)
|
||||
add_subdirectory(audit)
|
||||
add_subdirectory(db)
|
||||
add_subdirectory(auth)
|
||||
add_subdirectory(cdc)
|
||||
@@ -269,7 +255,6 @@ add_subdirectory(compaction)
|
||||
add_subdirectory(cql3)
|
||||
add_subdirectory(data_dictionary)
|
||||
add_subdirectory(dht)
|
||||
add_subdirectory(ent)
|
||||
add_subdirectory(gms)
|
||||
add_subdirectory(idl)
|
||||
add_subdirectory(index)
|
||||
@@ -300,8 +285,7 @@ add_version_library(scylla_version
|
||||
|
||||
add_executable(scylla
|
||||
main.cc)
|
||||
set(scylla_libs
|
||||
audit
|
||||
target_link_libraries(scylla PRIVATE
|
||||
scylla-main
|
||||
api
|
||||
auth
|
||||
@@ -312,12 +296,10 @@ set(scylla_libs
|
||||
cql3
|
||||
data_dictionary
|
||||
dht
|
||||
encryption
|
||||
gms
|
||||
idl
|
||||
index
|
||||
lang
|
||||
ldap
|
||||
locator
|
||||
message
|
||||
mutation
|
||||
@@ -338,18 +320,8 @@ set(scylla_libs
|
||||
transport
|
||||
types
|
||||
utils)
|
||||
target_link_libraries(scylla PRIVATE
|
||||
${scylla_libs})
|
||||
|
||||
if(Scylla_ENABLE_LTO)
|
||||
include(enable_lto)
|
||||
foreach(target scylla ${scylla_libs})
|
||||
enable_lto(${target})
|
||||
endforeach()
|
||||
endif()
|
||||
|
||||
target_link_libraries(scylla PRIVATE
|
||||
p11-kit::p11-kit
|
||||
Seastar::seastar
|
||||
absl::headers
|
||||
yaml-cpp::yaml-cpp
|
||||
@@ -367,7 +339,3 @@ add_dependencies(compiler-training
|
||||
if(Scylla_DIST)
|
||||
add_subdirectory(dist)
|
||||
endif()
|
||||
|
||||
if(Scylla_BUILD_INSTRUMENTED)
|
||||
add_subdirectory(pgo)
|
||||
endif()
|
||||
|
||||
@@ -12,7 +12,7 @@ Please use the [issue tracker](https://github.com/scylladb/scylla/issues/) to re
|
||||
|
||||
## Contributing code to Scylla
|
||||
|
||||
Before you can contribute code to Scylla for the first time, you should sign the [Contributor License Agreement](https://www.scylladb.com/open-source/contributor-agreement/) and send the signed form cla@scylladb.com. You can then submit your changes as patches to the [scylladb-dev mailing list](https://groups.google.com/forum/#!forum/scylladb-dev) or as a pull request to the [Scylla project on github](https://github.com/scylladb/scylla).
|
||||
Before you can contribute code to Scylla for the first time, you should sign the [Contributor License Agreement](https://www.scylladb.com/open-source/contributor-agreement/) and send the signed form cla@scylladb.com. You can then submit your changes as patches to the to the [scylladb-dev mailing list](https://groups.google.com/forum/#!forum/scylladb-dev) or as a pull request to the [Scylla project on github](https://github.com/scylladb/scylla).
|
||||
If you need help formatting or sending patches, [check out these instructions](https://github.com/scylladb/scylla/wiki/Formatting-and-sending-patches).
|
||||
|
||||
The Scylla C++ source code uses the [Seastar coding style](https://github.com/scylladb/seastar/blob/master/coding-style.md) so please adhere to that in your patches. Note that Scylla code is written with `using namespace seastar`, so should not explicitly add the `seastar::` prefix to Seastar symbols. You will usually not need to add `using namespace seastar` to new source files, because most Scylla header files have `#include "seastarx.hh"`, which does this.
|
||||
|
||||
36
HACKING.md
36
HACKING.md
@@ -280,45 +280,21 @@ Once the patch set is ready to be reviewed, push the branch to the public remote
|
||||
|
||||
### Development environment and source code navigation
|
||||
|
||||
Scylla includes a [CMake](https://cmake.org/) file, `CMakeLists.txt` that can be used with development environments so
|
||||
that they can properly analyze the source code. However, building with CMake is not yet officially supported.
|
||||
Scylla includes a [CMake](https://cmake.org/) file, `CMakeLists.txt`, for use only with development environments (not for building) so that they can properly analyze the source code.
|
||||
|
||||
Good IDEs that have support for CMake build toolchain are [CLion](https://www.jetbrains.com/clion/),
|
||||
[KDevelop](https://www.kdevelop.org/) and [QtCreator](https://wiki.qt.io/Qt_Creator).
|
||||
[CLion](https://www.jetbrains.com/clion/) is a commercial IDE offers reasonably good source code navigation and advice for code hygiene, though its C++ parser sometimes makes errors and flags false issues.
|
||||
|
||||
[Eclipse](https://eclipse.org/cdt/) is another open-source option. It doesn't natively work with CMake projects and its
|
||||
C++ parser has many issues.
|
||||
Other good options that directly parse CMake files are [KDevelop](https://www.kdevelop.org/) and [QtCreator](https://wiki.qt.io/Qt_Creator).
|
||||
|
||||
#### CLion
|
||||
To use the `CMakeLists.txt` file with these programs, define the `FOR_IDE` CMake variable or shell environmental variable.
|
||||
|
||||
[CLion](https://www.jetbrains.com/clion/) is a commercial IDE offers reasonably good source code navigation and advice
|
||||
for code hygiene, though its C++ parser sometimes makes errors and flags false issues. In order to enable proper code
|
||||
analysis in CLion, the following steps are needed:
|
||||
|
||||
1. Get the ScyllaDB source code by following the [Getting the source code](#getting-the-source-code).
|
||||
2. Follow the steps in [Dependencies](#dependencies) in order to install the required tools natively into your system.
|
||||
**Don't** follow the *frozen toolchain* part described there, since CMake checks for the build dependencies installed
|
||||
in the system, not in the container image provided by the toolchain.
|
||||
3. In CLion, select `File`→`Open` and select the main ScyllaDB directory in order to open the CMake project there. The
|
||||
project should open and fail to process the `CMakeLists.txt`. That's expected.
|
||||
4. In CLion, open `File`→`Settings`.
|
||||
5. Find and click on `Toolchains` (type *toolchains* into search box).
|
||||
6. Select the toolchain you will use, for instance the `Default` one.
|
||||
7. Type in the following system-installed tools to be used:
|
||||
- `CMake`: *cmake*
|
||||
- `Build Tool`: *ninja*
|
||||
- `C Compiler`: *clang*
|
||||
- `C++ Compiler`: *clang*
|
||||
8. On the `CMake` panel/tab, click on `Reload CMake Project`
|
||||
|
||||
After that, CLion should successfully initialize the CMake project (marked by `[Finished]` in the console) and the
|
||||
source code editor should provide code analysis support normally from now on.
|
||||
[Eclipse](https://eclipse.org/cdt/) is another open-source option. It doesn't natively work with CMake projects, and its C++ parser has many similar issues as CLion.
|
||||
|
||||
### Distributed compilation: `distcc` and `ccache`
|
||||
|
||||
Scylla's compilations times can be long. Two tools help somewhat:
|
||||
|
||||
- [ccache](https://ccache.samba.org/) caches compiled object files on disk and reuses them when possible
|
||||
- [ccache](https://ccache.samba.org/) caches compiled object files on disk and re-uses them when possible
|
||||
- [distcc](https://github.com/distcc/distcc) distributes compilation jobs to remote machines
|
||||
|
||||
A reasonably-powered laptop acts as the coordinator for compilation. A second, more powerful, machine acts as a passive compilation server.
|
||||
|
||||
@@ -49,7 +49,7 @@ The terms "**You**" or "**Licensee**" refer to any individual accessing or using
|
||||
|
||||
* **Ownership:** Licensor retains sole and exclusive ownership of all rights, interests and title in the Software and any scripts, processes, techniques, methodologies, inventions, know-how, concepts, formatting, arrangements, visual attributes, ideas, database rights, copyrights, patents, trade secrets, and other intellectual property related thereto, and all derivatives, enhancements, modifications and improvements thereof. Except for the limited license rights granted herein, Licensee has no rights in or to the Software and/ or Licensor’s trademarks, logo, or branding and You acknowledge that such Software, trademarks, logo, or branding is the sole property of Licensor.
|
||||
* **Feedback:** Licensee is not required to provide any suggestions, enhancement requests, recommendations or other feedback regarding the Software ("Feedback"). If, notwithstanding this policy, Licensee submits Feedback, Licensee understands and acknowledges that such Feedback is not submitted in confidence and Licensor assumes no obligation, expressed or implied, by considering it. All right in any trademark or logo of Licensor or its affiliates and You shall make no claim of right to the Software or any part thereof to be supplied by Licensor hereunder and acknowledges that as between Licensor and You, such Software is the sole proprietary, title and interest in and to Licensor.such Feedback shall be assigned to, and shall become the sole and exclusive property of, Licensor upon its creation.
|
||||
* Except for the rights expressly granted to You under this Agreement, You are not granted any other licenses or rights in the Software or otherwise. This Agreement constitutes the entire agreement between You and the Licensor with respect to the subject matter hereof and supersedes all prior or contemporaneous communications, representations, or agreements, whether oral or written.
|
||||
* Except for the rights expressly granted to You under this Agreement, You are not granted any other licenses or rights in the Software or otherwise. This Agreement constitutes the entire agreement between the You and the Licensor with respect to the subject matter hereof and supersedes all prior or contemporaneous communications, representations, or agreements, whether oral or written.
|
||||
* **Third-Party Software:** Customer acknowledges that the Software may contain open and closed source components (“OSS Components”) that are governed separately by certain licenses, in each case as further provided by Company upon request. Any applicable OSS Component license is solely between Licensee and the applicable licensor of the OSS Component and Licensee shall comply with the applicable OSS Component license.
|
||||
* If any provision of this Agreement is held to be invalid or unenforceable, such provision shall be struck and the remaining provisions shall remain in full force and effect.
|
||||
|
||||
|
||||
@@ -102,7 +102,7 @@ If you are a developer working on Scylla, please read the [developer guidelines]
|
||||
|
||||
## Contact
|
||||
|
||||
* The [community forum] and [Slack channel] are for users to discuss configuration, management, and operations of ScyllaDB.
|
||||
* The [community forum] and [Slack channel] are for users to discuss configuration, management, and operations of the ScyllaDB open source.
|
||||
* The [developers mailing list] is for developers and people interested in following the development of ScyllaDB to discuss technical topics.
|
||||
|
||||
[Community forum]: https://forum.scylladb.com/
|
||||
|
||||
@@ -78,7 +78,7 @@ fi
|
||||
|
||||
# Default scylla product/version tags
|
||||
PRODUCT=scylla
|
||||
VERSION=2025.2.0-dev
|
||||
VERSION=6.3.0-dev
|
||||
|
||||
if test -f version
|
||||
then
|
||||
|
||||
@@ -6,9 +6,7 @@
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include <seastar/core/with_scheduling_group.hh>
|
||||
#include <seastar/net/dns.hh>
|
||||
|
||||
#include "controller.hh"
|
||||
#include "server.hh"
|
||||
#include "executor.hh"
|
||||
|
||||
@@ -88,9 +88,6 @@ public:
|
||||
static api_error table_not_found(std::string msg) {
|
||||
return api_error("TableNotFoundException", std::move(msg));
|
||||
}
|
||||
static api_error limit_exceeded(std::string msg) {
|
||||
return api_error("LimitExceededException", std::move(msg));
|
||||
}
|
||||
static api_error internal(std::string msg) {
|
||||
return api_error("InternalServerError", std::move(msg), http::reply::status_type::internal_server_error);
|
||||
}
|
||||
|
||||
@@ -7,13 +7,11 @@
|
||||
*/
|
||||
|
||||
#include <fmt/ranges.h>
|
||||
#include <seastar/core/on_internal_error.hh>
|
||||
#include "alternator/executor.hh"
|
||||
#include "alternator/consumed_capacity.hh"
|
||||
#include "auth/permission.hh"
|
||||
#include "auth/resource.hh"
|
||||
#include "cdc/log.hh"
|
||||
#include "cdc/cdc_options.hh"
|
||||
#include "auth/service.hh"
|
||||
#include "db/config.hh"
|
||||
#include "utils/log.hh"
|
||||
@@ -47,7 +45,6 @@
|
||||
#include "alternator/rmw_operation.hh"
|
||||
#include <seastar/core/coroutine.hh>
|
||||
#include <seastar/core/sleep.hh>
|
||||
#include <seastar/core/loop.hh>
|
||||
#include <seastar/coroutine/maybe_yield.hh>
|
||||
#include <boost/range/algorithm/find_end.hpp>
|
||||
#include <unordered_set>
|
||||
@@ -57,9 +54,6 @@
|
||||
#include "utils/error_injection.hh"
|
||||
#include "db/schema_tables.hh"
|
||||
#include "utils/rjson.hh"
|
||||
#include "alternator/extract_from_attrs.hh"
|
||||
#include "types/types.hh"
|
||||
#include "db/system_keyspace.hh"
|
||||
|
||||
using namespace std::chrono_literals;
|
||||
|
||||
@@ -220,7 +214,7 @@ static void validate_table_name(const std::string& name) {
|
||||
// instead of each component individually as DynamoDB does.
|
||||
// The view_name() function assumes the table_name has already been validated
|
||||
// but validates the legality of index_name and the combination of both.
|
||||
static std::string view_name(std::string_view table_name, std::string_view index_name, const std::string& delim = ":") {
|
||||
static std::string view_name(const std::string& table_name, std::string_view index_name, const std::string& delim = ":") {
|
||||
if (index_name.length() < 3) {
|
||||
throw api_error::validation("IndexName must be at least 3 characters long");
|
||||
}
|
||||
@@ -228,7 +222,7 @@ static std::string view_name(std::string_view table_name, std::string_view index
|
||||
throw api_error::validation(
|
||||
fmt::format("IndexName '{}' must satisfy regular expression pattern: [a-zA-Z0-9_.-]+", index_name));
|
||||
}
|
||||
std::string ret = std::string(table_name) + delim + std::string(index_name);
|
||||
std::string ret = table_name + delim + std::string(index_name);
|
||||
if (ret.length() > max_table_name_length) {
|
||||
throw api_error::validation(
|
||||
fmt::format("The total length of TableName ('{}') and IndexName ('{}') cannot exceed {} characters",
|
||||
@@ -237,7 +231,7 @@ static std::string view_name(std::string_view table_name, std::string_view index
|
||||
return ret;
|
||||
}
|
||||
|
||||
static std::string lsi_name(std::string_view table_name, std::string_view index_name) {
|
||||
static std::string lsi_name(const std::string& table_name, std::string_view index_name) {
|
||||
return view_name(table_name, index_name, "!:");
|
||||
}
|
||||
|
||||
@@ -474,90 +468,7 @@ static rjson::value generate_arn_for_index(const schema& schema, std::string_vie
|
||||
schema.ks_name(), schema.cf_name(), index_name));
|
||||
}
|
||||
|
||||
// The following function checks if a given view has finished building.
|
||||
// We need this for describe_table() to know if a view is still backfilling,
|
||||
// or active.
|
||||
//
|
||||
// Currently we don't have in view_ptr the knowledge whether a view finished
|
||||
// building long ago - so checking this involves a somewhat inefficient, but
|
||||
// still node-local, process:
|
||||
// We need a table that can accurately tell that all nodes have finished
|
||||
// building this view. system.built_views is not good enough because it only
|
||||
// knows the view building status in the current node. In recent versions,
|
||||
// after PR #19745, we have a local table system.view_build_status_v2 with
|
||||
// global information, replacing the old system_distributed.view_build_status.
|
||||
// In theory, there can be a period during upgrading an old cluster when this
|
||||
// table is not yet available. However, since the IndexStatus is a new feature
|
||||
// too, it is acceptable that it doesn't yet work in the middle of the update.
|
||||
static future<bool> is_view_built(
|
||||
view_ptr view,
|
||||
service::storage_proxy& proxy,
|
||||
service::client_state& client_state,
|
||||
tracing::trace_state_ptr trace_state,
|
||||
service_permit permit) {
|
||||
auto schema = proxy.data_dictionary().find_table(
|
||||
"system", db::system_keyspace::VIEW_BUILD_STATUS_V2).schema();
|
||||
// The table system.view_build_status_v2 has "keyspace_name" and
|
||||
// "view_name" as the partition key, and each clustering row has
|
||||
// "host_id" as clustering key and a string "status". We need to
|
||||
// read a single partition:
|
||||
partition_key pk = partition_key::from_exploded(*schema,
|
||||
{utf8_type->decompose(view->ks_name()),
|
||||
utf8_type->decompose(view->cf_name())});
|
||||
dht::partition_range_vector partition_ranges{
|
||||
dht::partition_range(dht::decorate_key(*schema, pk))};
|
||||
auto selection = cql3::selection::selection::wildcard(schema); // only for get_query_options()!
|
||||
auto partition_slice = query::partition_slice(
|
||||
{query::clustering_range::make_open_ended_both_sides()},
|
||||
{}, // static columns
|
||||
{schema->get_column_definition("status")->id}, // regular columns
|
||||
selection->get_query_options());
|
||||
auto command = ::make_lw_shared<query::read_command>(
|
||||
schema->id(), schema->version(), partition_slice,
|
||||
proxy.get_max_result_size(partition_slice),
|
||||
query::tombstone_limit(proxy.get_tombstone_limit()));
|
||||
service::storage_proxy::coordinator_query_result qr =
|
||||
co_await proxy.query(
|
||||
schema, std::move(command), std::move(partition_ranges),
|
||||
db::consistency_level::LOCAL_ONE,
|
||||
service::storage_proxy::coordinator_query_options(
|
||||
executor::default_timeout(), std::move(permit), client_state, trace_state));
|
||||
query::result_set rs = query::result_set::from_raw_result(
|
||||
schema, partition_slice, *qr.query_result);
|
||||
std::unordered_map<locator::host_id, sstring> statuses;
|
||||
for (auto&& r : rs.rows()) {
|
||||
auto host_id = r.get<utils::UUID>("host_id");
|
||||
auto status = r.get<sstring>("status");
|
||||
if (host_id && status) {
|
||||
statuses.emplace(locator::host_id(*host_id), *status);
|
||||
}
|
||||
}
|
||||
// A view is considered "built" if all nodes reported SUCCESS in having
|
||||
// built this view. Note that we need this "SUCCESS" for all nodes in the
|
||||
// cluster - even those that are temporarily down (their success is known
|
||||
// by this node, even if they are down). Conversely, we don't care what is
|
||||
// the recorded status for any node which is no longer in the cluster - it
|
||||
// is possible we forgot to erase the status of nodes that left the
|
||||
// cluster, but here we just ignore them and look at the nodes actually
|
||||
// in the topology.
|
||||
bool all_built = true;
|
||||
auto token_metadata = proxy.get_token_metadata_ptr();
|
||||
token_metadata->get_topology().for_each_node(
|
||||
[&] (const locator::node& node) {
|
||||
// Note: we could skip nodes in DCs which have no replication of
|
||||
// this view. However, in practice even those nodes would run
|
||||
// the view building (and just see empty content) so we don't
|
||||
// need to bother with this skipping.
|
||||
auto it = statuses.find(node.host_id());
|
||||
if (it == statuses.end() || it->second != "SUCCESS") {
|
||||
all_built = false;
|
||||
}
|
||||
});
|
||||
co_return all_built;
|
||||
|
||||
}
|
||||
|
||||
static future<rjson::value> fill_table_description(schema_ptr schema, table_status tbl_status, service::storage_proxy& proxy, service::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit)
|
||||
static rjson::value fill_table_description(schema_ptr schema, table_status tbl_status, service::storage_proxy const& proxy)
|
||||
{
|
||||
rjson::value table_description = rjson::empty_object();
|
||||
auto tags_ptr = db::get_tags_of_table(schema);
|
||||
@@ -636,22 +547,7 @@ static future<rjson::value> fill_table_description(schema_ptr schema, table_stat
|
||||
// FIXME: we have to get ProjectionType from the schema when it is added
|
||||
rjson::add(view_entry, "Projection", std::move(projection));
|
||||
// Local secondary indexes are marked by an extra '!' sign occurring before the ':' delimiter
|
||||
bool is_lsi = (delim_it > 1 && cf_name[delim_it-1] == '!');
|
||||
// Add IndexStatus and Backfilling flags, but only for GSIs -
|
||||
// LSIs can only be created with the table itself and do not
|
||||
// have a status. Alternator schema operations are synchronous
|
||||
// so only two combinations of these flags are possible: ACTIVE
|
||||
// (for a built view) or CREATING+Backfilling (if view building
|
||||
// is in progress).
|
||||
if (!is_lsi) {
|
||||
if (co_await is_view_built(vptr, proxy, client_state, trace_state, permit)) {
|
||||
rjson::add(view_entry, "IndexStatus", "ACTIVE");
|
||||
} else {
|
||||
rjson::add(view_entry, "IndexStatus", "CREATING");
|
||||
rjson::add(view_entry, "Backfilling", rjson::value(true));
|
||||
}
|
||||
}
|
||||
rjson::value& index_array = is_lsi ? lsi_array : gsi_array;
|
||||
rjson::value& index_array = (delim_it > 1 && cf_name[delim_it-1] == '!') ? lsi_array : gsi_array;
|
||||
rjson::push_back(index_array, std::move(view_entry));
|
||||
}
|
||||
if (!lsi_array.Empty()) {
|
||||
@@ -675,7 +571,7 @@ static future<rjson::value> fill_table_description(schema_ptr schema, table_stat
|
||||
executor::supplement_table_stream_info(table_description, *schema, proxy);
|
||||
|
||||
// FIXME: still missing some response fields (issue #5026)
|
||||
co_return table_description;
|
||||
return table_description;
|
||||
}
|
||||
|
||||
bool is_alternator_keyspace(const sstring& ks_name) {
|
||||
@@ -694,11 +590,11 @@ future<executor::request_return_type> executor::describe_table(client_state& cli
|
||||
|
||||
tracing::add_table_name(trace_state, schema->ks_name(), schema->cf_name());
|
||||
|
||||
rjson::value table_description = co_await fill_table_description(schema, table_status::active, _proxy, client_state, trace_state, permit);
|
||||
rjson::value table_description = fill_table_description(schema, table_status::active, _proxy);
|
||||
rjson::value response = rjson::empty_object();
|
||||
rjson::add(response, "Table", std::move(table_description));
|
||||
elogger.trace("returning {}", response);
|
||||
co_return make_jsonable(std::move(response));
|
||||
return make_ready_future<executor::request_return_type>(make_jsonable(std::move(response)));
|
||||
}
|
||||
|
||||
// Check CQL's Role-Based Access Control (RBAC) permission_to_check (MODIFY,
|
||||
@@ -759,7 +655,7 @@ future<executor::request_return_type> executor::delete_table(client_state& clien
|
||||
auto& p = _proxy.container();
|
||||
|
||||
schema_ptr schema = get_table(_proxy, request);
|
||||
rjson::value table_description = co_await fill_table_description(schema, table_status::deleting, _proxy, client_state, trace_state, permit);
|
||||
rjson::value table_description = fill_table_description(schema, table_status::deleting, _proxy);
|
||||
co_await verify_permission(_enforce_authorization, client_state, schema, auth::permission::DROP);
|
||||
co_await _mm.container().invoke_on(0, [&, cs = client_state.move_to_other_shard()] (service::migration_manager& mm) -> future<> {
|
||||
// FIXME: the following needs to be in a loop. If mm.announce() below
|
||||
@@ -807,7 +703,7 @@ future<executor::request_return_type> executor::delete_table(client_state& clien
|
||||
co_return make_jsonable(std::move(response));
|
||||
}
|
||||
|
||||
static data_type parse_key_type(std::string_view type) {
|
||||
static data_type parse_key_type(const std::string& type) {
|
||||
// Note that keys are only allowed to be string, blob or number (S/B/N).
|
||||
// The other types: boolean and various lists or sets - are not allowed.
|
||||
if (type.length() == 1) {
|
||||
@@ -822,7 +718,7 @@ static data_type parse_key_type(std::string_view type) {
|
||||
}
|
||||
|
||||
|
||||
static void add_column(schema_builder& builder, const std::string& name, const rjson::value& attribute_definitions, column_kind kind, bool computed_column=false) {
|
||||
static void add_column(schema_builder& builder, const std::string& name, const rjson::value& attribute_definitions, column_kind kind) {
|
||||
// FIXME: Currently, the column name ATTRS_COLUMN_NAME is not allowed
|
||||
// because we use it for our untyped attribute map, and we can't have a
|
||||
// second column with the same name. We should fix this, by renaming
|
||||
@@ -834,16 +730,7 @@ static void add_column(schema_builder& builder, const std::string& name, const r
|
||||
const rjson::value& attribute_info = *it;
|
||||
if (attribute_info["AttributeName"].GetString() == name) {
|
||||
auto type = attribute_info["AttributeType"].GetString();
|
||||
data_type dt = parse_key_type(type);
|
||||
if (computed_column) {
|
||||
// Computed column for GSI (doesn't choose a real column as-is
|
||||
// but rather extracts a single value from the ":attrs" map)
|
||||
alternator_type at = type_info_from_string(type).atype;
|
||||
builder.with_computed_column(to_bytes(name), dt, kind,
|
||||
std::make_unique<extract_from_attrs_column_computation>(to_bytes(name), at));
|
||||
} else {
|
||||
builder.with_column(to_bytes(name), dt, kind);
|
||||
}
|
||||
builder.with_column(to_bytes(name), parse_key_type(type), kind);
|
||||
return;
|
||||
}
|
||||
}
|
||||
@@ -1184,87 +1071,6 @@ static std::unordered_set<std::string> validate_attribute_definitions(const rjso
|
||||
return seen_attribute_names;
|
||||
}
|
||||
|
||||
// The following "extract_from_attrs_column_computation" implementation is
|
||||
// what allows Alternator GSIs to use in a materialized view's key a member
|
||||
// from the ":attrs" map instead of a real column in the schema:
|
||||
|
||||
const bytes extract_from_attrs_column_computation::MAP_NAME = executor::ATTRS_COLUMN_NAME;
|
||||
|
||||
column_computation_ptr extract_from_attrs_column_computation::clone() const {
|
||||
return std::make_unique<extract_from_attrs_column_computation>(*this);
|
||||
}
|
||||
|
||||
// Serialize the *definition* of this column computation into a JSON
|
||||
// string with a unique "type" string - TYPE_NAME - which then causes
|
||||
// column_computation::deserialize() to create an object from this class.
|
||||
bytes extract_from_attrs_column_computation::serialize() const {
|
||||
rjson::value ret = rjson::empty_object();
|
||||
rjson::add(ret, "type", TYPE_NAME);
|
||||
rjson::add(ret, "attr_name", rjson::from_string(to_string_view(_attr_name)));
|
||||
rjson::add(ret, "desired_type", represent_type(_desired_type).ident);
|
||||
return to_bytes(rjson::print(ret));
|
||||
}
|
||||
|
||||
// Construct an extract_from_attrs_column_computation object based on the
|
||||
// saved output of serialize(). Calls on_internal_error() if the string
|
||||
// doesn't match the expected output format of serialize(). "type" is not
|
||||
// checked - we assume the caller (column_computation::deserialize()) won't
|
||||
// call this constructor if "type" doesn't match.
|
||||
extract_from_attrs_column_computation::extract_from_attrs_column_computation(const rjson::value &v) {
|
||||
const rjson::value* attr_name = rjson::find(v, "attr_name");
|
||||
if (attr_name->IsString()) {
|
||||
_attr_name = bytes(to_bytes_view(rjson::to_string_view(*attr_name)));
|
||||
const rjson::value* desired_type = rjson::find(v, "desired_type");
|
||||
if (desired_type->IsString()) {
|
||||
_desired_type = type_info_from_string(rjson::to_string_view(*desired_type)).atype;
|
||||
switch (_desired_type) {
|
||||
case alternator_type::S:
|
||||
case alternator_type::B:
|
||||
case alternator_type::N:
|
||||
// We're done
|
||||
return;
|
||||
default:
|
||||
// Fall through to on_internal_error below.
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
on_internal_error(elogger, format("Improperly formatted alternator::extract_from_attrs_column_computation computed column definition: {}", v));
|
||||
}
|
||||
|
||||
regular_column_transformation::result extract_from_attrs_column_computation::compute_value(
|
||||
const schema& schema,
|
||||
const partition_key& key,
|
||||
const db::view::clustering_or_static_row& row) const
|
||||
{
|
||||
const column_definition* attrs_col = schema.get_column_definition(MAP_NAME);
|
||||
if (!attrs_col || !attrs_col->is_regular() || !attrs_col->is_multi_cell()) {
|
||||
on_internal_error(elogger, "extract_from_attrs_column_computation::compute_value() on a table without an attrs map");
|
||||
}
|
||||
// Look for the desired attribute _attr_name in the attrs_col map in row:
|
||||
const atomic_cell_or_collection* attrs = row.cells().find_cell(attrs_col->id);
|
||||
if (!attrs) {
|
||||
return regular_column_transformation::result();
|
||||
}
|
||||
collection_mutation_view cmv = attrs->as_collection_mutation();
|
||||
return cmv.with_deserialized(*attrs_col->type, [this] (const collection_mutation_view_description& cmvd) {
|
||||
for (auto&& [key, cell] : cmvd.cells) {
|
||||
if (key == _attr_name) {
|
||||
return regular_column_transformation::result(cell,
|
||||
std::bind(serialized_value_if_type, std::placeholders::_1, _desired_type));
|
||||
}
|
||||
}
|
||||
return regular_column_transformation::result();
|
||||
});
|
||||
}
|
||||
|
||||
// extract_from_attrs_column_computation needs the whole row to compute
|
||||
// value, it can't use just the partition key.
|
||||
bytes extract_from_attrs_column_computation::compute_value(const schema&, const partition_key&) const {
|
||||
on_internal_error(elogger, "extract_from_attrs_column_computation::compute_value called without row");
|
||||
}
|
||||
|
||||
|
||||
static future<executor::request_return_type> create_table_on_shard0(service::client_state&& client_state, tracing::trace_state_ptr trace_state, rjson::value request, service::storage_proxy& sp, service::migration_manager& mm, gms::gossiper& gossiper, bool enforce_authorization) {
|
||||
SCYLLA_ASSERT(this_shard_id() == 0);
|
||||
|
||||
@@ -1279,25 +1085,22 @@ static future<executor::request_return_type> create_table_on_shard0(service::cli
|
||||
co_return api_error::validation(fmt::format("Prefix {} is reserved for accessing internal tables", executor::INTERNAL_TABLE_PREFIX));
|
||||
}
|
||||
std::string keyspace_name = executor::KEYSPACE_NAME_PREFIX + table_name;
|
||||
const rjson::value* attribute_definitions = rjson::find(request, "AttributeDefinitions");
|
||||
if (attribute_definitions == nullptr) {
|
||||
co_return api_error::validation("Missing AttributeDefinitions in CreateTable request");
|
||||
}
|
||||
const rjson::value& attribute_definitions = request["AttributeDefinitions"];
|
||||
// Save the list of AttributeDefinitions in unused_attribute_definitions,
|
||||
// and below remove each one as we see it in a KeySchema of the table or
|
||||
// any of its GSIs or LSIs. If anything remains in this set at the end of
|
||||
// this function, it's an error.
|
||||
std::unordered_set<std::string> unused_attribute_definitions =
|
||||
validate_attribute_definitions(*attribute_definitions);
|
||||
validate_attribute_definitions(attribute_definitions);
|
||||
|
||||
tracing::add_table_name(trace_state, keyspace_name, table_name);
|
||||
|
||||
schema_builder builder(keyspace_name, table_name);
|
||||
auto [hash_key, range_key] = parse_key_schema(request);
|
||||
add_column(builder, hash_key, *attribute_definitions, column_kind::partition_key);
|
||||
add_column(builder, hash_key, attribute_definitions, column_kind::partition_key);
|
||||
unused_attribute_definitions.erase(hash_key);
|
||||
if (!range_key.empty()) {
|
||||
add_column(builder, range_key, *attribute_definitions, column_kind::clustering_key);
|
||||
add_column(builder, range_key, attribute_definitions, column_kind::clustering_key);
|
||||
unused_attribute_definitions.erase(range_key);
|
||||
}
|
||||
builder.with_column(bytes(executor::ATTRS_COLUMN_NAME), attrs_type(), column_kind::regular_column);
|
||||
@@ -1306,15 +1109,67 @@ static future<executor::request_return_type> create_table_on_shard0(service::cli
|
||||
|
||||
schema_ptr partial_schema = builder.build();
|
||||
|
||||
// Parse Local/GlobalSecondaryIndexes parameters before creating the
|
||||
// base table, so if we have a parse errors we can fail without creating
|
||||
// Parse GlobalSecondaryIndexes parameters before creating the base
|
||||
// table, so if we have a parse errors we can fail without creating
|
||||
// any table.
|
||||
const rjson::value* gsi = rjson::find(request, "GlobalSecondaryIndexes");
|
||||
std::vector<schema_builder> view_builders;
|
||||
std::unordered_set<std::string> index_names;
|
||||
// Remember the attributes used for LSI keys. Since LSI must be created
|
||||
// with the table, we make these attributes real schema columns, and need
|
||||
// to remember this below if the same attributes are used as GSI keys.
|
||||
std::unordered_set<std::string> lsi_range_keys;
|
||||
if (gsi) {
|
||||
if (!gsi->IsArray()) {
|
||||
co_return api_error::validation("GlobalSecondaryIndexes must be an array.");
|
||||
}
|
||||
for (const rjson::value& g : gsi->GetArray()) {
|
||||
const rjson::value* index_name_v = rjson::find(g, "IndexName");
|
||||
if (!index_name_v || !index_name_v->IsString()) {
|
||||
co_return api_error::validation("GlobalSecondaryIndexes IndexName must be a string.");
|
||||
}
|
||||
std::string_view index_name = rjson::to_string_view(*index_name_v);
|
||||
auto [it, added] = index_names.emplace(index_name);
|
||||
if (!added) {
|
||||
co_return api_error::validation(fmt::format("Duplicate IndexName '{}', ", index_name));
|
||||
}
|
||||
std::string vname(view_name(table_name, index_name));
|
||||
elogger.trace("Adding GSI {}", index_name);
|
||||
// FIXME: read and handle "Projection" parameter. This will
|
||||
// require the MV code to copy just parts of the attrs map.
|
||||
schema_builder view_builder(keyspace_name, vname);
|
||||
auto [view_hash_key, view_range_key] = parse_key_schema(g);
|
||||
if (partial_schema->get_column_definition(to_bytes(view_hash_key)) == nullptr) {
|
||||
// A column that exists in a global secondary index is upgraded from being a map entry
|
||||
// to having a regular column definition in the base schema
|
||||
add_column(builder, view_hash_key, attribute_definitions, column_kind::regular_column);
|
||||
}
|
||||
add_column(view_builder, view_hash_key, attribute_definitions, column_kind::partition_key);
|
||||
unused_attribute_definitions.erase(view_hash_key);
|
||||
if (!view_range_key.empty()) {
|
||||
if (partial_schema->get_column_definition(to_bytes(view_range_key)) == nullptr) {
|
||||
// A column that exists in a global secondary index is upgraded from being a map entry
|
||||
// to having a regular column definition in the base schema
|
||||
if (partial_schema->get_column_definition(to_bytes(view_hash_key)) == nullptr) {
|
||||
// FIXME: this is alternator limitation only, because Scylla's materialized views
|
||||
// we use underneath do not allow more than 1 base regular column to be part of the MV key
|
||||
elogger.warn("Only 1 regular column from the base table should be used in the GSI key in order to ensure correct liveness management without assumptions");
|
||||
}
|
||||
add_column(builder, view_range_key, attribute_definitions, column_kind::regular_column);
|
||||
}
|
||||
add_column(view_builder, view_range_key, attribute_definitions, column_kind::clustering_key);
|
||||
unused_attribute_definitions.erase(view_range_key);
|
||||
}
|
||||
// Base key columns which aren't part of the index's key need to
|
||||
// be added to the view nonetheless, as (additional) clustering
|
||||
// key(s).
|
||||
if (hash_key != view_hash_key && hash_key != view_range_key) {
|
||||
add_column(view_builder, hash_key, attribute_definitions, column_kind::clustering_key);
|
||||
}
|
||||
if (!range_key.empty() && range_key != view_hash_key && range_key != view_range_key) {
|
||||
add_column(view_builder, range_key, attribute_definitions, column_kind::clustering_key);
|
||||
}
|
||||
// GSIs have no tags:
|
||||
view_builder.add_extension(db::tags_extension::NAME, ::make_shared<db::tags_extension>());
|
||||
view_builders.emplace_back(std::move(view_builder));
|
||||
}
|
||||
}
|
||||
|
||||
const rjson::value* lsi = rjson::find(request, "LocalSecondaryIndexes");
|
||||
if (lsi) {
|
||||
@@ -1343,7 +1198,7 @@ static future<executor::request_return_type> create_table_on_shard0(service::cli
|
||||
if (view_hash_key != hash_key) {
|
||||
co_return api_error::validation("LocalSecondaryIndex hash key must match the base table hash key");
|
||||
}
|
||||
add_column(view_builder, view_hash_key, *attribute_definitions, column_kind::partition_key);
|
||||
add_column(view_builder, view_hash_key, attribute_definitions, column_kind::partition_key);
|
||||
unused_attribute_definitions.erase(view_hash_key);
|
||||
if (view_range_key.empty()) {
|
||||
co_return api_error::validation("LocalSecondaryIndex must specify a sort key");
|
||||
@@ -1353,14 +1208,14 @@ static future<executor::request_return_type> create_table_on_shard0(service::cli
|
||||
co_return api_error::validation("LocalSecondaryIndex sort key cannot be the same as hash key");
|
||||
}
|
||||
if (view_range_key != range_key) {
|
||||
add_column(builder, view_range_key, *attribute_definitions, column_kind::regular_column);
|
||||
add_column(builder, view_range_key, attribute_definitions, column_kind::regular_column);
|
||||
}
|
||||
add_column(view_builder, view_range_key, *attribute_definitions, column_kind::clustering_key);
|
||||
add_column(view_builder, view_range_key, attribute_definitions, column_kind::clustering_key);
|
||||
// Base key columns which aren't part of the index's key need to
|
||||
// be added to the view nonetheless, as (additional) clustering
|
||||
// key(s).
|
||||
if (!range_key.empty() && view_range_key != range_key) {
|
||||
add_column(view_builder, range_key, *attribute_definitions, column_kind::clustering_key);
|
||||
add_column(view_builder, range_key, attribute_definitions, column_kind::clustering_key);
|
||||
}
|
||||
view_builder.with_column(bytes(executor::ATTRS_COLUMN_NAME), attrs_type(), column_kind::regular_column);
|
||||
// Note above we don't need to add virtual columns, as all
|
||||
@@ -1372,68 +1227,9 @@ static future<executor::request_return_type> create_table_on_shard0(service::cli
|
||||
std::map<sstring, sstring> tags_map = {{db::SYNCHRONOUS_VIEW_UPDATES_TAG_KEY, "true"}};
|
||||
view_builder.add_extension(db::tags_extension::NAME, ::make_shared<db::tags_extension>(tags_map));
|
||||
view_builders.emplace_back(std::move(view_builder));
|
||||
lsi_range_keys.emplace(view_range_key);
|
||||
}
|
||||
}
|
||||
|
||||
const rjson::value* gsi = rjson::find(request, "GlobalSecondaryIndexes");
|
||||
if (gsi) {
|
||||
if (!gsi->IsArray()) {
|
||||
co_return api_error::validation("GlobalSecondaryIndexes must be an array.");
|
||||
}
|
||||
for (const rjson::value& g : gsi->GetArray()) {
|
||||
const rjson::value* index_name_v = rjson::find(g, "IndexName");
|
||||
if (!index_name_v || !index_name_v->IsString()) {
|
||||
co_return api_error::validation("GlobalSecondaryIndexes IndexName must be a string.");
|
||||
}
|
||||
std::string_view index_name = rjson::to_string_view(*index_name_v);
|
||||
auto [it, added] = index_names.emplace(index_name);
|
||||
if (!added) {
|
||||
co_return api_error::validation(fmt::format("Duplicate IndexName '{}', ", index_name));
|
||||
}
|
||||
std::string vname(view_name(table_name, index_name));
|
||||
elogger.trace("Adding GSI {}", index_name);
|
||||
// FIXME: read and handle "Projection" parameter. This will
|
||||
// require the MV code to copy just parts of the attrs map.
|
||||
schema_builder view_builder(keyspace_name, vname);
|
||||
auto [view_hash_key, view_range_key] = parse_key_schema(g);
|
||||
|
||||
// If an attribute is already a real column in the base table
|
||||
// (i.e., a key attribute) or we already made it a real column
|
||||
// as an LSI key above, we can use it directly as a view key.
|
||||
// Otherwise, we need to add it as a "computed column", which
|
||||
// extracts and deserializes the attribute from the ":attrs" map.
|
||||
bool view_hash_key_real_column =
|
||||
partial_schema->get_column_definition(to_bytes(view_hash_key)) ||
|
||||
lsi_range_keys.contains(view_hash_key);
|
||||
add_column(view_builder, view_hash_key, *attribute_definitions, column_kind::partition_key, !view_hash_key_real_column);
|
||||
unused_attribute_definitions.erase(view_hash_key);
|
||||
if (!view_range_key.empty()) {
|
||||
bool view_range_key_real_column =
|
||||
partial_schema->get_column_definition(to_bytes(view_range_key)) ||
|
||||
lsi_range_keys.contains(view_range_key);
|
||||
add_column(view_builder, view_range_key, *attribute_definitions, column_kind::clustering_key, !view_range_key_real_column);
|
||||
if (!partial_schema->get_column_definition(to_bytes(view_range_key)) &&
|
||||
!partial_schema->get_column_definition(to_bytes(view_hash_key))) {
|
||||
// FIXME: This warning should go away. See issue #6714
|
||||
elogger.warn("Only 1 regular column from the base table should be used in the GSI key in order to ensure correct liveness management without assumptions");
|
||||
}
|
||||
unused_attribute_definitions.erase(view_range_key);
|
||||
}
|
||||
// Base key columns which aren't part of the index's key need to
|
||||
// be added to the view nonetheless, as (additional) clustering
|
||||
// key(s).
|
||||
if (hash_key != view_hash_key && hash_key != view_range_key) {
|
||||
add_column(view_builder, hash_key, *attribute_definitions, column_kind::clustering_key);
|
||||
}
|
||||
if (!range_key.empty() && range_key != view_hash_key && range_key != view_range_key) {
|
||||
add_column(view_builder, range_key, *attribute_definitions, column_kind::clustering_key);
|
||||
}
|
||||
// GSIs have no tags:
|
||||
view_builder.add_extension(db::tags_extension::NAME, ::make_shared<db::tags_extension>());
|
||||
view_builders.emplace_back(std::move(view_builder));
|
||||
}
|
||||
}
|
||||
if (!unused_attribute_definitions.empty()) {
|
||||
co_return api_error::validation(fmt::format(
|
||||
"AttributeDefinitions defines spurious attributes not used by any KeySchema: {}",
|
||||
@@ -1574,37 +1370,12 @@ future<executor::request_return_type> executor::create_table(client_state& clien
|
||||
});
|
||||
}
|
||||
|
||||
// When UpdateTable adds a GSI, the type of its key columns must be specified
|
||||
// in a AttributeDefinitions. If one of these key columns are *already* key
|
||||
// columns of the base table or any of its prior GSIs or LSIs, the type
|
||||
// given in AttributeDefinitions must match the type of the existing key -
|
||||
// otherwise Alternator will not know which type to enforce in new writes.
|
||||
// This function checks for such conflicts. It assumes that the structure of
|
||||
// the given attribute_definitions was already validated (with
|
||||
// validate_attribute_definitions()).
|
||||
// This function should be called multiple times - once for the base schema
|
||||
// and once for each of its views (existing GSIs and LSIs on this table).
|
||||
static void check_attribute_definitions_conflicts(const rjson::value& attribute_definitions, const schema& schema) {
|
||||
for (auto& def : schema.primary_key_columns()) {
|
||||
std::string def_type = type_to_string(def.type);
|
||||
for (auto it = attribute_definitions.Begin(); it != attribute_definitions.End(); ++it) {
|
||||
const rjson::value& attribute_info = *it;
|
||||
if (attribute_info["AttributeName"].GetString() == def.name_as_text()) {
|
||||
auto type = attribute_info["AttributeType"].GetString();
|
||||
if (type != def_type) {
|
||||
throw api_error::validation(fmt::format("AttributeDefinitions redefined {} to {} already a key attribute of type {} in this table", def.name_as_text(), type, def_type));
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
future<executor::request_return_type> executor::update_table(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request) {
|
||||
_stats.api_operations.update_table++;
|
||||
elogger.trace("Updating table {}", request);
|
||||
|
||||
static const std::vector<sstring> unsupported = {
|
||||
"GlobalSecondaryIndexUpdates",
|
||||
"ProvisionedThroughput",
|
||||
"ReplicaUpdates",
|
||||
"SSESpecification",
|
||||
@@ -1616,14 +1387,11 @@ future<executor::request_return_type> executor::update_table(client_state& clien
|
||||
}
|
||||
}
|
||||
|
||||
bool empty_request = true;
|
||||
|
||||
if (rjson::find(request, "BillingMode")) {
|
||||
empty_request = false;
|
||||
verify_billing_mode(request);
|
||||
}
|
||||
|
||||
co_return co_await _mm.container().invoke_on(0, [&p = _proxy.container(), request = std::move(request), gt = tracing::global_trace_state_ptr(std::move(trace_state)), enforce_authorization = bool(_enforce_authorization), client_state_other_shard = client_state.move_to_other_shard(), empty_request]
|
||||
co_return co_await _mm.container().invoke_on(0, [&p = _proxy.container(), request = std::move(request), gt = tracing::global_trace_state_ptr(std::move(trace_state)), enforce_authorization = bool(_enforce_authorization), client_state_other_shard = client_state.move_to_other_shard()]
|
||||
(service::migration_manager& mm) mutable -> future<executor::request_return_type> {
|
||||
// FIXME: the following needs to be in a loop. If mm.announce() below
|
||||
// fails, we need to retry the whole thing.
|
||||
@@ -1643,183 +1411,19 @@ future<executor::request_return_type> executor::update_table(client_state& clien
|
||||
|
||||
rjson::value* stream_specification = rjson::find(request, "StreamSpecification");
|
||||
if (stream_specification && stream_specification->IsObject()) {
|
||||
empty_request = false;
|
||||
add_stream_options(*stream_specification, builder, p.local());
|
||||
// Alternator Streams doesn't yet work when the table uses tablets (#16317)
|
||||
auto stream_enabled = rjson::find(*stream_specification, "StreamEnabled");
|
||||
if (stream_enabled && stream_enabled->IsBool()) {
|
||||
if (stream_enabled->GetBool()) {
|
||||
if (p.local().local_db().find_keyspace(tab->ks_name()).get_replication_strategy().uses_tablets()) {
|
||||
co_return api_error::validation("Streams not yet supported on a table using tablets (issue #16317). "
|
||||
"If you want to enable streams, re-create this table with vnodes (with the tag 'experimental:initial_tablets' set to 'none').");
|
||||
}
|
||||
if (tab->cdc_options().enabled()) {
|
||||
co_return api_error::validation("Table already has an enabled stream: TableName: " + tab->cf_name());
|
||||
}
|
||||
}
|
||||
else if (!tab->cdc_options().enabled()) {
|
||||
co_return api_error::validation("Table has no stream to disable: TableName: " + tab->cf_name());
|
||||
}
|
||||
if (stream_enabled && stream_enabled->IsBool() && stream_enabled->GetBool() &&
|
||||
p.local().local_db().find_keyspace(tab->ks_name()).get_replication_strategy().uses_tablets()) {
|
||||
co_return api_error::validation("Streams not yet supported on a table using tablets (issue #16317). "
|
||||
"If you want to enable streams, re-create this table with vnodes (with the tag 'experimental:initial_tablets' set to 'none').");
|
||||
}
|
||||
}
|
||||
|
||||
auto schema = builder.build();
|
||||
std::vector<view_ptr> new_views;
|
||||
std::vector<std::string> dropped_views;
|
||||
|
||||
rjson::value* gsi_updates = rjson::find(request, "GlobalSecondaryIndexUpdates");
|
||||
if (gsi_updates) {
|
||||
if (!gsi_updates->IsArray()) {
|
||||
co_return api_error::validation("GlobalSecondaryIndexUpdates must be an array");
|
||||
}
|
||||
if (gsi_updates->Size() > 1) {
|
||||
// Although UpdateTable takes an array of operations and could
|
||||
// support multiple Create and/or Delete operations in one
|
||||
// command, DynamoDB doesn't actually allows this, and throws
|
||||
// a LimitExceededException if this is attempted.
|
||||
co_return api_error::limit_exceeded("GlobalSecondaryIndexUpdates only allows one index creation or deletion");
|
||||
}
|
||||
if (gsi_updates->Size() == 1) {
|
||||
empty_request = false;
|
||||
if (!(*gsi_updates)[0].IsObject() || (*gsi_updates)[0].MemberCount() != 1) {
|
||||
co_return api_error::validation("GlobalSecondaryIndexUpdates array must contain one object with a Create, Delete or Update operation");
|
||||
}
|
||||
auto it = (*gsi_updates)[0].MemberBegin();
|
||||
const std::string_view op = rjson::to_string_view(it->name);
|
||||
if (!it->value.IsObject()) {
|
||||
co_return api_error::validation("GlobalSecondaryIndexUpdates entries must be objects");
|
||||
}
|
||||
const rjson::value* index_name_v = rjson::find(it->value, "IndexName");
|
||||
if (!index_name_v || !index_name_v->IsString()) {
|
||||
co_return api_error::validation("GlobalSecondaryIndexUpdates operation must have IndexName");
|
||||
}
|
||||
std::string_view index_name = rjson::to_string_view(*index_name_v);
|
||||
std::string_view table_name = schema->cf_name();
|
||||
std::string_view keyspace_name = schema->ks_name();
|
||||
std::string vname(view_name(table_name, index_name));
|
||||
if (op == "Create") {
|
||||
const rjson::value* attribute_definitions = rjson::find(request, "AttributeDefinitions");
|
||||
if (!attribute_definitions) {
|
||||
co_return api_error::validation("GlobalSecondaryIndexUpdates Create needs AttributeDefinitions");
|
||||
}
|
||||
std::unordered_set<std::string> unused_attribute_definitions =
|
||||
validate_attribute_definitions(*attribute_definitions);
|
||||
check_attribute_definitions_conflicts(*attribute_definitions, *schema);
|
||||
for (auto& view : p.local().data_dictionary().find_column_family(tab).views()) {
|
||||
check_attribute_definitions_conflicts(*attribute_definitions, *view);
|
||||
}
|
||||
|
||||
if (p.local().data_dictionary().has_schema(keyspace_name, vname)) {
|
||||
// Surprisingly, DynamoDB uses validation error here, not resource_in_use
|
||||
co_return api_error::validation(fmt::format(
|
||||
"GSI {} already exists in table {}", index_name, table_name));
|
||||
}
|
||||
if (p.local().data_dictionary().has_schema(keyspace_name, lsi_name(table_name, index_name))) {
|
||||
co_return api_error::validation(fmt::format(
|
||||
"LSI {} already exists in table {}, can't use same name for GSI", index_name, table_name));
|
||||
}
|
||||
|
||||
elogger.trace("Adding GSI {}", index_name);
|
||||
// FIXME: read and handle "Projection" parameter. This will
|
||||
// require the MV code to copy just parts of the attrs map.
|
||||
schema_builder view_builder(keyspace_name, vname);
|
||||
auto [view_hash_key, view_range_key] = parse_key_schema(it->value);
|
||||
// If an attribute is already a real column in the base
|
||||
// table (i.e., a key attribute in the base table or LSI),
|
||||
// we can use it directly as a view key. Otherwise, we
|
||||
// need to add it as a "computed column", which extracts
|
||||
// and deserializes the attribute from the ":attrs" map.
|
||||
bool view_hash_key_real_column =
|
||||
schema->get_column_definition(to_bytes(view_hash_key));
|
||||
add_column(view_builder, view_hash_key, *attribute_definitions, column_kind::partition_key, !view_hash_key_real_column);
|
||||
unused_attribute_definitions.erase(view_hash_key);
|
||||
if (!view_range_key.empty()) {
|
||||
bool view_range_key_real_column =
|
||||
schema->get_column_definition(to_bytes(view_range_key));
|
||||
add_column(view_builder, view_range_key, *attribute_definitions, column_kind::clustering_key, !view_range_key_real_column);
|
||||
if (!schema->get_column_definition(to_bytes(view_range_key)) &&
|
||||
!schema->get_column_definition(to_bytes(view_hash_key))) {
|
||||
// FIXME: This warning should go away. See issue #6714
|
||||
elogger.warn("Only 1 regular column from the base table should be used in the GSI key in order to ensure correct liveness management without assumptions");
|
||||
}
|
||||
unused_attribute_definitions.erase(view_range_key);
|
||||
}
|
||||
// Surprisingly, although DynamoDB checks for unused
|
||||
// AttributeDefinitions in CreateTable, it does not
|
||||
// check it in UpdateTable. We decided to check anyway.
|
||||
if (!unused_attribute_definitions.empty()) {
|
||||
co_return api_error::validation(fmt::format(
|
||||
"AttributeDefinitions defines spurious attributes not used by any KeySchema: {}",
|
||||
unused_attribute_definitions));
|
||||
}
|
||||
// Base key columns which aren't part of the index's key need to
|
||||
// be added to the view nonetheless, as (additional) clustering
|
||||
// key(s).
|
||||
for (auto& def : schema->primary_key_columns()) {
|
||||
if (def.name_as_text() != view_hash_key && def.name_as_text() != view_range_key) {
|
||||
view_builder.with_column(def.name(), def.type, column_kind::clustering_key);
|
||||
}
|
||||
}
|
||||
// GSIs have no tags:
|
||||
view_builder.add_extension(db::tags_extension::NAME, ::make_shared<db::tags_extension>());
|
||||
// Note below we don't need to add virtual columns, as all
|
||||
// base columns were copied to view. TODO: reconsider the need
|
||||
// for virtual columns when we support Projection.
|
||||
for (const column_definition& regular_cdef : schema->regular_columns()) {
|
||||
if (!view_builder.has_column(*cql3::to_identifier(regular_cdef))) {
|
||||
view_builder.with_column(regular_cdef.name(), regular_cdef.type, column_kind::regular_column);
|
||||
}
|
||||
}
|
||||
const bool include_all_columns = true;
|
||||
view_builder.with_view_info(*schema, include_all_columns, ""/*where clause*/);
|
||||
new_views.emplace_back(view_builder.build());
|
||||
} else if (op == "Delete") {
|
||||
elogger.trace("Deleting GSI {}", index_name);
|
||||
if (!p.local().data_dictionary().has_schema(keyspace_name, vname)) {
|
||||
co_return api_error::resource_not_found(fmt::format("No GSI {} in table {}", index_name, table_name));
|
||||
}
|
||||
dropped_views.emplace_back(vname);
|
||||
} else if (op == "Update") {
|
||||
co_return api_error::validation("GlobalSecondaryIndexUpdates Update not yet supported");
|
||||
} else {
|
||||
co_return api_error::validation(fmt::format("GlobalSecondaryIndexUpdates supports a Create, Delete or Update operation, saw '{}'", op));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (empty_request) {
|
||||
co_return api_error::validation("UpdateTable requires one of GlobalSecondaryIndexUpdates, StreamSpecification or BillingMode to be specified");
|
||||
}
|
||||
|
||||
co_await verify_permission(enforce_authorization, client_state_other_shard.get(), schema, auth::permission::ALTER);
|
||||
auto m = co_await service::prepare_column_family_update_announcement(p.local(), schema, std::vector<view_ptr>(), group0_guard.write_timestamp());
|
||||
for (view_ptr view : new_views) {
|
||||
auto m2 = co_await service::prepare_new_view_announcement(p.local(), view, group0_guard.write_timestamp());
|
||||
std::move(m2.begin(), m2.end(), std::back_inserter(m));
|
||||
}
|
||||
for (const std::string& view_name : dropped_views) {
|
||||
auto m2 = co_await service::prepare_view_drop_announcement(p.local(), schema->ks_name(), view_name, group0_guard.write_timestamp());
|
||||
std::move(m2.begin(), m2.end(), std::back_inserter(m));
|
||||
}
|
||||
// If a role is allowed to create a GSI, we should give it permissions
|
||||
// to read the GSI it just created. This is known as "auto-grant".
|
||||
// Also, when we delete a GSI we should revoke any permissions set on
|
||||
// it - so if it's ever created again the old permissions wouldn't be
|
||||
// remembered for the new GSI. This is known as "auto-revoke"
|
||||
if (client_state_other_shard.get().user() && (!new_views.empty() || !dropped_views.empty())) {
|
||||
service::group0_batch mc(std::move(group0_guard));
|
||||
mc.add_mutations(std::move(m));
|
||||
for (view_ptr view : new_views) {
|
||||
auto resource = auth::make_data_resource(view->ks_name(), view->cf_name());
|
||||
co_await auth::grant_applicable_permissions(
|
||||
*client_state_other_shard.get().get_auth_service(), *client_state_other_shard.get().user(), resource, mc);
|
||||
}
|
||||
for (const auto& view_name : dropped_views) {
|
||||
auto resource = auth::make_data_resource(schema->ks_name(), view_name);
|
||||
co_await auth::revoke_all(*client_state_other_shard.get().get_auth_service(), resource, mc);
|
||||
}
|
||||
std::tie(m, group0_guard) = co_await std::move(mc).extract();
|
||||
}
|
||||
auto m = co_await service::prepare_column_family_update_announcement(p.local(), schema, std::vector<view_ptr>(), group0_guard.write_timestamp());
|
||||
|
||||
co_await mm.announce(std::move(m), std::move(group0_guard), format("alternator-executor: update {} table", tab->cf_name()));
|
||||
|
||||
@@ -1941,7 +1545,7 @@ public:
|
||||
struct delete_item {};
|
||||
struct put_item {};
|
||||
put_or_delete_item(const rjson::value& key, schema_ptr schema, delete_item);
|
||||
put_or_delete_item(const rjson::value& item, schema_ptr schema, put_item, std::unordered_map<bytes, std::string> key_attributes);
|
||||
put_or_delete_item(const rjson::value& item, schema_ptr schema, put_item);
|
||||
// put_or_delete_item doesn't keep a reference to schema (so it can be
|
||||
// moved between shards for LWT) so it needs to be given again to build():
|
||||
mutation build(schema_ptr schema, api::timestamp_type ts) const;
|
||||
@@ -1973,75 +1577,7 @@ static inline const column_definition* find_attribute(const schema& schema, cons
|
||||
return cdef;
|
||||
}
|
||||
|
||||
|
||||
// Get a list of all attributes that serve as a key attributes for any of the
|
||||
// GSIs or LSIs of this table, and the declared type for each (can be only
|
||||
// "S", "B", or "N"). The implementation below will also list the base table's
|
||||
// key columns (they are the views' clustering keys).
|
||||
std::unordered_map<bytes, std::string> si_key_attributes(data_dictionary::table t) {
|
||||
std::unordered_map<bytes, std::string> ret;
|
||||
for (const view_ptr& v : t.views()) {
|
||||
for (const column_definition& cdef : v->partition_key_columns()) {
|
||||
ret[cdef.name()] = type_to_string(cdef.type);
|
||||
}
|
||||
for (const column_definition& cdef : v->clustering_key_columns()) {
|
||||
ret[cdef.name()] = type_to_string(cdef.type);
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
// When an attribute is a key (hash or sort) of one of the GSIs on a table,
|
||||
// DynamoDB refuses an update to that attribute with an unsuitable value.
|
||||
// Unsuitable values are:
|
||||
// 1. An empty string (those are normally allowed as values, but not allowed
|
||||
// as keys, including GSI keys).
|
||||
// 2. A value with a type different than that declared for the GSI key.
|
||||
// Normally non-key attributes can take values of any type (DynamoDB is
|
||||
// schema-less), but as soon as an attribute is used as a GSI key, it
|
||||
// must be set only to the specific type declared for that key.
|
||||
// (Note that a missing value for an GSI key attribute is fine - the update
|
||||
// will happen on the base table, but won't reach the view table. In this
|
||||
// case, this function simply won't be called for this attribute.)
|
||||
//
|
||||
// This function checks if the given attribute update is an update to some
|
||||
// GSI's key, and if the value is unsuitable, a api_error::validation is
|
||||
// thrown. The checking here is similar to the checking done in
|
||||
// get_key_from_typed_value() for the base table's key columns.
|
||||
//
|
||||
// validate_value_if_gsi_key() should only be called after validate_value()
|
||||
// already validated that the value itself has a valid form.
|
||||
static inline void validate_value_if_gsi_key(
|
||||
std::unordered_map<bytes, std::string> key_attributes,
|
||||
const bytes& attribute,
|
||||
const rjson::value& value) {
|
||||
if (key_attributes.empty()) {
|
||||
return;
|
||||
}
|
||||
auto it = key_attributes.find(attribute);
|
||||
if (it == key_attributes.end()) {
|
||||
// Given attribute is not a key column with a fixed type, so no
|
||||
// more validation to do.
|
||||
return;
|
||||
}
|
||||
const std::string& expected_type = it->second;
|
||||
// We assume that validate_value() was previously called on this value,
|
||||
// so value is known to be of the proper format (an object with one
|
||||
// member, whose key and value are strings)
|
||||
std::string_view value_type = rjson::to_string_view(value.MemberBegin()->name);
|
||||
if (expected_type != value_type) {
|
||||
throw api_error::validation(fmt::format(
|
||||
"Type mismatch: expected type {} for GSI key attribute {}, got type {}",
|
||||
expected_type, to_string_view(attribute), value_type));
|
||||
}
|
||||
std::string_view value_content = rjson::to_string_view(value.MemberBegin()->value);
|
||||
if (value_content.empty()) {
|
||||
throw api_error::validation(fmt::format(
|
||||
"GSI key attribute {} cannot be set to an empty string", to_string_view(attribute)));
|
||||
}
|
||||
}
|
||||
|
||||
put_or_delete_item::put_or_delete_item(const rjson::value& item, schema_ptr schema, put_item, std::unordered_map<bytes, std::string> key_attributes)
|
||||
put_or_delete_item::put_or_delete_item(const rjson::value& item, schema_ptr schema, put_item)
|
||||
: _pk(pk_from_json(item, schema)), _ck(ck_from_json(item, schema)) {
|
||||
_cells = std::vector<cell>();
|
||||
_cells->reserve(item.MemberCount());
|
||||
@@ -2051,9 +1587,6 @@ put_or_delete_item::put_or_delete_item(const rjson::value& item, schema_ptr sche
|
||||
const column_definition* cdef = find_attribute(*schema, column_name);
|
||||
_length_in_bytes += column_name.size();
|
||||
if (!cdef) {
|
||||
// This attribute may be a key column of one of the GSI, in which
|
||||
// case there are some limitations on the value
|
||||
validate_value_if_gsi_key(key_attributes, column_name, it->value);
|
||||
bytes value = serialize_item(it->value);
|
||||
if (value.size()) {
|
||||
// ScyllaDB uses one extra byte compared to DynamoDB for the bytes length
|
||||
@@ -2061,7 +1594,7 @@ put_or_delete_item::put_or_delete_item(const rjson::value& item, schema_ptr sche
|
||||
}
|
||||
_cells->push_back({std::move(column_name), serialize_item(it->value)});
|
||||
} else if (!cdef->is_primary_key()) {
|
||||
// Fixed-type regular column can be used for LSI key
|
||||
// Fixed-type regular column can be used for GSI key
|
||||
bytes value = get_key_from_typed_value(it->value, *cdef);
|
||||
_cells->push_back({std::move(column_name),
|
||||
value});
|
||||
@@ -2420,8 +1953,7 @@ public:
|
||||
parsed::condition_expression _condition_expression;
|
||||
put_item_operation(service::storage_proxy& proxy, rjson::value&& request)
|
||||
: rmw_operation(proxy, std::move(request))
|
||||
, _mutation_builder(rjson::get(_request, "Item"), schema(), put_or_delete_item::put_item{},
|
||||
si_key_attributes(proxy.data_dictionary().find_table(schema()->ks_name(), schema()->cf_name()))) {
|
||||
, _mutation_builder(rjson::get(_request, "Item"), schema(), put_or_delete_item::put_item{}) {
|
||||
_pk = _mutation_builder.pk();
|
||||
_ck = _mutation_builder.ck();
|
||||
if (_returnvalues != returnvalues::NONE && _returnvalues != returnvalues::ALL_OLD) {
|
||||
@@ -2782,8 +2314,7 @@ future<executor::request_return_type> executor::batch_write_item(client_state& c
|
||||
const rjson::value& put_request = r->value;
|
||||
const rjson::value& item = put_request["Item"];
|
||||
mutation_builders.emplace_back(schema, put_or_delete_item(
|
||||
item, schema, put_or_delete_item::put_item{},
|
||||
si_key_attributes(_proxy.data_dictionary().find_table(schema->ks_name(), schema->cf_name()))));
|
||||
item, schema, put_or_delete_item::put_item{}));
|
||||
auto mut_key = std::make_pair(mutation_builders.back().second.pk(), mutation_builders.back().second.ck());
|
||||
if (used_keys.contains(mut_key)) {
|
||||
co_return api_error::validation("Provided list of item keys contains duplicates");
|
||||
@@ -3276,47 +2807,6 @@ static bool check_needs_read_before_write(const attribute_path_map<parsed::updat
|
||||
});
|
||||
}
|
||||
|
||||
/*!
|
||||
* \brief estimate_value_size provides a rough size estimation
|
||||
* for an rjson value object.
|
||||
*
|
||||
* When calculating RCU and WCU, we need to determine the length of the JSON representation
|
||||
* (specifically, the length of each key and each value).
|
||||
*
|
||||
* When possible, this is calculated as a side effect of other operations.
|
||||
* estimate_value_size is used when this calculation cannot be performed directly,
|
||||
* but we still need an estimated value.
|
||||
*
|
||||
* It achieves this without streaming any values and uses a fixed size for numbers.
|
||||
* The aim is not to provide a perfect 1-to-1 size calculation, as WCU is calculated
|
||||
* in 1KB units. A ballpark estimate is sufficient.
|
||||
*/
|
||||
static size_t estimate_value_size(const rjson::value& value) {
|
||||
size_t size = 0;
|
||||
|
||||
if (value.IsString()) {
|
||||
size += value.GetStringLength();
|
||||
}
|
||||
else if (value.IsNumber()) {
|
||||
size += 8;
|
||||
}
|
||||
else if (value.IsBool()) {
|
||||
size += 5;
|
||||
}
|
||||
else if (value.IsArray()) {
|
||||
for (auto& v : value.GetArray()) {
|
||||
size += estimate_value_size(v); // Recursively calculate array element sizes
|
||||
}
|
||||
}
|
||||
else if (value.IsObject()) {
|
||||
for (auto it = value.MemberBegin(); it != value.MemberEnd(); ++it) {
|
||||
size += it->name.GetStringLength(); // Size of the key
|
||||
size += estimate_value_size(it->value); // Size of the value
|
||||
}
|
||||
}
|
||||
return size;
|
||||
}
|
||||
|
||||
class update_item_operation : public rmw_operation {
|
||||
public:
|
||||
// Some information parsed during the constructor to check for input
|
||||
@@ -3327,10 +2817,6 @@ public:
|
||||
// them by top-level attribute, and detects forbidden overlaps/conflicts.
|
||||
attribute_path_map<parsed::update_expression::action> _update_expression;
|
||||
|
||||
// Saved list of GSI keys in the table being updated, used for
|
||||
// validate_value_if_gsi_key()
|
||||
std::unordered_map<bytes, std::string> _key_attributes;
|
||||
|
||||
parsed::condition_expression _condition_expression;
|
||||
|
||||
update_item_operation(service::storage_proxy& proxy, rjson::value&& request);
|
||||
@@ -3408,23 +2894,6 @@ update_item_operation::update_item_operation(service::storage_proxy& proxy, rjso
|
||||
throw api_error::validation(
|
||||
format("UpdateItem does not allow both old-style AttributeUpdates and new-style ConditionExpression to be given together"));
|
||||
}
|
||||
if (_pk.representation().size() > 2) {
|
||||
// ScyllaDB uses two extra bytes compared to DynamoDB for the key bytes length
|
||||
_consumed_capacity._total_bytes += _pk.representation().size() - 2;
|
||||
}
|
||||
if (_ck.representation().size() > 2) {
|
||||
// ScyllaDB uses two extra bytes compared to DynamoDB for the key bytes length
|
||||
_consumed_capacity._total_bytes += _ck.representation().size() - 2;
|
||||
}
|
||||
if (expression_attribute_names) {
|
||||
_consumed_capacity._total_bytes += estimate_value_size(*expression_attribute_names);
|
||||
}
|
||||
if (expression_attribute_values) {
|
||||
_consumed_capacity._total_bytes += estimate_value_size(*expression_attribute_values);
|
||||
}
|
||||
|
||||
_key_attributes = si_key_attributes(proxy.data_dictionary().find_table(
|
||||
_schema->ks_name(), _schema->cf_name()));
|
||||
}
|
||||
|
||||
// These are the cases where update_item_operation::apply() needs to use
|
||||
@@ -3659,9 +3128,6 @@ static bool hierarchy_actions(
|
||||
|
||||
std::optional<mutation>
|
||||
update_item_operation::apply(std::unique_ptr<rjson::value> previous_item, api::timestamp_type ts) const {
|
||||
if (_consumed_capacity._total_bytes == 0) {
|
||||
_consumed_capacity._total_bytes = 1;
|
||||
}
|
||||
if (!verify_expected(_request, previous_item.get()) ||
|
||||
!verify_condition_expression(_condition_expression, previous_item.get())) {
|
||||
if (previous_item && _returnvalues_on_condition_check_failure ==
|
||||
@@ -3722,9 +3188,6 @@ update_item_operation::apply(std::unique_ptr<rjson::value> previous_item, api::t
|
||||
bytes column_value = get_key_from_typed_value(json_value, *cdef);
|
||||
row.cells().apply(*cdef, atomic_cell::make_live(*cdef->type, ts, column_value));
|
||||
} else {
|
||||
// This attribute may be a key column of one of the GSIs, in which
|
||||
// case there are some limitations on the value.
|
||||
validate_value_if_gsi_key(_key_attributes, column_name, json_value);
|
||||
attrs_collector.put(std::move(column_name), serialize_item(json_value), ts);
|
||||
}
|
||||
};
|
||||
|
||||
@@ -1,73 +0,0 @@
|
||||
/*
|
||||
* Copyright 2024-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
|
||||
#include "utils/rjson.hh"
|
||||
#include "serialization.hh"
|
||||
#include "column_computation.hh"
|
||||
#include "db/view/regular_column_transformation.hh"
|
||||
|
||||
namespace alternator {
|
||||
|
||||
// An implementation of a "column_computation" which extracts a specific
|
||||
// non-key attribute from the big map (":attrs") of all non-key attributes,
|
||||
// and deserializes it if it has the desired type. GSI will use this computed
|
||||
// column as a materialized-view key when the view key attribute isn't a
|
||||
// full-fledged CQL column but rather stored in ":attrs".
|
||||
class extract_from_attrs_column_computation : public regular_column_transformation {
|
||||
// The name of the CQL column name holding the attribute map. It is a
|
||||
// constant defined in executor.cc (as ":attrs"), so doesn't need
|
||||
// to be specified when constructing the column computation.
|
||||
static const bytes MAP_NAME;
|
||||
// The top-level attribute name to extract from the ":attrs" map.
|
||||
bytes _attr_name;
|
||||
// The type we expect for the value stored in the attribute. If the type
|
||||
// matches the expected type, it is decoded from the serialized format
|
||||
// we store in the map's values) into the raw CQL type value that we use
|
||||
// for keys, and returned by compute_value(). Only the types "S" (string),
|
||||
// "B" (bytes) and "N" (number) are allowed as keys in DynamoDB, and
|
||||
// therefore in desired_type.
|
||||
alternator_type _desired_type;
|
||||
public:
|
||||
virtual column_computation_ptr clone() const override;
|
||||
// TYPE_NAME is a unique string that distinguishes this class from other
|
||||
// column_computation subclasses. column_computation::deserialize() will
|
||||
// construct an object of this subclass if it sees a "type" TYPE_NAME.
|
||||
static inline const std::string TYPE_NAME = "alternator_extract_from_attrs";
|
||||
// Serialize the *definition* of this column computation into a JSON
|
||||
// string with a unique "type" string - TYPE_NAME - which then causes
|
||||
// column_computation::deserialize() to create an object from this class.
|
||||
virtual bytes serialize() const override;
|
||||
// Construct this object based on the previous output of serialize().
|
||||
// Calls on_internal_error() if the string doesn't match the output format
|
||||
// of serialize(). "type" is not checked column_computation::deserialize()
|
||||
// won't call this constructor if "type" doesn't match.
|
||||
extract_from_attrs_column_computation(const rjson::value &v);
|
||||
extract_from_attrs_column_computation(bytes_view attr_name, alternator_type desired_type)
|
||||
: _attr_name(attr_name), _desired_type(desired_type)
|
||||
{}
|
||||
// Implement regular_column_transformation's compute_value() that
|
||||
// accepts the full row:
|
||||
result compute_value(const schema& schema, const partition_key& key,
|
||||
const db::view::clustering_or_static_row& row) const override;
|
||||
// But do not implement column_computation's compute_value() that
|
||||
// accepts only a partition key - that's not enough so our implementation
|
||||
// of this function does on_internal_error().
|
||||
bytes compute_value(const schema& schema, const partition_key& key) const override;
|
||||
// This computed column does depend on a non-primary key column, so
|
||||
// its result may change in the update and we need to compute it
|
||||
// before and after the update.
|
||||
virtual bool depends_on_non_primary_key_column() const override {
|
||||
return true;
|
||||
}
|
||||
};
|
||||
} // namespace alternator
|
||||
@@ -245,27 +245,6 @@ rjson::value deserialize_item(bytes_view bv) {
|
||||
return deserialized;
|
||||
}
|
||||
|
||||
// This function takes a bytes_view created earlier by serialize_item(), and
|
||||
// if has the type "expected_type", the function returns the value as a
|
||||
// raw Scylla type. If the type doesn't match, returns an unset optional.
|
||||
// This function only supports the key types S (string), B (bytes) and N
|
||||
// (number) - serialize_item() serializes those types as a single-byte type
|
||||
// followed by the serialized raw Scylla type, so all this function needs to
|
||||
// do is to remove the first byte. This makes this function much more
|
||||
// efficient than deserialize_item() above because it avoids transformation
|
||||
// to/from JSON.
|
||||
std::optional<bytes> serialized_value_if_type(bytes_view bv, alternator_type expected_type) {
|
||||
if (bv.empty() || alternator_type(bv[0]) != expected_type) {
|
||||
return std::nullopt;
|
||||
}
|
||||
// Currently, serialize_item() for types in alternator_type (notably S, B
|
||||
// and N) are nothing more than Scylla's raw format for these types
|
||||
// preceded by a type byte. So we just need to skip that byte and we are
|
||||
// left by exactly what we need to return.
|
||||
bv.remove_prefix(1);
|
||||
return bytes(bv);
|
||||
}
|
||||
|
||||
std::string type_to_string(data_type type) {
|
||||
static thread_local std::unordered_map<data_type, std::string> types = {
|
||||
{utf8_type, "S"},
|
||||
|
||||
@@ -43,7 +43,6 @@ type_representation represent_type(alternator_type atype);
|
||||
|
||||
bytes serialize_item(const rjson::value& item);
|
||||
rjson::value deserialize_item(bytes_view bv);
|
||||
std::optional<bytes> serialized_value_if_type(bytes_view bv, alternator_type expected_type);
|
||||
|
||||
std::string type_to_string(data_type type);
|
||||
|
||||
|
||||
@@ -217,7 +217,7 @@ protected:
|
||||
// If the DC does not exist, we return an empty list - not an error.
|
||||
sstring query_dc = req->get_query_param("dc");
|
||||
sstring local_dc = query_dc.empty() ? topology.get_datacenter() : query_dc;
|
||||
std::unordered_set<locator::host_id> local_dc_nodes;
|
||||
std::unordered_set<gms::inet_address> local_dc_nodes;
|
||||
const auto& endpoints = topology.get_datacenter_endpoints();
|
||||
auto dc_it = endpoints.find(local_dc);
|
||||
if (dc_it != endpoints.end()) {
|
||||
@@ -227,8 +227,7 @@ protected:
|
||||
// DC, unless a single rack is selected by the "rack" query option.
|
||||
// If the rack does not exist, we return an empty list - not an error.
|
||||
sstring query_rack = req->get_query_param("rack");
|
||||
for (auto& id : local_dc_nodes) {
|
||||
auto ip = _gossiper.get_address_map().get(id);
|
||||
for (auto& ip : local_dc_nodes) {
|
||||
if (!query_rack.empty()) {
|
||||
auto rack = _gossiper.get_application_state_value(ip, gms::application_state::RACK);
|
||||
if (rack != query_rack) {
|
||||
@@ -457,16 +456,9 @@ future<executor::request_return_type> server::handle_api_request(std::unique_ptr
|
||||
|
||||
tracing::trace_state_ptr trace_state = maybe_trace_query(client_state, username, op, content);
|
||||
tracing::trace(trace_state, "{}", op);
|
||||
|
||||
auto user = client_state.user();
|
||||
auto f = [this, content = std::move(content), &callback = callback_it->second,
|
||||
client_state = std::move(client_state), trace_state = std::move(trace_state),
|
||||
units = std::move(units), req = std::move(req)] () mutable -> future<executor::request_return_type> {
|
||||
rjson::value json_request = co_await _json_parser.parse(std::move(content));
|
||||
co_return co_await callback(_executor, client_state, trace_state,
|
||||
make_service_permit(std::move(units)), std::move(json_request), std::move(req));
|
||||
};
|
||||
co_return co_await _sl_controller.with_user_service_level(user, std::ref(f));
|
||||
rjson::value json_request = co_await _json_parser.parse(std::move(content));
|
||||
co_return co_await callback_it->second(_executor, client_state, trace_state,
|
||||
make_service_permit(std::move(units)), std::move(json_request), std::move(req));
|
||||
}
|
||||
|
||||
void server::set_routes(routes& r) {
|
||||
@@ -606,24 +598,14 @@ future<> server::init(net::inet_address addr, std::optional<uint16_t> port, std:
|
||||
set_routes(_https_server._routes);
|
||||
_https_server.set_content_length_limit(server::content_length_limit);
|
||||
_https_server.set_content_streaming(true);
|
||||
|
||||
if (this_shard_id() == 0) {
|
||||
_credentials = creds->build_reloadable_server_credentials([this](const tls::credentials_builder& b, const std::unordered_set<sstring>& files, std::exception_ptr ep) -> future<> {
|
||||
if (ep) {
|
||||
slogger.warn("Exception loading {}: {}", files, ep);
|
||||
} else {
|
||||
co_await container().invoke_on_others([&b](server& s) {
|
||||
if (s._credentials) {
|
||||
b.rebuild(*s._credentials);
|
||||
}
|
||||
});
|
||||
slogger.info("Reloaded {}", files);
|
||||
}
|
||||
}).get();
|
||||
} else {
|
||||
_credentials = creds->build_server_credentials();
|
||||
}
|
||||
_https_server.listen(socket_address{addr, *https_port}, _credentials).get();
|
||||
auto server_creds = creds->build_reloadable_server_credentials([](const std::unordered_set<sstring>& files, std::exception_ptr ep) {
|
||||
if (ep) {
|
||||
slogger.warn("Exception loading {}: {}", files, ep);
|
||||
} else {
|
||||
slogger.info("Reloaded {}", files);
|
||||
}
|
||||
}).get();
|
||||
_https_server.listen(socket_address{addr, *https_port}, std::move(server_creds)).get();
|
||||
_enabled_servers.push_back(std::ref(_https_server));
|
||||
}
|
||||
});
|
||||
|
||||
@@ -24,7 +24,7 @@ namespace alternator {
|
||||
|
||||
using chunked_content = rjson::chunked_content;
|
||||
|
||||
class server : public peering_sharded_service<server> {
|
||||
class server {
|
||||
static constexpr size_t content_length_limit = 16*MB;
|
||||
using alternator_callback = std::function<future<executor::request_return_type>(executor&, executor::client_state&,
|
||||
tracing::trace_state_ptr, service_permit, rjson::value, std::unique_ptr<http::request>)>;
|
||||
@@ -52,8 +52,6 @@ class server : public peering_sharded_service<server> {
|
||||
semaphore* _memory_limiter;
|
||||
utils::updateable_value<uint32_t> _max_concurrent_requests;
|
||||
|
||||
::shared_ptr<seastar::tls::server_credentials> _credentials;
|
||||
|
||||
class json_parser {
|
||||
static constexpr size_t yieldable_parsing_threshold = 16*KB;
|
||||
chunked_content _raw_document;
|
||||
|
||||
@@ -9,7 +9,6 @@
|
||||
#include "stats.hh"
|
||||
#include "utils/histogram_metrics_helper.hh"
|
||||
#include <seastar/core/metrics.hh>
|
||||
#include "utils/labels.hh"
|
||||
|
||||
namespace alternator {
|
||||
|
||||
@@ -22,12 +21,12 @@ stats::stats() : api_operations{} {
|
||||
_metrics.add_group("alternator", {
|
||||
#define OPERATION(name, CamelCaseName) \
|
||||
seastar::metrics::make_total_operations("operation", api_operations.name, \
|
||||
seastar::metrics::description("number of operations via Alternator API"), {op(CamelCaseName), alternator_label, basic_level}).set_skip_when_empty(),
|
||||
seastar::metrics::description("number of operations via Alternator API"), {op(CamelCaseName)}).set_skip_when_empty(),
|
||||
#define OPERATION_LATENCY(name, CamelCaseName) \
|
||||
seastar::metrics::make_histogram("op_latency", \
|
||||
seastar::metrics::description("Latency histogram of an operation via Alternator API"), {op(CamelCaseName), alternator_label, basic_level}, [this]{return to_metrics_histogram(api_operations.name.histogram());}).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(), \
|
||||
seastar::metrics::description("Latency histogram of an operation via Alternator API"), {op(CamelCaseName)}, [this]{return to_metrics_histogram(api_operations.name.histogram());}).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(), \
|
||||
seastar::metrics::make_summary("op_latency_summary", \
|
||||
seastar::metrics::description("Latency summary of an operation via Alternator API"), [this]{return to_metrics_summary(api_operations.name.summary());})(op(CamelCaseName))(basic_level)(alternator_label).set_skip_when_empty(),
|
||||
seastar::metrics::description("Latency summary of an operation via Alternator API"), [this]{return to_metrics_summary(api_operations.name.summary());})(op(CamelCaseName)).set_skip_when_empty(),
|
||||
OPERATION(batch_get_item, "BatchGetItem")
|
||||
OPERATION(batch_write_item, "BatchWriteItem")
|
||||
OPERATION(create_backup, "CreateBackup")
|
||||
@@ -78,39 +77,39 @@ stats::stats() : api_operations{} {
|
||||
});
|
||||
_metrics.add_group("alternator", {
|
||||
seastar::metrics::make_total_operations("unsupported_operations", unsupported_operations,
|
||||
seastar::metrics::description("number of unsupported operations via Alternator API"))(alternator_label).set_skip_when_empty(),
|
||||
seastar::metrics::description("number of unsupported operations via Alternator API")),
|
||||
seastar::metrics::make_total_operations("total_operations", total_operations,
|
||||
seastar::metrics::description("number of total operations via Alternator API"))(basic_level)(alternator_label).set_skip_when_empty(),
|
||||
seastar::metrics::description("number of total operations via Alternator API")),
|
||||
seastar::metrics::make_total_operations("reads_before_write", reads_before_write,
|
||||
seastar::metrics::description("number of performed read-before-write operations"))(alternator_label).set_skip_when_empty(),
|
||||
seastar::metrics::description("number of performed read-before-write operations")),
|
||||
seastar::metrics::make_total_operations("write_using_lwt", write_using_lwt,
|
||||
seastar::metrics::description("number of writes that used LWT"))(alternator_label).set_skip_when_empty(),
|
||||
seastar::metrics::description("number of writes that used LWT")),
|
||||
seastar::metrics::make_total_operations("shard_bounce_for_lwt", shard_bounce_for_lwt,
|
||||
seastar::metrics::description("number writes that had to be bounced from this shard because of LWT requirements"))(alternator_label).set_skip_when_empty(),
|
||||
seastar::metrics::description("number writes that had to be bounced from this shard because of LWT requirements")),
|
||||
seastar::metrics::make_total_operations("requests_blocked_memory", requests_blocked_memory,
|
||||
seastar::metrics::description("Counts a number of requests blocked due to memory pressure."))(alternator_label).set_skip_when_empty(),
|
||||
seastar::metrics::description("Counts a number of requests blocked due to memory pressure.")),
|
||||
seastar::metrics::make_total_operations("requests_shed", requests_shed,
|
||||
seastar::metrics::description("Counts a number of requests shed due to overload."))(alternator_label).set_skip_when_empty(),
|
||||
seastar::metrics::description("Counts a number of requests shed due to overload.")),
|
||||
seastar::metrics::make_total_operations("filtered_rows_read_total", cql_stats.filtered_rows_read_total,
|
||||
seastar::metrics::description("number of rows read during filtering operations"))(alternator_label).set_skip_when_empty(),
|
||||
seastar::metrics::description("number of rows read during filtering operations")),
|
||||
seastar::metrics::make_total_operations("filtered_rows_matched_total", cql_stats.filtered_rows_matched_total,
|
||||
seastar::metrics::description("number of rows read and matched during filtering operations")),
|
||||
seastar::metrics::make_counter("rcu_total", rcu_total,
|
||||
seastar::metrics::description("total number of consumed read units, counted as half units"))(alternator_label).set_skip_when_empty(),
|
||||
seastar::metrics::description("total number of consumed read units, counted as half units")).set_skip_when_empty(),
|
||||
seastar::metrics::make_counter("wcu_total", wcu_total[wcu_types::PUT_ITEM],
|
||||
seastar::metrics::description("total number of consumed write units, counted as half units"),{op("PutItem")})(alternator_label).set_skip_when_empty(),
|
||||
seastar::metrics::description("total number of consumed write units, counted as half units"),{op("PutItem")}).set_skip_when_empty(),
|
||||
seastar::metrics::make_counter("wcu_total", wcu_total[wcu_types::DELETE_ITEM],
|
||||
seastar::metrics::description("total number of consumed write units, counted as half units"),{op("DeleteItem")})(alternator_label).set_skip_when_empty(),
|
||||
seastar::metrics::description("total number of consumed write units, counted as half units"),{op("DeleteItem")}).set_skip_when_empty(),
|
||||
seastar::metrics::make_counter("wcu_total", wcu_total[wcu_types::UPDATE_ITEM],
|
||||
seastar::metrics::description("total number of consumed write units, counted as half units"),{op("UpdateItem")})(alternator_label).set_skip_when_empty(),
|
||||
seastar::metrics::description("total number of consumed write units, counted as half units"),{op("UpdateItem")}).set_skip_when_empty(),
|
||||
seastar::metrics::make_counter("wcu_total", wcu_total[wcu_types::INDEX],
|
||||
seastar::metrics::description("total number of consumed write units, counted as half units"),{op("Index")})(alternator_label).set_skip_when_empty(),
|
||||
seastar::metrics::description("total number of consumed write units, counted as half units"),{op("Index")}).set_skip_when_empty(),
|
||||
seastar::metrics::make_total_operations("filtered_rows_dropped_total", [this] { return cql_stats.filtered_rows_read_total - cql_stats.filtered_rows_matched_total; },
|
||||
seastar::metrics::description("number of rows read and dropped during filtering operations"))(alternator_label).set_skip_when_empty(),
|
||||
seastar::metrics::description("number of rows read and dropped during filtering operations")),
|
||||
seastar::metrics::make_counter("batch_item_count", seastar::metrics::description("The total number of items processed across all batches"),{op("BatchWriteItem")},
|
||||
api_operations.batch_write_item_batch_total)(alternator_label).set_skip_when_empty(),
|
||||
api_operations.batch_write_item_batch_total).set_skip_when_empty(),
|
||||
seastar::metrics::make_counter("batch_item_count", seastar::metrics::description("The total number of items processed across all batches"),{op("BatchGetItem")},
|
||||
api_operations.batch_get_item_batch_total)(alternator_label).set_skip_when_empty(),
|
||||
api_operations.batch_get_item_batch_total).set_skip_when_empty(),
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -16,6 +16,7 @@
|
||||
#include <seastar/core/future.hh>
|
||||
#include <seastar/core/lowres_clock.hh>
|
||||
#include <seastar/coroutine/maybe_yield.hh>
|
||||
#include <boost/multiprecision/cpp_int.hpp>
|
||||
|
||||
#include "exceptions/exceptions.hh"
|
||||
#include "gms/gossiper.hh"
|
||||
@@ -48,7 +49,6 @@
|
||||
#include "dht/sharder.hh"
|
||||
#include "db/config.hh"
|
||||
#include "db/tags/utils.hh"
|
||||
#include "utils/labels.hh"
|
||||
|
||||
#include "ttl.hh"
|
||||
|
||||
@@ -851,13 +851,13 @@ future<> expiration_service::stop() {
|
||||
expiration_service::stats::stats() {
|
||||
_metrics.add_group("expiration", {
|
||||
seastar::metrics::make_total_operations("scan_passes", scan_passes,
|
||||
seastar::metrics::description("number of passes over the database"))(alternator_label).set_skip_when_empty(),
|
||||
seastar::metrics::description("number of passes over the database")),
|
||||
seastar::metrics::make_total_operations("scan_table", scan_table,
|
||||
seastar::metrics::description("number of table scans (counting each scan of each table that enabled expiration)"))(alternator_label).set_skip_when_empty(),
|
||||
seastar::metrics::description("number of table scans (counting each scan of each table that enabled expiration)")),
|
||||
seastar::metrics::make_total_operations("items_deleted", items_deleted,
|
||||
seastar::metrics::description("number of items deleted after expiration"))(basic_level)(alternator_label).set_skip_when_empty(),
|
||||
seastar::metrics::description("number of items deleted after expiration")),
|
||||
seastar::metrics::make_total_operations("secondary_ranges_scanned", secondary_ranges_scanned,
|
||||
seastar::metrics::description("number of token ranges scanned by this node while their primary owner was down"))(alternator_label).set_skip_when_empty(),
|
||||
seastar::metrics::description("number of token ranges scanned by this node while their primary owner was down")),
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -42,7 +42,6 @@ set(swagger_files
|
||||
api-doc/messaging_service.json
|
||||
api-doc/metrics.json
|
||||
api-doc/raft.json
|
||||
api-doc/service_levels.json
|
||||
api-doc/storage_proxy.json
|
||||
api-doc/storage_service.json
|
||||
api-doc/stream_manager.json
|
||||
@@ -83,7 +82,6 @@ target_sources(api
|
||||
lsa.cc
|
||||
messaging_service.cc
|
||||
raft.cc
|
||||
service_levels.cc
|
||||
storage_proxy.cc
|
||||
storage_service.cc
|
||||
stream_manager.cc
|
||||
|
||||
@@ -1,56 +0,0 @@
|
||||
{
|
||||
"apiVersion":"0.0.1",
|
||||
"swaggerVersion":"1.2",
|
||||
"basePath":"{{Protocol}}://{{Host}}",
|
||||
"resourcePath":"/service_levels",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"apis":[
|
||||
{
|
||||
"path":"/service_levels/switch_tenants",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Switch tenants on all opened connections if needed",
|
||||
"type":"void",
|
||||
"nickname":"do_switch_tenants",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/service_levels/count_connections",
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Count opened CQL connections per scheduling group per user",
|
||||
"type":"connections_count_map",
|
||||
"nickname":"count_connections",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"models":{},
|
||||
"components": {
|
||||
"schemas": {
|
||||
"connections_count_map": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "integer"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
@@ -813,14 +813,6 @@
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"move_files",
|
||||
"description":"Move component files instead of copying them",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"boolean",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -889,15 +881,6 @@
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"scope",
|
||||
"description":"Defines the set of nodes to which mutations can be streamed",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query",
|
||||
"enum": ["all", "dc", "rack", "node"]
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1656,6 +1639,38 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/storage_service/truncate/{keyspace}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Truncates (deletes) the given columnFamily from the provided keyspace. Calling truncate results in actual deletion of all data in the cluster under the given columnFamily and it will fail unless all hosts are up. All data in the given column family will be deleted, but its definition will not be affected.",
|
||||
"type":"void",
|
||||
"nickname":"truncate",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"keyspace",
|
||||
"description":"The keyspace",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"Column family name",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/storage_service/keyspaces",
|
||||
"operations":[
|
||||
@@ -2844,7 +2859,7 @@
|
||||
"nickname":"repair_tablet",
|
||||
"method":"POST",
|
||||
"summary":"Repair a tablet",
|
||||
"type":"tablet_repair_result",
|
||||
"type":"void",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
@@ -2872,30 +2887,6 @@
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"hosts_filter",
|
||||
"description":"Repair replicas listed in the comma-separated host_id list.",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"dcs_filter",
|
||||
"description":"Repair replicas listed in the comma-separated DC list",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"await_completion",
|
||||
"description":"Set true to wait for the repair to complete. Set false to skip waiting for the repair to complete. When the option is not provided, it defaults to false.",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -3319,15 +3310,6 @@
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"tablet_repair_result":{
|
||||
"id":"tablet_repair_result",
|
||||
"description":"Tablet repair result",
|
||||
"properties":{
|
||||
"tablet_task_id":{
|
||||
"type":"string"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -253,30 +253,6 @@
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/task_manager/drain/{module}",
|
||||
"operations":[
|
||||
{
|
||||
"method":"POST",
|
||||
"summary":"Drain finished local tasks",
|
||||
"type":"void",
|
||||
"nickname":"drain_tasks",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"module",
|
||||
"description":"The module to drain",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"models":{
|
||||
@@ -308,8 +284,7 @@
|
||||
"created",
|
||||
"running",
|
||||
"done",
|
||||
"failed",
|
||||
"suspended"
|
||||
"failed"
|
||||
],
|
||||
"description":"The state of a task"
|
||||
},
|
||||
@@ -344,18 +319,6 @@
|
||||
"sequence_number":{
|
||||
"type":"long",
|
||||
"description":"The running sequence number of the task"
|
||||
},
|
||||
"shard":{
|
||||
"type":"long",
|
||||
"description":"The shard the task is running on"
|
||||
},
|
||||
"start_time":{
|
||||
"type":"datetime",
|
||||
"description":"The start time of the task; unspecified (equal to epoch) when state == created"
|
||||
},
|
||||
"end_time":{
|
||||
"type":"datetime",
|
||||
"description":"The end time of the task; unspecified (equal to epoch) when the task is not completed"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -389,8 +352,7 @@
|
||||
"created",
|
||||
"running",
|
||||
"done",
|
||||
"failed",
|
||||
"suspended"
|
||||
"failed"
|
||||
],
|
||||
"description":"The state of the task"
|
||||
},
|
||||
|
||||
29
api/api.cc
29
api/api.cc
@@ -36,7 +36,6 @@
|
||||
#include "tasks.hh"
|
||||
#include "raft.hh"
|
||||
#include "gms/gossip_address_map.hh"
|
||||
#include "service_levels.hh"
|
||||
|
||||
logging::logger apilog("api");
|
||||
|
||||
@@ -81,7 +80,7 @@ future<> set_server_init(http_context& ctx) {
|
||||
});
|
||||
}
|
||||
|
||||
future<> set_server_config(http_context& ctx, db::config& cfg) {
|
||||
future<> set_server_config(http_context& ctx, const db::config& cfg) {
|
||||
auto rb02 = std::make_shared < api_registry_builder20 > (ctx.api_doc, "/v2");
|
||||
return ctx.http_server.set_routes([&ctx, &cfg, rb02](routes& r) {
|
||||
set_config(rb02, ctx, r, cfg, false);
|
||||
@@ -153,8 +152,8 @@ future<> unset_server_sstables_loader(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_sstables_loader(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_view_builder(http_context& ctx, sharded<db::view::view_builder>& vb, sharded<gms::gossiper>& g) {
|
||||
return ctx.http_server.set_routes([&ctx, &vb, &g] (routes& r) { set_view_builder(ctx, r, vb, g); });
|
||||
future<> set_server_view_builder(http_context& ctx, sharded<db::view::view_builder>& vb) {
|
||||
return ctx.http_server.set_routes([&ctx, &vb] (routes& r) { set_view_builder(ctx, r, vb); });
|
||||
}
|
||||
|
||||
future<> unset_server_view_builder(http_context& ctx) {
|
||||
@@ -188,8 +187,8 @@ future<> unset_server_snapshot(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_snapshot(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_token_metadata(http_context& ctx, sharded<locator::shared_token_metadata>& tm, sharded<gms::gossiper>& g) {
|
||||
return ctx.http_server.set_routes([&ctx, &tm, &g] (routes& r) { set_token_metadata(ctx, r, tm, g); });
|
||||
future<> set_server_token_metadata(http_context& ctx, sharded<locator::shared_token_metadata>& tm) {
|
||||
return ctx.http_server.set_routes([&ctx, &tm] (routes& r) { set_token_metadata(ctx, r, tm); });
|
||||
}
|
||||
|
||||
future<> unset_server_token_metadata(http_context& ctx) {
|
||||
@@ -273,10 +272,10 @@ future<> unset_server_cache(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_cache_service(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_hinted_handoff(http_context& ctx, sharded<service::storage_proxy>& proxy, sharded<gms::gossiper>& g) {
|
||||
future<> set_hinted_handoff(http_context& ctx, sharded<service::storage_proxy>& proxy) {
|
||||
return register_api(ctx, "hinted_handoff",
|
||||
"The hinted handoff API", [&proxy, &g] (http_context& ctx, routes& r) {
|
||||
set_hinted_handoff(ctx, r, proxy, g);
|
||||
"The hinted handoff API", [&proxy] (http_context& ctx, routes& r) {
|
||||
set_hinted_handoff(ctx, r, proxy);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -317,13 +316,13 @@ future<> unset_server_commitlog(http_context& ctx) {
|
||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_commitlog(ctx, r); });
|
||||
}
|
||||
|
||||
future<> set_server_task_manager(http_context& ctx, sharded<tasks::task_manager>& tm, lw_shared_ptr<db::config> cfg, sharded<gms::gossiper>& gossiper) {
|
||||
future<> set_server_task_manager(http_context& ctx, sharded<tasks::task_manager>& tm, lw_shared_ptr<db::config> cfg) {
|
||||
auto rb = std::make_shared < api_registry_builder > (ctx.api_doc);
|
||||
|
||||
return ctx.http_server.set_routes([rb, &ctx, &tm, &cfg = *cfg, &gossiper](routes& r) {
|
||||
return ctx.http_server.set_routes([rb, &ctx, &tm, &cfg = *cfg](routes& r) {
|
||||
rb->register_function(r, "task_manager",
|
||||
"The task manager API");
|
||||
set_task_manager(ctx, r, tm, cfg, gossiper);
|
||||
set_task_manager(ctx, r, tm, cfg);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -359,12 +358,6 @@ future<> unset_server_cql_server_test(http_context& ctx) {
|
||||
|
||||
#endif
|
||||
|
||||
future<> set_server_service_levels(http_context &ctx, cql_transport::controller& ctl, sharded<cql3::query_processor>& qp) {
|
||||
return register_api(ctx, "service_levels", "The service levels API", [&ctl, &qp] (http_context& ctx, routes& r) {
|
||||
set_service_levels(ctx, r, ctl, qp);
|
||||
});
|
||||
}
|
||||
|
||||
future<> set_server_tasks_compaction_module(http_context& ctx, sharded<service::storage_service>& ss, sharded<db::snapshot_ctl>& snap_ctl) {
|
||||
auto rb = std::make_shared < api_registry_builder > (ctx.api_doc);
|
||||
|
||||
|
||||
@@ -73,10 +73,6 @@ namespace tasks {
|
||||
class task_manager;
|
||||
}
|
||||
|
||||
namespace cql3 {
|
||||
class query_processor;
|
||||
}
|
||||
|
||||
namespace api {
|
||||
|
||||
struct http_context {
|
||||
@@ -92,7 +88,7 @@ struct http_context {
|
||||
};
|
||||
|
||||
future<> set_server_init(http_context& ctx);
|
||||
future<> set_server_config(http_context& ctx, db::config& cfg);
|
||||
future<> set_server_config(http_context& ctx, const db::config& cfg);
|
||||
future<> unset_server_config(http_context& ctx);
|
||||
future<> set_server_snitch(http_context& ctx, sharded<locator::snitch_ptr>& snitch);
|
||||
future<> unset_server_snitch(http_context& ctx);
|
||||
@@ -100,7 +96,7 @@ future<> set_server_storage_service(http_context& ctx, sharded<service::storage_
|
||||
future<> unset_server_storage_service(http_context& ctx);
|
||||
future<> set_server_sstables_loader(http_context& ctx, sharded<sstables_loader>& sst_loader);
|
||||
future<> unset_server_sstables_loader(http_context& ctx);
|
||||
future<> set_server_view_builder(http_context& ctx, sharded<db::view::view_builder>& vb, sharded<gms::gossiper>& g);
|
||||
future<> set_server_view_builder(http_context& ctx, sharded<db::view::view_builder>& vb);
|
||||
future<> unset_server_view_builder(http_context& ctx);
|
||||
future<> set_server_repair(http_context& ctx, sharded<repair_service>& repair, sharded<gms::gossip_address_map>& am);
|
||||
future<> unset_server_repair(http_context& ctx);
|
||||
@@ -112,7 +108,7 @@ future<> set_server_authorization_cache(http_context& ctx, sharded<auth::service
|
||||
future<> unset_server_authorization_cache(http_context& ctx);
|
||||
future<> set_server_snapshot(http_context& ctx, sharded<db::snapshot_ctl>& snap_ctl);
|
||||
future<> unset_server_snapshot(http_context& ctx);
|
||||
future<> set_server_token_metadata(http_context& ctx, sharded<locator::shared_token_metadata>& tm, sharded<gms::gossiper>& g);
|
||||
future<> set_server_token_metadata(http_context& ctx, sharded<locator::shared_token_metadata>& tm);
|
||||
future<> unset_server_token_metadata(http_context& ctx);
|
||||
future<> set_server_gossip(http_context& ctx, sharded<gms::gossiper>& g);
|
||||
future<> unset_server_gossip(http_context& ctx);
|
||||
@@ -124,14 +120,14 @@ future<> set_server_storage_proxy(http_context& ctx, sharded<service::storage_pr
|
||||
future<> unset_server_storage_proxy(http_context& ctx);
|
||||
future<> set_server_stream_manager(http_context& ctx, sharded<streaming::stream_manager>& sm);
|
||||
future<> unset_server_stream_manager(http_context& ctx);
|
||||
future<> set_hinted_handoff(http_context& ctx, sharded<service::storage_proxy>& p, sharded<gms::gossiper>& g);
|
||||
future<> set_hinted_handoff(http_context& ctx, sharded<service::storage_proxy>& p);
|
||||
future<> unset_hinted_handoff(http_context& ctx);
|
||||
future<> set_server_cache(http_context& ctx);
|
||||
future<> unset_server_cache(http_context& ctx);
|
||||
future<> set_server_compaction_manager(http_context& ctx, sharded<compaction_manager>& cm);
|
||||
future<> unset_server_compaction_manager(http_context& ctx);
|
||||
future<> set_server_done(http_context& ctx);
|
||||
future<> set_server_task_manager(http_context& ctx, sharded<tasks::task_manager>& tm, lw_shared_ptr<db::config> cfg, sharded<gms::gossiper>& gossiper);
|
||||
future<> set_server_task_manager(http_context& ctx, sharded<tasks::task_manager>& tm, lw_shared_ptr<db::config> cfg);
|
||||
future<> unset_server_task_manager(http_context& ctx);
|
||||
future<> set_server_task_manager_test(http_context& ctx, sharded<tasks::task_manager>& tm);
|
||||
future<> unset_server_task_manager_test(http_context& ctx);
|
||||
@@ -145,7 +141,6 @@ future<> set_format_selector(http_context& ctx, db::sstables_format_selector& se
|
||||
future<> unset_format_selector(http_context& ctx);
|
||||
future<> set_server_cql_server_test(http_context& ctx, cql_transport::controller& ctl);
|
||||
future<> unset_server_cql_server_test(http_context& ctx);
|
||||
future<> set_server_service_levels(http_context& ctx, cql_transport::controller& ctl, sharded<cql3::query_processor>& qp);
|
||||
future<> set_server_commitlog(http_context& ctx, sharded<replica::database>&);
|
||||
future<> unset_server_commitlog(http_context& ctx);
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@
|
||||
#include "api/api-doc/collectd.json.hh"
|
||||
#include <seastar/core/scollectd.hh>
|
||||
#include <seastar/core/scollectd_api.hh>
|
||||
#include <boost/range/irange.hpp>
|
||||
#include <ranges>
|
||||
#include <regex>
|
||||
#include "api/api_init.hh"
|
||||
|
||||
@@ -24,6 +24,9 @@
|
||||
#include "compaction/compaction_manager.hh"
|
||||
#include "unimplemented.hh"
|
||||
|
||||
#include <boost/range/algorithm/copy.hpp>
|
||||
#include <boost/range/numeric.hpp>
|
||||
|
||||
extern logging::logger apilog;
|
||||
|
||||
namespace api {
|
||||
@@ -48,9 +51,17 @@ std::tuple<sstring, sstring> parse_fully_qualified_cf_name(sstring name) {
|
||||
return std::make_tuple(name.substr(0, pos), name.substr(end));
|
||||
}
|
||||
|
||||
table_info parse_table_info(const sstring& name, const replica::database& db) {
|
||||
table_id get_uuid(const sstring& ks, const sstring& cf, const replica::database& db) {
|
||||
try {
|
||||
return db.find_uuid(ks, cf);
|
||||
} catch (replica::no_such_column_family& e) {
|
||||
throw bad_param_exception(e.what());
|
||||
}
|
||||
}
|
||||
|
||||
table_id get_uuid(const sstring& name, const replica::database& db) {
|
||||
auto [ks, cf] = parse_fully_qualified_cf_name(name);
|
||||
return table_info{ .name = cf, .id = validate_table(db, ks, cf) };
|
||||
return get_uuid(ks, cf, db);
|
||||
}
|
||||
|
||||
future<json::json_return_type> get_cf_stats(http_context& ctx, const sstring& name,
|
||||
@@ -67,11 +78,15 @@ future<json::json_return_type> get_cf_stats(http_context& ctx,
|
||||
}, std::plus<int64_t>());
|
||||
}
|
||||
|
||||
static future<json::json_return_type> for_tables_on_all_shards(http_context& ctx, std::vector<table_info> tables, std::function<future<>(replica::table&)> set) {
|
||||
return do_with(std::move(tables), [&ctx, set] (const std::vector<table_info>& tables) {
|
||||
return ctx.db.invoke_on_all([&tables, set] (replica::database& db) {
|
||||
return parallel_for_each(tables, [&db, set] (const table_info& table) {
|
||||
replica::table& t = db.find_column_family(table.id);
|
||||
static future<json::json_return_type> for_tables_on_all_shards(http_context& ctx, const sstring& keyspace, std::vector<sstring> tables, std::function<future<>(replica::table&)> set) {
|
||||
if (tables.empty()) {
|
||||
tables = map_keys(ctx.db.local().find_keyspace(keyspace).metadata().get()->cf_meta_data());
|
||||
}
|
||||
|
||||
return do_with(keyspace, std::move(tables), [&ctx, set] (const sstring& keyspace, const std::vector<sstring>& tables) {
|
||||
return ctx.db.invoke_on_all([&keyspace, &tables, set] (replica::database& db) {
|
||||
return parallel_for_each(tables, [&db, &keyspace, set] (const sstring& table) {
|
||||
replica::table& t = db.find_column_family(keyspace, table);
|
||||
return set(t);
|
||||
});
|
||||
});
|
||||
@@ -98,12 +113,12 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
static future<json::json_return_type> set_tables_autocompaction(http_context& ctx, std::vector<table_info> tables, bool enabled) {
|
||||
apilog.info("set_tables_autocompaction: enabled={} tables={}", enabled, tables);
|
||||
static future<json::json_return_type> set_tables_autocompaction(http_context& ctx, const sstring &keyspace, std::vector<sstring> tables, bool enabled) {
|
||||
apilog.info("set_tables_autocompaction: enabled={} keyspace={} tables={}", enabled, keyspace, tables);
|
||||
|
||||
return ctx.db.invoke_on(0, [&ctx, tables = std::move(tables), enabled] (replica::database& db) {
|
||||
return ctx.db.invoke_on(0, [&ctx, keyspace, tables = std::move(tables), enabled] (replica::database& db) {
|
||||
auto g = autocompaction_toggle_guard(db);
|
||||
return for_tables_on_all_shards(ctx, tables, [enabled] (replica::table& cf) {
|
||||
return for_tables_on_all_shards(ctx, keyspace, tables, [enabled] (replica::table& cf) {
|
||||
if (enabled) {
|
||||
cf.enable_auto_compaction();
|
||||
} else {
|
||||
@@ -114,9 +129,9 @@ static future<json::json_return_type> set_tables_autocompaction(http_context& ct
|
||||
});
|
||||
}
|
||||
|
||||
static future<json::json_return_type> set_tables_tombstone_gc(http_context& ctx, std::vector<table_info> tables, bool enabled) {
|
||||
apilog.info("set_tables_tombstone_gc: enabled={} tables={}", enabled, tables);
|
||||
return for_tables_on_all_shards(ctx, std::move(tables), [enabled] (replica::table& t) {
|
||||
static future<json::json_return_type> set_tables_tombstone_gc(http_context& ctx, const sstring &keyspace, std::vector<sstring> tables, bool enabled) {
|
||||
apilog.info("set_tables_tombstone_gc: enabled={} keyspace={} tables={}", enabled, keyspace, tables);
|
||||
return for_tables_on_all_shards(ctx, keyspace, std::move(tables), [enabled] (replica::table& t) {
|
||||
t.set_tombstone_gc_enabled(enabled);
|
||||
return make_ready_future<>();
|
||||
});
|
||||
@@ -131,7 +146,7 @@ static future<json::json_return_type> get_cf_stats_count(http_context& ctx, con
|
||||
|
||||
static future<json::json_return_type> get_cf_stats_sum(http_context& ctx, const sstring& name,
|
||||
utils::timed_rate_moving_average_summary_and_histogram replica::column_family_stats::*f) {
|
||||
auto uuid = parse_table_info(name, ctx.db.local()).id;
|
||||
auto uuid = get_uuid(name, ctx.db.local());
|
||||
return ctx.db.map_reduce0([uuid, f](replica::database& db) {
|
||||
// Histograms information is sample of the actual load
|
||||
// so to get an estimation of sum, we multiply the mean
|
||||
@@ -154,7 +169,7 @@ static future<json::json_return_type> get_cf_stats_count(http_context& ctx,
|
||||
|
||||
static future<json::json_return_type> get_cf_histogram(http_context& ctx, const sstring& name,
|
||||
utils::timed_rate_moving_average_and_histogram replica::column_family_stats::*f) {
|
||||
auto uuid = parse_table_info(name, ctx.db.local()).id;
|
||||
auto uuid = get_uuid(name, ctx.db.local());
|
||||
return ctx.db.map_reduce0([f, uuid](const replica::database& p) {
|
||||
return (p.find_column_family(uuid).get_stats().*f).hist;},
|
||||
utils::ihistogram(),
|
||||
@@ -166,7 +181,7 @@ static future<json::json_return_type> get_cf_histogram(http_context& ctx, const
|
||||
|
||||
static future<json::json_return_type> get_cf_histogram(http_context& ctx, const sstring& name,
|
||||
utils::timed_rate_moving_average_summary_and_histogram replica::column_family_stats::*f) {
|
||||
auto uuid = parse_table_info(name, ctx.db.local()).id;
|
||||
auto uuid = get_uuid(name, ctx.db.local());
|
||||
return ctx.db.map_reduce0([f, uuid](const replica::database& p) {
|
||||
return (p.find_column_family(uuid).get_stats().*f).hist;},
|
||||
utils::ihistogram(),
|
||||
@@ -187,13 +202,13 @@ static future<json::json_return_type> get_cf_histogram(http_context& ctx, utils:
|
||||
return ctx.db.map(fun).then([](const std::vector<utils::ihistogram> &res) {
|
||||
std::vector<httpd::utils_json::histogram> r;
|
||||
std::ranges::copy(res | std::views::transform(to_json), std::back_inserter(r));
|
||||
return make_ready_future<json::json_return_type>(std::move(r));
|
||||
return make_ready_future<json::json_return_type>(r);
|
||||
});
|
||||
}
|
||||
|
||||
static future<json::json_return_type> get_cf_rate_and_histogram(http_context& ctx, const sstring& name,
|
||||
utils::timed_rate_moving_average_summary_and_histogram replica::column_family_stats::*f) {
|
||||
auto uuid = parse_table_info(name, ctx.db.local()).id;
|
||||
auto uuid = get_uuid(name, ctx.db.local());
|
||||
return ctx.db.map_reduce0([f, uuid](const replica::database& p) {
|
||||
return (p.find_column_family(uuid).get_stats().*f).rate();},
|
||||
utils::rate_moving_average_and_histogram(),
|
||||
@@ -250,29 +265,48 @@ static integral_ratio_holder mean_partition_size(replica::column_family& cf) {
|
||||
return res;
|
||||
}
|
||||
|
||||
static auto count_bytes_on_disk(const replica::column_family& cf, bool total) {
|
||||
uint64_t bytes_on_disk = 0;
|
||||
auto sstables = (total) ? cf.get_sstables_including_compacted_undeleted() : cf.get_sstables();
|
||||
for (auto t : *sstables) {
|
||||
bytes_on_disk += t->bytes_on_disk();
|
||||
static std::unordered_map<sstring, uint64_t> merge_maps(std::unordered_map<sstring, uint64_t> a,
|
||||
const std::unordered_map<sstring, uint64_t>& b) {
|
||||
a.insert(b.begin(), b.end());
|
||||
return a;
|
||||
}
|
||||
|
||||
static json::json_return_type sum_map(const std::unordered_map<sstring, uint64_t>& val) {
|
||||
uint64_t res = 0;
|
||||
for (auto i : val) {
|
||||
res += i.second;
|
||||
}
|
||||
return bytes_on_disk;
|
||||
return res;
|
||||
}
|
||||
|
||||
static future<json::json_return_type> sum_sstable(http_context& ctx, const sstring name, bool total) {
|
||||
return map_reduce_cf_raw(ctx, name, uint64_t(0), [total](replica::column_family& cf) {
|
||||
return count_bytes_on_disk(cf, total);
|
||||
}, std::plus<>()).then([] (uint64_t val) {
|
||||
return make_ready_future<json::json_return_type>(val);
|
||||
auto uuid = get_uuid(name, ctx.db.local());
|
||||
return ctx.db.map_reduce0([uuid, total](replica::database& db) {
|
||||
std::unordered_map<sstring, uint64_t> m;
|
||||
auto sstables = (total) ? db.find_column_family(uuid).get_sstables_including_compacted_undeleted() :
|
||||
db.find_column_family(uuid).get_sstables();
|
||||
for (auto t : *sstables) {
|
||||
m[t->get_filename()] = t->bytes_on_disk();
|
||||
}
|
||||
return m;
|
||||
}, std::unordered_map<sstring, uint64_t>(), merge_maps).
|
||||
then([](const std::unordered_map<sstring, uint64_t>& val) {
|
||||
return sum_map(val);
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
static future<json::json_return_type> sum_sstable(http_context& ctx, bool total) {
|
||||
return map_reduce_cf_raw(ctx, uint64_t(0), [total](replica::column_family& cf) {
|
||||
return count_bytes_on_disk(cf, total);
|
||||
}, std::plus<>()).then([] (uint64_t val) {
|
||||
return make_ready_future<json::json_return_type>(val);
|
||||
return map_reduce_cf_raw(ctx, std::unordered_map<sstring, uint64_t>(), [total](replica::column_family& cf) {
|
||||
std::unordered_map<sstring, uint64_t> m;
|
||||
auto sstables = (total) ? cf.get_sstables_including_compacted_undeleted() :
|
||||
cf.get_sstables();
|
||||
for (auto t : *sstables) {
|
||||
m[t->get_filename()] = t->bytes_on_disk();
|
||||
}
|
||||
return m;
|
||||
},merge_maps).then([](const std::unordered_map<sstring, uint64_t>& val) {
|
||||
return sum_map(val);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -884,92 +918,94 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_auto_compaction.set(r, [&ctx] (const_req req) {
|
||||
auto uuid = parse_table_info(req.get_path_param("name"), ctx.db.local()).id;
|
||||
auto uuid = get_uuid(req.get_path_param("name"), ctx.db.local());
|
||||
replica::column_family& cf = ctx.db.local().find_column_family(uuid);
|
||||
return !cf.is_auto_compaction_disabled_by_user();
|
||||
});
|
||||
|
||||
cf::enable_auto_compaction.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
apilog.info("column_family/enable_auto_compaction: name={}", req->get_path_param("name"));
|
||||
auto ti = parse_table_info(req->get_path_param("name"), ctx.db.local());
|
||||
return set_tables_autocompaction(ctx, {std::move(ti)}, true);
|
||||
auto [ks, cf] = parse_fully_qualified_cf_name(req->get_path_param("name"));
|
||||
validate_table(ctx, ks, cf);
|
||||
return set_tables_autocompaction(ctx, ks, {std::move(cf)}, true);
|
||||
});
|
||||
|
||||
cf::disable_auto_compaction.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
apilog.info("column_family/disable_auto_compaction: name={}", req->get_path_param("name"));
|
||||
auto ti = parse_table_info(req->get_path_param("name"), ctx.db.local());
|
||||
return set_tables_autocompaction(ctx, {std::move(ti)}, false);
|
||||
auto [ks, cf] = parse_fully_qualified_cf_name(req->get_path_param("name"));
|
||||
validate_table(ctx, ks, cf);
|
||||
return set_tables_autocompaction(ctx, ks, {std::move(cf)}, false);
|
||||
});
|
||||
|
||||
ss::enable_auto_compaction.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req);
|
||||
auto tables = parse_table_infos(keyspace, ctx, req->query_parameters, "cf");
|
||||
auto tables = parse_tables(keyspace, ctx, req->query_parameters, "cf");
|
||||
|
||||
apilog.info("enable_auto_compaction: keyspace={} tables={}", keyspace, tables);
|
||||
return set_tables_autocompaction(ctx, std::move(tables), true);
|
||||
return set_tables_autocompaction(ctx, keyspace, tables, true);
|
||||
});
|
||||
|
||||
ss::disable_auto_compaction.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req);
|
||||
auto tables = parse_table_infos(keyspace, ctx, req->query_parameters, "cf");
|
||||
auto tables = parse_tables(keyspace, ctx, req->query_parameters, "cf");
|
||||
|
||||
apilog.info("disable_auto_compaction: keyspace={} tables={}", keyspace, tables);
|
||||
return set_tables_autocompaction(ctx, std::move(tables), false);
|
||||
return set_tables_autocompaction(ctx, keyspace, tables, false);
|
||||
});
|
||||
|
||||
cf::get_tombstone_gc.set(r, [&ctx] (const_req req) {
|
||||
auto uuid = parse_table_info(req.get_path_param("name"), ctx.db.local()).id;
|
||||
auto uuid = get_uuid(req.get_path_param("name"), ctx.db.local());
|
||||
replica::table& t = ctx.db.local().find_column_family(uuid);
|
||||
return t.tombstone_gc_enabled();
|
||||
});
|
||||
|
||||
cf::enable_tombstone_gc.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
apilog.info("column_family/enable_tombstone_gc: name={}", req->get_path_param("name"));
|
||||
auto ti = parse_table_info(req->get_path_param("name"), ctx.db.local());
|
||||
return set_tables_tombstone_gc(ctx, {std::move(ti)}, true);
|
||||
auto [ks, cf] = parse_fully_qualified_cf_name(req->get_path_param("name"));
|
||||
validate_table(ctx, ks, cf);
|
||||
return set_tables_tombstone_gc(ctx, ks, {std::move(cf)}, true);
|
||||
});
|
||||
|
||||
cf::disable_tombstone_gc.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
apilog.info("column_family/disable_tombstone_gc: name={}", req->get_path_param("name"));
|
||||
auto ti = parse_table_info(req->get_path_param("name"), ctx.db.local());
|
||||
return set_tables_tombstone_gc(ctx, {std::move(ti)}, false);
|
||||
auto [ks, cf] = parse_fully_qualified_cf_name(req->get_path_param("name"));
|
||||
validate_table(ctx, ks, cf);
|
||||
return set_tables_tombstone_gc(ctx, ks, {std::move(cf)}, false);
|
||||
});
|
||||
|
||||
ss::enable_tombstone_gc.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req);
|
||||
auto tables = parse_table_infos(keyspace, ctx, req->query_parameters, "cf");
|
||||
auto tables = parse_tables(keyspace, ctx, req->query_parameters, "cf");
|
||||
|
||||
apilog.info("enable_tombstone_gc: keyspace={} tables={}", keyspace, tables);
|
||||
return set_tables_tombstone_gc(ctx, std::move(tables), true);
|
||||
return set_tables_tombstone_gc(ctx, keyspace, tables, true);
|
||||
});
|
||||
|
||||
ss::disable_tombstone_gc.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req);
|
||||
auto tables = parse_table_infos(keyspace, ctx, req->query_parameters, "cf");
|
||||
auto tables = parse_tables(keyspace, ctx, req->query_parameters, "cf");
|
||||
|
||||
apilog.info("disable_tombstone_gc: keyspace={} tables={}", keyspace, tables);
|
||||
return set_tables_tombstone_gc(ctx, std::move(tables), false);
|
||||
return set_tables_tombstone_gc(ctx, keyspace, tables, false);
|
||||
});
|
||||
|
||||
cf::get_built_indexes.set(r, [&ctx, &sys_ks](std::unique_ptr<http::request> req) {
|
||||
auto ks_cf = parse_fully_qualified_cf_name(req->get_path_param("name"));
|
||||
auto&& ks = std::get<0>(ks_cf);
|
||||
auto&& cf_name = std::get<1>(ks_cf);
|
||||
// Use of load_built_views() as filtering table should be in sync with
|
||||
// built_indexes_virtual_reader filtering with BUILT_VIEWS table
|
||||
return sys_ks.local().load_built_views().then([ks, cf_name, &ctx](const std::vector<db::system_keyspace::view_name>& vb) mutable {
|
||||
return sys_ks.local().load_view_build_progress().then([ks, cf_name, &ctx](const std::vector<db::system_keyspace_view_build_progress>& vb) mutable {
|
||||
std::set<sstring> vp;
|
||||
for (auto b : vb) {
|
||||
if (b.first == ks) {
|
||||
vp.insert(b.second);
|
||||
if (b.view.first == ks) {
|
||||
vp.insert(b.view.second);
|
||||
}
|
||||
}
|
||||
std::vector<sstring> res;
|
||||
auto uuid = validate_table(ctx.db.local(), ks, cf_name);
|
||||
auto uuid = get_uuid(ks, cf_name, ctx.db.local());
|
||||
replica::column_family& cf = ctx.db.local().find_column_family(uuid);
|
||||
res.reserve(cf.get_index_manager().list_indexes().size());
|
||||
for (auto&& i : cf.get_index_manager().list_indexes()) {
|
||||
if (vp.contains(secondary_index::index_table_name(i.metadata().name()))) {
|
||||
if (!vp.contains(secondary_index::index_table_name(i.metadata().name()))) {
|
||||
res.emplace_back(i.metadata().name());
|
||||
}
|
||||
}
|
||||
@@ -992,7 +1028,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_compression_ratio.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
auto uuid = parse_table_info(req->get_path_param("name"), ctx.db.local()).id;
|
||||
auto uuid = get_uuid(req->get_path_param("name"), ctx.db.local());
|
||||
|
||||
return ctx.db.map_reduce(sum_ratio<double>(), [uuid](replica::database& db) {
|
||||
replica::column_family& cf = db.find_column_family(uuid);
|
||||
@@ -1015,17 +1051,17 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::set_compaction_strategy_class.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
auto ti = parse_table_info(req->get_path_param("name"), ctx.db.local());
|
||||
auto [ks, cf] = parse_fully_qualified_cf_name(req->get_path_param("name"));
|
||||
sstring strategy = req->get_query_param("class_name");
|
||||
apilog.info("column_family/set_compaction_strategy_class: name={} strategy={}", req->get_path_param("name"), strategy);
|
||||
return for_tables_on_all_shards(ctx, {std::move(ti)}, [strategy] (replica::table& cf) {
|
||||
return for_tables_on_all_shards(ctx, ks, {std::move(cf)}, [strategy] (replica::table& cf) {
|
||||
cf.set_compaction_strategy(sstables::compaction_strategy::type(strategy));
|
||||
return make_ready_future<>();
|
||||
});
|
||||
});
|
||||
|
||||
cf::get_compaction_strategy_class.set(r, [&ctx](const_req req) {
|
||||
return ctx.db.local().find_column_family(parse_table_info(req.get_path_param("name"), ctx.db.local()).id).get_compaction_strategy().name();
|
||||
return ctx.db.local().find_column_family(get_uuid(req.get_path_param("name"), ctx.db.local())).get_compaction_strategy().name();
|
||||
});
|
||||
|
||||
cf::set_compression_parameters.set(r, [](std::unique_ptr<http::request> req) {
|
||||
@@ -1050,7 +1086,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
|
||||
cf::get_sstables_for_key.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
auto key = req->get_query_param("key");
|
||||
auto uuid = parse_table_info(req->get_path_param("name"), ctx.db.local()).id;
|
||||
auto uuid = get_uuid(req->get_path_param("name"), ctx.db.local());
|
||||
|
||||
return ctx.db.map_reduce0([key, uuid] (replica::database& db) -> future<std::unordered_set<sstring>> {
|
||||
auto sstables = co_await db.find_column_family(uuid).get_sstables_by_partition_key(key);
|
||||
|
||||
@@ -22,12 +22,12 @@ namespace api {
|
||||
void set_column_family(http_context& ctx, httpd::routes& r, sharded<db::system_keyspace>& sys_ks);
|
||||
void unset_column_family(http_context& ctx, httpd::routes& r);
|
||||
|
||||
table_info parse_table_info(const sstring& name, const replica::database& db);
|
||||
table_id get_uuid(const sstring& name, const replica::database& db);
|
||||
|
||||
template<class Mapper, class I, class Reducer>
|
||||
future<I> map_reduce_cf_raw(http_context& ctx, const sstring& name, I init,
|
||||
Mapper mapper, Reducer reducer) {
|
||||
auto uuid = parse_table_info(name, ctx.db.local()).id;
|
||||
auto uuid = get_uuid(name, ctx.db.local());
|
||||
using mapper_type = std::function<std::unique_ptr<std::any>(replica::database&)>;
|
||||
using reducer_type = std::function<std::unique_ptr<std::any>(std::unique_ptr<std::any>, std::unique_ptr<std::any>)>;
|
||||
return ctx.db.map_reduce0(mapper_type([mapper, uuid](replica::database& db) {
|
||||
|
||||
@@ -112,12 +112,12 @@ void set_compaction_manager(http_context& ctx, routes& r, sharded<compaction_man
|
||||
|
||||
cm::stop_keyspace_compaction.set(r, [&ctx] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto ks_name = validate_keyspace(ctx, req);
|
||||
auto tables = parse_table_infos(ks_name, ctx, req->query_parameters, "tables");
|
||||
auto table_names = parse_tables(ks_name, ctx, req->query_parameters, "tables");
|
||||
auto type = req->get_query_param("type");
|
||||
co_await ctx.db.invoke_on_all([&] (replica::database& db) {
|
||||
auto& cm = db.get_compaction_manager();
|
||||
return parallel_for_each(tables, [&] (const table_info& ti) {
|
||||
auto& t = db.find_column_family(ti.id);
|
||||
return parallel_for_each(table_names, [&] (sstring& table_name) {
|
||||
auto& t = db.find_column_family(ks_name, table_name);
|
||||
return t.parallel_foreach_table_state([&] (compaction::table_state& ts) {
|
||||
return cm.stop_compaction(type, &ts);
|
||||
});
|
||||
@@ -204,6 +204,14 @@ void set_compaction_manager(http_context& ctx, routes& r, sharded<compaction_man
|
||||
int value = cm.local().throughput_mbs();
|
||||
return make_ready_future<json::json_return_type>(value);
|
||||
});
|
||||
|
||||
ss::set_compaction_throughput_mb_per_sec.set(r, [](std::unique_ptr<http::request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
auto value = req->get_query_param("value");
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
}
|
||||
|
||||
void unset_compaction_manager(http_context& ctx, routes& r) {
|
||||
@@ -219,6 +227,7 @@ void unset_compaction_manager(http_context& ctx, routes& r) {
|
||||
cm::get_compaction_history.unset(r);
|
||||
cm::get_compaction_info.unset(r);
|
||||
ss::get_compaction_throughput_mb_per_sec.unset(r);
|
||||
ss::set_compaction_throughput_mb_per_sec.unset(r);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -14,7 +14,6 @@
|
||||
#include "replica/database.hh"
|
||||
#include "db/config.hh"
|
||||
#include <sstream>
|
||||
#include <fmt/ranges.h>
|
||||
#include <boost/algorithm/string/replace.hpp>
|
||||
#include <seastar/http/exception.hh>
|
||||
|
||||
@@ -84,7 +83,7 @@ future<> get_config_swagger_entry(std::string_view name, const std::string& desc
|
||||
|
||||
namespace cs = httpd::config_json;
|
||||
|
||||
void set_config(std::shared_ptr < api_registry_builder20 > rb, http_context& ctx, routes& r, db::config& cfg, bool first) {
|
||||
void set_config(std::shared_ptr < api_registry_builder20 > rb, http_context& ctx, routes& r, const db::config& cfg, bool first) {
|
||||
rb->register_function(r, [&cfg, first] (output_stream<char>& os) {
|
||||
return do_with(first, [&os, &cfg] (bool& first) {
|
||||
auto f = make_ready_future();
|
||||
@@ -194,17 +193,6 @@ void set_config(std::shared_ptr < api_registry_builder20 > rb, http_context& ctx
|
||||
return cfg.saved_caches_directory();
|
||||
});
|
||||
|
||||
ss::set_compaction_throughput_mb_per_sec.set(r, [&cfg](std::unique_ptr<http::request> req) mutable {
|
||||
api::req_param<uint32_t> value(*req, "value", 0);
|
||||
cfg.compaction_throughput_mb_per_sec(value.value, utils::config_file::config_source::API);
|
||||
return make_ready_future<json::json_return_type>(json::json_void());
|
||||
});
|
||||
|
||||
ss::set_stream_throughput_mb_per_sec.set(r, [&cfg](std::unique_ptr<http::request> req) mutable {
|
||||
api::req_param<uint32_t> value(*req, "value", 0);
|
||||
cfg.stream_io_throughput_mb_per_sec(value.value, utils::config_file::config_source::API);
|
||||
return make_ready_future<json::json_return_type>(json::json_void());
|
||||
});
|
||||
}
|
||||
|
||||
void unset_config(http_context& ctx, routes& r) {
|
||||
@@ -225,8 +213,6 @@ void unset_config(http_context& ctx, routes& r) {
|
||||
sp::set_truncate_rpc_timeout.unset(r);
|
||||
ss::get_all_data_file_locations.unset(r);
|
||||
ss::get_saved_caches_location.unset(r);
|
||||
ss::set_compaction_throughput_mb_per_sec.unset(r);
|
||||
ss::set_stream_throughput_mb_per_sec.unset(r);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -13,6 +13,6 @@
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_config(std::shared_ptr<httpd::api_registry_builder20> rb, http_context& ctx, httpd::routes& r, db::config& cfg, bool first = false);
|
||||
void set_config(std::shared_ptr<httpd::api_registry_builder20> rb, http_context& ctx, httpd::routes& r, const db::config& cfg, bool first = false);
|
||||
void unset_config(http_context& ctx, httpd::routes& r);
|
||||
}
|
||||
|
||||
@@ -6,8 +6,6 @@
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include "build_mode.hh"
|
||||
|
||||
#ifndef SCYLLA_BUILD_MODE_RELEASE
|
||||
|
||||
#include <seastar/core/coroutine.hh>
|
||||
@@ -28,24 +26,21 @@ struct connection_sl_params : public json::json_base {
|
||||
json::json_element<sstring> _role_name;
|
||||
json::json_element<sstring> _workload_type;
|
||||
json::json_element<sstring> _timeout;
|
||||
json::json_element<sstring> _scheduling_group;
|
||||
|
||||
connection_sl_params(const sstring& role_name, const sstring& workload_type, const sstring& timeout, const sstring& scheduling_group) {
|
||||
connection_sl_params(const sstring& role_name, const sstring& workload_type, const sstring& timeout) {
|
||||
_role_name = role_name;
|
||||
_workload_type = workload_type;
|
||||
_timeout = timeout;
|
||||
_scheduling_group = scheduling_group;
|
||||
register_params();
|
||||
}
|
||||
|
||||
connection_sl_params(const connection_sl_params& params)
|
||||
: connection_sl_params(params._role_name(), params._workload_type(), params._timeout(), params._scheduling_group()) {}
|
||||
: connection_sl_params(params._role_name(), params._workload_type(), params._timeout()) {}
|
||||
|
||||
void register_params() {
|
||||
add(&_role_name, "role_name");
|
||||
add(&_workload_type, "workload_type");
|
||||
add(&_timeout, "timeout");
|
||||
add(&_scheduling_group, "scheduling_group");
|
||||
}
|
||||
};
|
||||
|
||||
@@ -59,8 +54,7 @@ void set_cql_server_test(http_context& ctx, seastar::httpd::routes& r, cql_trans
|
||||
return connection_sl_params(
|
||||
std::move(params.role_name),
|
||||
sstring(qos::service_level_options::to_string(params.workload_type)),
|
||||
to_string(cql_duration(months_counter{0}, days_counter{0}, nanoseconds_counter{nanos})),
|
||||
std::move(params.scheduling_group_name));
|
||||
to_string(cql_duration(months_counter{0}, days_counter{0}, nanoseconds_counter{nanos})));
|
||||
});
|
||||
co_return result;
|
||||
});
|
||||
|
||||
@@ -14,7 +14,6 @@
|
||||
|
||||
#include "gms/inet_address.hh"
|
||||
#include "service/storage_proxy.hh"
|
||||
#include "gms/gossiper.hh"
|
||||
|
||||
namespace api {
|
||||
|
||||
@@ -22,18 +21,18 @@ using namespace json;
|
||||
using namespace seastar::httpd;
|
||||
namespace hh = httpd::hinted_handoff_json;
|
||||
|
||||
void set_hinted_handoff(http_context& ctx, routes& r, sharded<service::storage_proxy>& proxy, sharded<gms::gossiper>& g) {
|
||||
hh::create_hints_sync_point.set(r, [&proxy, &g] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto parse_hosts_list = [&g] (sstring arg) {
|
||||
void set_hinted_handoff(http_context& ctx, routes& r, sharded<service::storage_proxy>& proxy) {
|
||||
hh::create_hints_sync_point.set(r, [&proxy] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto parse_hosts_list = [] (sstring arg) {
|
||||
std::vector<sstring> hosts_str = split(arg, ",");
|
||||
std::vector<locator::host_id> hosts;
|
||||
std::vector<gms::inet_address> hosts;
|
||||
hosts.reserve(hosts_str.size());
|
||||
|
||||
for (const auto& host_str : hosts_str) {
|
||||
try {
|
||||
gms::inet_address host;
|
||||
host = gms::inet_address(host_str);
|
||||
hosts.push_back(g.local().get_host_id(host));
|
||||
hosts.push_back(host);
|
||||
} catch (std::exception& e) {
|
||||
throw httpd::bad_param_exception(format("Failed to parse host address {}: {}", host_str, e.what()));
|
||||
}
|
||||
@@ -42,7 +41,7 @@ void set_hinted_handoff(http_context& ctx, routes& r, sharded<service::storage_p
|
||||
return hosts;
|
||||
};
|
||||
|
||||
std::vector<locator::host_id> target_hosts = parse_hosts_list(req->get_query_param("target_hosts"));
|
||||
std::vector<gms::inet_address> target_hosts = parse_hosts_list(req->get_query_param("target_hosts"));
|
||||
return proxy.local().create_hint_sync_point(std::move(target_hosts)).then([] (db::hints::sync_point sync_point) {
|
||||
return json::json_return_type(sync_point.encode());
|
||||
});
|
||||
|
||||
@@ -10,13 +10,12 @@
|
||||
|
||||
#include <seastar/core/sharded.hh>
|
||||
#include "api/api_init.hh"
|
||||
#include "gms/gossiper.hh"
|
||||
|
||||
namespace service { class storage_proxy; }
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_hinted_handoff(http_context& ctx, httpd::routes& r, sharded<service::storage_proxy>& p, sharded<gms::gossiper>& g);
|
||||
void set_hinted_handoff(http_context& ctx, httpd::routes& r, sharded<service::storage_proxy>& p);
|
||||
void unset_hinted_handoff(http_context& ctx, httpd::routes& r);
|
||||
|
||||
}
|
||||
|
||||
@@ -114,7 +114,7 @@ void set_messaging_service(http_context& ctx, routes& r, sharded<netw::messaging
|
||||
}));
|
||||
|
||||
get_version.set(r, [&ms](const_req req) {
|
||||
return ms.local().current_version;
|
||||
return ms.local().get_raw_version(gms::inet_address(req.get_query_param("addr")));
|
||||
});
|
||||
|
||||
get_dropped_messages_by_ver.set(r, [&ms](std::unique_ptr<request> req) {
|
||||
|
||||
@@ -1,63 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2023-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include "service_levels.hh"
|
||||
#include "api/api-doc/service_levels.json.hh"
|
||||
#include "cql3/query_processor.hh"
|
||||
#include "cql3/untyped_result_set.hh"
|
||||
#include "db/consistency_level_type.hh"
|
||||
#include "seastar/json/json_elements.hh"
|
||||
#include "transport/controller.hh"
|
||||
#include <unordered_map>
|
||||
|
||||
|
||||
namespace api {
|
||||
|
||||
namespace sl = httpd::service_levels_json;
|
||||
using namespace json;
|
||||
using namespace seastar::httpd;
|
||||
|
||||
|
||||
void set_service_levels(http_context& ctx, routes& r, cql_transport::controller& ctl, sharded<cql3::query_processor>& qp) {
|
||||
sl::do_switch_tenants.set(r, [&ctl] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
co_await ctl.update_connections_scheduling_group();
|
||||
co_return json_void();
|
||||
});
|
||||
|
||||
sl::count_connections.set(r, [&qp] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto connections = co_await qp.local().execute_internal(
|
||||
"SELECT username, scheduling_group FROM system.clients WHERE client_type='cql' ALLOW FILTERING",
|
||||
db::consistency_level::LOCAL_ONE,
|
||||
cql3::query_processor::cache_internal::no
|
||||
);
|
||||
|
||||
using connections_per_user = std::unordered_map<sstring, uint64_t>;
|
||||
using connections_per_scheduling_group = std::unordered_map<sstring, connections_per_user>;
|
||||
connections_per_scheduling_group result;
|
||||
|
||||
for (auto it = connections->begin(); it != connections->end(); it++) {
|
||||
auto user = it->get_as<sstring>("username");
|
||||
auto shg = it->get_as<sstring>("scheduling_group");
|
||||
|
||||
if (result.contains(shg)) {
|
||||
result[shg][user]++;
|
||||
}
|
||||
else {
|
||||
result[shg] = {{user, 1}};
|
||||
}
|
||||
}
|
||||
|
||||
co_return result;
|
||||
});
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2023-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "api/api_init.hh"
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_service_levels(http_context& ctx, httpd::routes& r, cql_transport::controller& ctl, sharded<cql3::query_processor>& qp);
|
||||
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -43,9 +43,16 @@ sstring validate_keyspace(const http_context& ctx, sstring ks_name);
|
||||
// containing the description of the respective keyspace error.
|
||||
sstring validate_keyspace(const http_context& ctx, const std::unique_ptr<http::request>& req);
|
||||
|
||||
// verify that the keyspace:table is found, otherwise a bad_param_exception exception is thrown
|
||||
// returns the table_id of the table if found
|
||||
table_id validate_table(const replica::database& db, sstring ks_name, sstring table_name);
|
||||
// verify that the table parameter is found, otherwise a bad_param_exception exception is thrown
|
||||
// containing the description of the respective table error.
|
||||
void validate_table(const http_context& ctx, sstring ks_name, sstring table_name);
|
||||
|
||||
// splits a request parameter assumed to hold a comma-separated list of table names
|
||||
// verify that the tables are found, otherwise a bad_param_exception exception is thrown
|
||||
// containing the description of the respective no_such_column_family error.
|
||||
// Returns an empty vector if no parameter was found.
|
||||
// If the parameter is found and empty, returns a list of all table names in the keyspace.
|
||||
std::vector<sstring> parse_tables(const sstring& ks_name, const http_context& ctx, const std::unordered_map<sstring, sstring>& query_params, sstring param_name);
|
||||
|
||||
// splits a request parameter assumed to hold a comma-separated list of table names
|
||||
// verify that the tables are found, otherwise a bad_param_exception exception is thrown
|
||||
@@ -68,7 +75,7 @@ void set_storage_service(http_context& ctx, httpd::routes& r, sharded<service::s
|
||||
void unset_storage_service(http_context& ctx, httpd::routes& r);
|
||||
void set_sstables_loader(http_context& ctx, httpd::routes& r, sharded<sstables_loader>& sst_loader);
|
||||
void unset_sstables_loader(http_context& ctx, httpd::routes& r);
|
||||
void set_view_builder(http_context& ctx, httpd::routes& r, sharded<db::view::view_builder>& vb, sharded<gms::gossiper>& g);
|
||||
void set_view_builder(http_context& ctx, httpd::routes& r, sharded<db::view::view_builder>& vb);
|
||||
void unset_view_builder(http_context& ctx, httpd::routes& r);
|
||||
void set_repair(http_context& ctx, httpd::routes& r, sharded<repair_service>& repair, sharded<gms::gossip_address_map>& am);
|
||||
void unset_repair(http_context& ctx, httpd::routes& r);
|
||||
|
||||
@@ -11,7 +11,6 @@
|
||||
#include "streaming/stream_result_future.hh"
|
||||
#include "api/api.hh"
|
||||
#include "api/api-doc/stream_manager.json.hh"
|
||||
#include "api/api-doc/storage_service.json.hh"
|
||||
#include <vector>
|
||||
#include <rapidjson/document.h>
|
||||
#include "gms/gossiper.hh"
|
||||
@@ -19,7 +18,6 @@
|
||||
namespace api {
|
||||
using namespace seastar::httpd;
|
||||
|
||||
namespace ss = httpd::storage_service_json;
|
||||
namespace hs = httpd::stream_manager_json;
|
||||
|
||||
static void set_summaries(const std::vector<streaming::stream_summary>& from,
|
||||
@@ -150,11 +148,6 @@ void set_stream_manager(http_context& ctx, routes& r, sharded<streaming::stream_
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
});
|
||||
|
||||
ss::get_stream_throughput_mb_per_sec.set(r, [&sm](std::unique_ptr<http::request> req) {
|
||||
auto value = sm.local().throughput_mbs();
|
||||
return make_ready_future<json::json_return_type>(value);
|
||||
});
|
||||
}
|
||||
|
||||
void unset_stream_manager(http_context& ctx, routes& r) {
|
||||
@@ -164,7 +157,6 @@ void unset_stream_manager(http_context& ctx, routes& r) {
|
||||
hs::get_all_total_incoming_bytes.unset(r);
|
||||
hs::get_total_outgoing_bytes.unset(r);
|
||||
hs::get_all_total_outgoing_bytes.unset(r);
|
||||
ss::get_stream_throughput_mb_per_sec.unset(r);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -13,7 +13,6 @@
|
||||
#include "db/sstables-format-selector.hh"
|
||||
|
||||
#include <rapidjson/document.h>
|
||||
#include <boost/lexical_cast.hpp>
|
||||
#include <seastar/core/reactor.hh>
|
||||
#include <seastar/core/metrics_api.hh>
|
||||
#include <seastar/core/relabel_config.hh>
|
||||
|
||||
@@ -14,7 +14,6 @@
|
||||
#include "api/api.hh"
|
||||
#include "api/api-doc/task_manager.json.hh"
|
||||
#include "db/system_keyspace.hh"
|
||||
#include "gms/gossiper.hh"
|
||||
#include "tasks/task_handler.hh"
|
||||
#include "utils/overloaded_functor.hh"
|
||||
|
||||
@@ -26,23 +25,18 @@ namespace tm = httpd::task_manager_json;
|
||||
using namespace json;
|
||||
using namespace seastar::httpd;
|
||||
|
||||
static ::tm get_time(db_clock::time_point tp) {
|
||||
auto time = db_clock::to_time_t(tp);
|
||||
::tm t;
|
||||
::gmtime_r(&time, &t);
|
||||
return t;
|
||||
}
|
||||
tm::task_status make_status(tasks::task_status status) {
|
||||
auto start_time = db_clock::to_time_t(status.start_time);
|
||||
auto end_time = db_clock::to_time_t(status.end_time);
|
||||
::tm st, et;
|
||||
::gmtime_r(&end_time, &et);
|
||||
::gmtime_r(&start_time, &st);
|
||||
|
||||
tm::task_status make_status(tasks::task_status status, sharded<gms::gossiper>& gossiper) {
|
||||
std::vector<tm::task_identity> tis{status.children.size()};
|
||||
std::ranges::transform(status.children, tis.begin(), [&gossiper] (const auto& child) {
|
||||
std::ranges::transform(status.children, tis.begin(), [] (const auto& child) {
|
||||
tm::task_identity ident;
|
||||
gms::inet_address addr{};
|
||||
if (gossiper.local_is_initialized()) {
|
||||
addr = gossiper.local().get_address_map().find(child.host_id).value_or(gms::inet_address{});
|
||||
}
|
||||
ident.task_id = child.task_id.to_sstring();
|
||||
ident.node = fmt::format("{}", addr);
|
||||
ident.node = fmt::format("{}", child.node);
|
||||
return ident;
|
||||
});
|
||||
|
||||
@@ -53,8 +47,8 @@ tm::task_status make_status(tasks::task_status status, sharded<gms::gossiper>& g
|
||||
res.scope = status.scope;
|
||||
res.state = status.state;
|
||||
res.is_abortable = bool(status.is_abortable);
|
||||
res.start_time = get_time(status.start_time);
|
||||
res.end_time = get_time(status.end_time);
|
||||
res.start_time = st;
|
||||
res.end_time = et;
|
||||
res.error = status.error;
|
||||
res.parent_id = status.parent_id ? status.parent_id.to_sstring() : "none";
|
||||
res.sequence_number = status.sequence_number;
|
||||
@@ -80,13 +74,10 @@ tm::task_stats make_stats(tasks::task_stats stats) {
|
||||
res.keyspace = stats.keyspace;
|
||||
res.table = stats.table;
|
||||
res.entity = stats.entity;
|
||||
res.shard = stats.shard;
|
||||
res.start_time = get_time(stats.start_time);
|
||||
res.end_time = get_time(stats.end_time);;
|
||||
return res;
|
||||
}
|
||||
|
||||
void set_task_manager(http_context& ctx, routes& r, sharded<tasks::task_manager>& tm, db::config& cfg, sharded<gms::gossiper>& gossiper) {
|
||||
void set_task_manager(http_context& ctx, routes& r, sharded<tasks::task_manager>& tm, db::config& cfg) {
|
||||
tm::get_modules.set(r, [&tm] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
std::vector<std::string> v = tm.local().get_modules() | std::views::keys | std::ranges::to<std::vector>();
|
||||
co_return v;
|
||||
@@ -144,7 +135,7 @@ void set_task_manager(http_context& ctx, routes& r, sharded<tasks::task_manager>
|
||||
co_return std::move(f);
|
||||
});
|
||||
|
||||
tm::get_task_status.set(r, [&tm, &gossiper] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
tm::get_task_status.set(r, [&tm] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto id = tasks::task_id{utils::UUID{req->get_path_param("task_id")}};
|
||||
tasks::task_status status;
|
||||
try {
|
||||
@@ -153,7 +144,7 @@ void set_task_manager(http_context& ctx, routes& r, sharded<tasks::task_manager>
|
||||
} catch (tasks::task_manager::task_not_found& e) {
|
||||
throw bad_param_exception(e.what());
|
||||
}
|
||||
co_return make_status(status, gossiper);
|
||||
co_return make_status(status);
|
||||
});
|
||||
|
||||
tm::abort_task.set(r, [&tm] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
@@ -169,7 +160,7 @@ void set_task_manager(http_context& ctx, routes& r, sharded<tasks::task_manager>
|
||||
co_return json_void();
|
||||
});
|
||||
|
||||
tm::wait_task.set(r, [&tm, &gossiper] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
tm::wait_task.set(r, [&tm] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto id = tasks::task_id{utils::UUID{req->get_path_param("task_id")}};
|
||||
tasks::task_status status;
|
||||
std::optional<std::chrono::seconds> timeout = std::nullopt;
|
||||
@@ -184,24 +175,24 @@ void set_task_manager(http_context& ctx, routes& r, sharded<tasks::task_manager>
|
||||
} catch (timed_out_error& e) {
|
||||
throw httpd::base_exception{e.what(), http::reply::status_type::request_timeout};
|
||||
}
|
||||
co_return make_status(status, gossiper);
|
||||
co_return make_status(status);
|
||||
});
|
||||
|
||||
tm::get_task_status_recursively.set(r, [&_tm = tm, &gossiper] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
tm::get_task_status_recursively.set(r, [&_tm = tm] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto& tm = _tm;
|
||||
auto id = tasks::task_id{utils::UUID{req->get_path_param("task_id")}};
|
||||
try {
|
||||
auto task = tasks::task_handler{tm.local(), id};
|
||||
auto res = co_await task.get_status_recursively(true);
|
||||
|
||||
std::function<future<>(output_stream<char>&&)> f = [r = std::move(res), &gossiper] (output_stream<char>&& os) -> future<> {
|
||||
std::function<future<>(output_stream<char>&&)> f = [r = std::move(res)] (output_stream<char>&& os) -> future<> {
|
||||
auto s = std::move(os);
|
||||
auto res = std::move(r);
|
||||
co_await s.write("[");
|
||||
std::string delim = "";
|
||||
for (auto& status: res) {
|
||||
co_await s.write(std::exchange(delim, ", "));
|
||||
co_await formatter::write(s, make_status(status, gossiper));
|
||||
co_await formatter::write(s, make_status(status));
|
||||
}
|
||||
co_await s.write("]");
|
||||
co_await s.close();
|
||||
@@ -241,32 +232,6 @@ void set_task_manager(http_context& ctx, routes& r, sharded<tasks::task_manager>
|
||||
uint32_t user_ttl = cfg.user_task_ttl_seconds();
|
||||
co_return json::json_return_type(user_ttl);
|
||||
});
|
||||
|
||||
tm::drain_tasks.set(r, [&tm] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
co_await tm.invoke_on_all([&req] (tasks::task_manager& tm) -> future<> {
|
||||
tasks::task_manager::module_ptr module;
|
||||
try {
|
||||
module = tm.find_module(req->get_path_param("module"));
|
||||
} catch (...) {
|
||||
throw bad_param_exception(fmt::format("{}", std::current_exception()));
|
||||
}
|
||||
|
||||
const auto& local_tasks = module->get_local_tasks();
|
||||
std::vector<tasks::task_id> ids;
|
||||
ids.reserve(local_tasks.size());
|
||||
std::transform(begin(local_tasks), end(local_tasks), std::back_inserter(ids), [] (const auto& task) {
|
||||
return task.second->is_complete() ? task.first : tasks::task_id::create_null_id();
|
||||
});
|
||||
|
||||
for (auto&& id : ids) {
|
||||
if (id) {
|
||||
module->unregister_task(id);
|
||||
}
|
||||
co_await maybe_yield();
|
||||
}
|
||||
});
|
||||
co_return json_void();
|
||||
});
|
||||
}
|
||||
|
||||
void unset_task_manager(http_context& ctx, routes& r) {
|
||||
@@ -278,7 +243,6 @@ void unset_task_manager(http_context& ctx, routes& r) {
|
||||
tm::get_task_status_recursively.unset(r);
|
||||
tm::get_and_update_ttl.unset(r);
|
||||
tm::get_ttl.unset(r);
|
||||
tm::drain_tasks.unset(r);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -18,7 +18,7 @@ namespace tasks {
|
||||
|
||||
namespace api {
|
||||
|
||||
void set_task_manager(http_context& ctx, httpd::routes& r, sharded<tasks::task_manager>& tm, db::config& cfg, sharded<gms::gossiper>& gossiper);
|
||||
void set_task_manager(http_context& ctx, httpd::routes& r, sharded<tasks::task_manager>& tm, db::config& cfg);
|
||||
void unset_task_manager(http_context& ctx, httpd::routes& r);
|
||||
|
||||
}
|
||||
|
||||
@@ -6,9 +6,6 @@
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
|
||||
#include "build_mode.hh"
|
||||
|
||||
#ifndef SCYLLA_BUILD_MODE_RELEASE
|
||||
|
||||
#include <seastar/core/coroutine.hh>
|
||||
|
||||
@@ -10,7 +10,6 @@
|
||||
#include "api/api-doc/storage_service.json.hh"
|
||||
#include "api/api-doc/endpoint_snitch_info.json.hh"
|
||||
#include "locator/token_metadata.hh"
|
||||
#include "gms/gossiper.hh"
|
||||
|
||||
using namespace seastar::httpd;
|
||||
|
||||
@@ -19,7 +18,7 @@ namespace api {
|
||||
namespace ss = httpd::storage_service_json;
|
||||
using namespace json;
|
||||
|
||||
void set_token_metadata(http_context& ctx, routes& r, sharded<locator::shared_token_metadata>& tm, sharded<gms::gossiper>& g) {
|
||||
void set_token_metadata(http_context& ctx, routes& r, sharded<locator::shared_token_metadata>& tm) {
|
||||
ss::local_hostid.set(r, [&tm](std::unique_ptr<http::request> req) {
|
||||
auto id = tm.local().get()->get_my_id();
|
||||
if (!bool(id)) {
|
||||
@@ -34,25 +33,22 @@ void set_token_metadata(http_context& ctx, routes& r, sharded<locator::shared_to
|
||||
}));
|
||||
});
|
||||
|
||||
ss::get_node_tokens.set(r, [&tm, &g] (std::unique_ptr<http::request> req) {
|
||||
ss::get_node_tokens.set(r, [&tm] (std::unique_ptr<http::request> req) {
|
||||
gms::inet_address addr(req->get_path_param("endpoint"));
|
||||
auto& local_tm = *tm.local().get();
|
||||
std::optional<locator::host_id> host_id;
|
||||
try {
|
||||
host_id = g.local().get_host_id(addr);
|
||||
} catch (...) {}
|
||||
const auto host_id = local_tm.get_host_id_if_known(addr);
|
||||
return make_ready_future<json::json_return_type>(stream_range_as_array(host_id ? local_tm.get_tokens(*host_id): std::vector<dht::token>{}, [](const dht::token& i) {
|
||||
return fmt::to_string(i);
|
||||
}));
|
||||
});
|
||||
|
||||
ss::get_leaving_nodes.set(r, [&tm, &g](const_req req) {
|
||||
ss::get_leaving_nodes.set(r, [&tm](const_req req) {
|
||||
const auto& local_tm = *tm.local().get();
|
||||
const auto& leaving_host_ids = local_tm.get_leaving_endpoints();
|
||||
std::unordered_set<gms::inet_address> eps;
|
||||
eps.reserve(leaving_host_ids.size());
|
||||
for (const auto host_id: leaving_host_ids) {
|
||||
eps.insert(g.local().get_address_map().get(host_id));
|
||||
eps.insert(local_tm.get_endpoint_for_host_id(host_id));
|
||||
}
|
||||
return container_to_vec(eps);
|
||||
});
|
||||
@@ -62,23 +58,20 @@ void set_token_metadata(http_context& ctx, routes& r, sharded<locator::shared_to
|
||||
return container_to_vec(addr);
|
||||
});
|
||||
|
||||
ss::get_joining_nodes.set(r, [&tm, &g](const_req req) {
|
||||
ss::get_joining_nodes.set(r, [&tm](const_req req) {
|
||||
const auto& local_tm = *tm.local().get();
|
||||
const auto& points = local_tm.get_bootstrap_tokens();
|
||||
std::unordered_set<gms::inet_address> eps;
|
||||
eps.reserve(points.size());
|
||||
for (const auto& [token, host_id]: points) {
|
||||
eps.insert(g.local().get_address_map().get(host_id));
|
||||
eps.insert(local_tm.get_endpoint_for_host_id(host_id));
|
||||
}
|
||||
return container_to_vec(eps);
|
||||
});
|
||||
|
||||
ss::get_host_id_map.set(r, [&tm, &g](const_req req) {
|
||||
ss::get_host_id_map.set(r, [&tm](const_req req) {
|
||||
std::vector<ss::mapper> res;
|
||||
auto map = tm.local().get()->get_host_ids() |
|
||||
std::views::transform([&g] (locator::host_id id) { return std::make_pair(g.local().get_address_map().get(id), id); }) |
|
||||
std::ranges::to<std::unordered_map>();
|
||||
return map_to_key_value(std::move(map), res);
|
||||
return map_to_key_value(tm.local().get()->get_endpoint_to_host_id_map(), res);
|
||||
});
|
||||
|
||||
static auto host_or_broadcast = [&tm](const_req req) {
|
||||
@@ -86,34 +79,26 @@ void set_token_metadata(http_context& ctx, routes& r, sharded<locator::shared_to
|
||||
return host.empty() ? tm.local().get()->get_topology().my_address() : gms::inet_address(host);
|
||||
};
|
||||
|
||||
httpd::endpoint_snitch_info_json::get_datacenter.set(r, [&tm, &g](const_req req) {
|
||||
httpd::endpoint_snitch_info_json::get_datacenter.set(r, [&tm](const_req req) {
|
||||
auto& topology = tm.local().get()->get_topology();
|
||||
auto ep = host_or_broadcast(req);
|
||||
std::optional<locator::host_id> host_id;
|
||||
try {
|
||||
host_id = g.local().get_host_id(ep);
|
||||
} catch (...) {}
|
||||
if (!host_id || !topology.has_node(*host_id)) {
|
||||
if (!topology.has_endpoint(ep)) {
|
||||
// Cannot return error here, nodetool status can race, request
|
||||
// info about just-left node and not handle it nicely
|
||||
return locator::endpoint_dc_rack::default_location.dc;
|
||||
}
|
||||
return topology.get_datacenter(*host_id);
|
||||
return topology.get_datacenter(ep);
|
||||
});
|
||||
|
||||
httpd::endpoint_snitch_info_json::get_rack.set(r, [&tm, &g](const_req req) {
|
||||
httpd::endpoint_snitch_info_json::get_rack.set(r, [&tm](const_req req) {
|
||||
auto& topology = tm.local().get()->get_topology();
|
||||
auto ep = host_or_broadcast(req);
|
||||
std::optional<locator::host_id> host_id;
|
||||
try {
|
||||
host_id = g.local().get_host_id(ep);
|
||||
} catch (...) {}
|
||||
if (!host_id || !topology.has_node(*host_id)) {
|
||||
if (!topology.has_endpoint(ep)) {
|
||||
// Cannot return error here, nodetool status can race, request
|
||||
// info about just-left node and not handle it nicely
|
||||
return locator::endpoint_dc_rack::default_location.rack;
|
||||
}
|
||||
return topology.get_rack(*host_id);
|
||||
return topology.get_rack(ep);
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -15,11 +15,10 @@ class routes;
|
||||
}
|
||||
|
||||
namespace locator { class shared_token_metadata; }
|
||||
namespace gms { class gossiper; }
|
||||
|
||||
namespace api {
|
||||
struct http_context;
|
||||
void set_token_metadata(http_context& ctx, seastar::httpd::routes& r, seastar::sharded<locator::shared_token_metadata>& tm, seastar::sharded<gms::gossiper>& g);
|
||||
void set_token_metadata(http_context& ctx, seastar::httpd::routes& r, seastar::sharded<locator::shared_token_metadata>& tm);
|
||||
void unset_token_metadata(http_context& ctx, seastar::httpd::routes& r);
|
||||
|
||||
}
|
||||
|
||||
@@ -1,19 +0,0 @@
|
||||
include(add_whole_archive)
|
||||
|
||||
add_library(scylla_audit STATIC)
|
||||
target_sources(scylla_audit
|
||||
PRIVATE
|
||||
audit.cc
|
||||
audit_cf_storage_helper.cc
|
||||
audit_syslog_storage_helper.cc)
|
||||
target_include_directories(scylla_audit
|
||||
PUBLIC
|
||||
${CMAKE_SOURCE_DIR})
|
||||
target_link_libraries(scylla_audit
|
||||
PUBLIC
|
||||
Seastar::seastar
|
||||
xxHash::xxhash
|
||||
PRIVATE
|
||||
cql3)
|
||||
|
||||
add_whole_archive(audit scylla_audit)
|
||||
294
audit/audit.cc
294
audit/audit.cc
@@ -1,294 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2017 ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include <seastar/core/future-util.hh>
|
||||
#include "audit/audit.hh"
|
||||
#include "db/config.hh"
|
||||
#include "cql3/cql_statement.hh"
|
||||
#include "cql3/statements/batch_statement.hh"
|
||||
#include "cql3/statements/modification_statement.hh"
|
||||
#include "storage_helper.hh"
|
||||
#include "audit.hh"
|
||||
#include "../db/config.hh"
|
||||
#include "utils/class_registrator.hh"
|
||||
|
||||
#include <boost/algorithm/string/split.hpp>
|
||||
#include <boost/algorithm/string/trim.hpp>
|
||||
#include <boost/algorithm/string/classification.hpp>
|
||||
|
||||
|
||||
namespace audit {
|
||||
|
||||
logging::logger logger("audit");
|
||||
|
||||
static sstring category_to_string(statement_category category)
|
||||
{
|
||||
switch (category) {
|
||||
case statement_category::QUERY: return "QUERY";
|
||||
case statement_category::DML: return "DML";
|
||||
case statement_category::DDL: return "DDL";
|
||||
case statement_category::DCL: return "DCL";
|
||||
case statement_category::AUTH: return "AUTH";
|
||||
case statement_category::ADMIN: return "ADMIN";
|
||||
}
|
||||
return "";
|
||||
}
|
||||
|
||||
sstring audit_info::category_string() const {
|
||||
return category_to_string(_category);
|
||||
}
|
||||
|
||||
static category_set parse_audit_categories(const sstring& data) {
|
||||
category_set result;
|
||||
if (!data.empty()) {
|
||||
std::vector<sstring> tokens;
|
||||
boost::split(tokens, data, boost::is_any_of(","));
|
||||
for (sstring& category : tokens) {
|
||||
boost::trim(category);
|
||||
if (category == "QUERY") {
|
||||
result.set(statement_category::QUERY);
|
||||
} else if (category == "DML") {
|
||||
result.set(statement_category::DML);
|
||||
} else if (category == "DDL") {
|
||||
result.set(statement_category::DDL);
|
||||
} else if (category == "DCL") {
|
||||
result.set(statement_category::DCL);
|
||||
} else if (category == "AUTH") {
|
||||
result.set(statement_category::AUTH);
|
||||
} else if (category == "ADMIN") {
|
||||
result.set(statement_category::ADMIN);
|
||||
} else {
|
||||
throw audit_exception(fmt::format("Bad configuration: invalid 'audit_categories': {}", data));
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
static std::map<sstring, std::set<sstring>> parse_audit_tables(const sstring& data) {
|
||||
std::map<sstring, std::set<sstring>> result;
|
||||
if (!data.empty()) {
|
||||
std::vector<sstring> tokens;
|
||||
boost::split(tokens, data, boost::is_any_of(","));
|
||||
for (sstring& token : tokens) {
|
||||
std::vector<sstring> parts;
|
||||
boost::split(parts, token, boost::is_any_of("."));
|
||||
if (parts.size() != 2) {
|
||||
throw audit_exception(fmt::format("Bad configuration: invalid 'audit_tables': {}", data));
|
||||
}
|
||||
boost::trim(parts[0]);
|
||||
boost::trim(parts[1]);
|
||||
result[parts[0]].insert(std::move(parts[1]));
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
static std::set<sstring> parse_audit_keyspaces(const sstring& data) {
|
||||
std::set<sstring> result;
|
||||
if (!data.empty()) {
|
||||
std::vector<sstring> tokens;
|
||||
boost::split(tokens, data, boost::is_any_of(","));
|
||||
for (sstring& token : tokens) {
|
||||
boost::trim(token);
|
||||
result.insert(std::move(token));
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
audit::audit(locator::shared_token_metadata& token_metadata,
|
||||
sstring&& storage_helper_name,
|
||||
std::set<sstring>&& audited_keyspaces,
|
||||
std::map<sstring, std::set<sstring>>&& audited_tables,
|
||||
category_set&& audited_categories,
|
||||
const db::config& cfg)
|
||||
: _token_metadata(token_metadata)
|
||||
, _audited_keyspaces(std::move(audited_keyspaces))
|
||||
, _audited_tables(std::move(audited_tables))
|
||||
, _audited_categories(std::move(audited_categories))
|
||||
, _storage_helper_class_name(std::move(storage_helper_name))
|
||||
, _cfg(cfg)
|
||||
, _cfg_keyspaces_observer(cfg.audit_keyspaces.observe([this] (sstring const& new_value){ update_config<std::set<sstring>>(new_value, parse_audit_keyspaces, _audited_keyspaces); }))
|
||||
, _cfg_tables_observer(cfg.audit_tables.observe([this] (sstring const& new_value){ update_config<std::map<sstring, std::set<sstring>>>(new_value, parse_audit_tables, _audited_tables); }))
|
||||
, _cfg_categories_observer(cfg.audit_categories.observe([this] (sstring const& new_value){ update_config<category_set>(new_value, parse_audit_categories, _audited_categories); }))
|
||||
{ }
|
||||
|
||||
audit::~audit() = default;
|
||||
|
||||
future<> audit::create_audit(const db::config& cfg, sharded<locator::shared_token_metadata>& stm) {
|
||||
sstring storage_helper_name;
|
||||
if (cfg.audit() == "table") {
|
||||
storage_helper_name = "audit_cf_storage_helper";
|
||||
} else if (cfg.audit() == "syslog") {
|
||||
storage_helper_name = "audit_syslog_storage_helper";
|
||||
} else if (cfg.audit() == "none") {
|
||||
// Audit is off
|
||||
logger.info("Audit is disabled");
|
||||
|
||||
return make_ready_future<>();
|
||||
} else {
|
||||
throw audit_exception(fmt::format("Bad configuration: invalid 'audit': {}", cfg.audit()));
|
||||
}
|
||||
category_set audited_categories = parse_audit_categories(cfg.audit_categories());
|
||||
std::map<sstring, std::set<sstring>> audited_tables = parse_audit_tables(cfg.audit_tables());
|
||||
std::set<sstring> audited_keyspaces = parse_audit_keyspaces(cfg.audit_keyspaces());
|
||||
|
||||
logger.info("Audit is enabled. Auditing to: \"{}\", with the following categories: \"{}\", keyspaces: \"{}\", and tables: \"{}\"",
|
||||
cfg.audit(), cfg.audit_categories(), cfg.audit_keyspaces(), cfg.audit_tables());
|
||||
|
||||
return audit_instance().start(std::ref(stm),
|
||||
std::move(storage_helper_name),
|
||||
std::move(audited_keyspaces),
|
||||
std::move(audited_tables),
|
||||
std::move(audited_categories),
|
||||
std::cref(cfg));
|
||||
}
|
||||
|
||||
future<> audit::start_audit(const db::config& cfg, sharded<cql3::query_processor>& qp, sharded<service::migration_manager>& mm) {
|
||||
if (!audit_instance().local_is_initialized()) {
|
||||
return make_ready_future<>();
|
||||
}
|
||||
return audit_instance().invoke_on_all([&cfg, &qp, &mm] (audit& local_audit) {
|
||||
return local_audit.start(cfg, qp.local(), mm.local());
|
||||
});
|
||||
}
|
||||
|
||||
future<> audit::stop_audit() {
|
||||
if (!audit_instance().local_is_initialized()) {
|
||||
return make_ready_future<>();
|
||||
}
|
||||
return audit::audit::audit_instance().invoke_on_all([] (auto& local_audit) {
|
||||
return local_audit.shutdown();
|
||||
}).then([] {
|
||||
return audit::audit::audit_instance().stop();
|
||||
});
|
||||
}
|
||||
|
||||
audit_info_ptr audit::create_audit_info(statement_category cat, const sstring& keyspace, const sstring& table) {
|
||||
if (!audit_instance().local_is_initialized()) {
|
||||
return nullptr;
|
||||
}
|
||||
return std::make_unique<audit_info>(cat, keyspace, table);
|
||||
}
|
||||
|
||||
audit_info_ptr audit::create_no_audit_info() {
|
||||
return audit_info_ptr();
|
||||
}
|
||||
|
||||
future<> audit::start(const db::config& cfg, cql3::query_processor& qp, service::migration_manager& mm) {
|
||||
try {
|
||||
_storage_helper_ptr = create_object<storage_helper>(_storage_helper_class_name, qp, mm);
|
||||
} catch (no_such_class& e) {
|
||||
logger.error("Can't create audit storage helper {}: not supported", _storage_helper_class_name);
|
||||
throw;
|
||||
} catch (...) {
|
||||
throw;
|
||||
}
|
||||
return _storage_helper_ptr->start(cfg);
|
||||
}
|
||||
|
||||
future<> audit::stop() {
|
||||
return _storage_helper_ptr->stop();
|
||||
}
|
||||
|
||||
future<> audit::shutdown() {
|
||||
return make_ready_future<>();
|
||||
}
|
||||
|
||||
future<> audit::log(const audit_info* audit_info, service::query_state& query_state, const cql3::query_options& options, bool error) {
|
||||
const service::client_state& client_state = query_state.get_client_state();
|
||||
socket_address node_ip = _token_metadata.get()->get_topology().my_address().addr();
|
||||
db::consistency_level cl = options.get_consistency();
|
||||
thread_local static sstring no_username("undefined");
|
||||
static const sstring anonymous_username("anonymous");
|
||||
const sstring& username = client_state.user() ? client_state.user()->name.value_or(anonymous_username) : no_username;
|
||||
socket_address client_ip = client_state.get_client_address().addr();
|
||||
return futurize_invoke(std::mem_fn(&storage_helper::write), _storage_helper_ptr, audit_info, node_ip, client_ip, cl, username, error)
|
||||
.handle_exception([audit_info, node_ip, client_ip, cl, username, error] (auto ep) {
|
||||
logger.error("Unexpected exception when writing log with: node_ip {} category {} cl {} error {} keyspace {} query '{}' client_ip {} table {} username {} exception {}",
|
||||
node_ip, audit_info->category_string(), cl, error, audit_info->keyspace(),
|
||||
audit_info->query(), client_ip, audit_info->table(),username, ep);
|
||||
});
|
||||
}
|
||||
|
||||
future<> audit::log_login(const sstring& username, socket_address client_ip, bool error) noexcept {
|
||||
socket_address node_ip = _token_metadata.get()->get_topology().my_address().addr();
|
||||
return futurize_invoke(std::mem_fn(&storage_helper::write_login), _storage_helper_ptr, username, node_ip, client_ip, error)
|
||||
.handle_exception([username, node_ip, client_ip, error] (auto ep) {
|
||||
logger.error("Unexpected exception when writing login log with: node_ip {} client_ip {} username {} error {} exception {}",
|
||||
node_ip, client_ip, username, error, ep);
|
||||
});
|
||||
}
|
||||
|
||||
future<> inspect(shared_ptr<cql3::cql_statement> statement, service::query_state& query_state, const cql3::query_options& options, bool error) {
|
||||
cql3::statements::batch_statement* batch = dynamic_cast<cql3::statements::batch_statement*>(statement.get());
|
||||
if (batch != nullptr) {
|
||||
return do_for_each(batch->statements().begin(), batch->statements().end(), [&query_state, &options, error] (auto&& m) {
|
||||
return inspect(m.statement, query_state, options, error);
|
||||
});
|
||||
} else {
|
||||
auto audit_info = statement->get_audit_info();
|
||||
if (bool(audit_info) && audit::local_audit_instance().should_log(audit_info)) {
|
||||
return audit::local_audit_instance().log(audit_info, query_state, options, error);
|
||||
}
|
||||
}
|
||||
return make_ready_future<>();
|
||||
}
|
||||
|
||||
future<> inspect_login(const sstring& username, socket_address client_ip, bool error) {
|
||||
if (!audit::audit_instance().local_is_initialized() || !audit::local_audit_instance().should_log_login()) {
|
||||
return make_ready_future<>();
|
||||
}
|
||||
return audit::local_audit_instance().log_login(username, client_ip, error);
|
||||
}
|
||||
|
||||
bool audit::should_log_table(const sstring& keyspace, const sstring& name) const {
|
||||
auto keyspace_it = _audited_tables.find(keyspace);
|
||||
return keyspace_it != _audited_tables.cend() && keyspace_it->second.find(name) != keyspace_it->second.cend();
|
||||
}
|
||||
|
||||
bool audit::should_log(const audit_info* audit_info) const {
|
||||
return _audited_categories.contains(audit_info->category())
|
||||
&& (_audited_keyspaces.find(audit_info->keyspace()) != _audited_keyspaces.cend()
|
||||
|| should_log_table(audit_info->keyspace(), audit_info->table())
|
||||
|| audit_info->category() == statement_category::AUTH
|
||||
|| audit_info->category() == statement_category::ADMIN
|
||||
|| audit_info->category() == statement_category::DCL);
|
||||
}
|
||||
|
||||
template<class T>
|
||||
void audit::update_config(const sstring & new_value, std::function<T(const sstring&)> parse_func, T& cfg_parameter)
|
||||
{
|
||||
try {
|
||||
cfg_parameter = parse_func(new_value);
|
||||
} catch (...) {
|
||||
logger.error("Audit configuration update failed because cannot parse value=\"{}\".", new_value);
|
||||
return;
|
||||
}
|
||||
|
||||
// If update_config is called with an invalid new_value, this line is not reached.
|
||||
// But logging the invalid value must be avoided later, when a different configuration parameter is changed to a correct value.
|
||||
// That's why values from _audited_{categories, keyspaces, tables} are logged instead of _cfg.audit_{categories, keyspaces, tables}
|
||||
|
||||
// Each table as "keyspace.table_name" like in the configuration file
|
||||
auto table_entries = _audited_tables | std::views::transform([](const auto& pair) {
|
||||
return pair.second | std::views::transform([&](const std::string& table_name) {
|
||||
return fmt::format("{}.{}", pair.first, table_name);
|
||||
});
|
||||
}) | std::views::join;
|
||||
|
||||
logger.info(
|
||||
"Audit configuration is updated. Auditing to: \"{}\", with the following categories: \"{}\", keyspaces: \"{}\", and tables: \"{}\".",
|
||||
_cfg.audit(),
|
||||
fmt::join(std::views::transform(_audited_categories, category_to_string), ","),
|
||||
fmt::join(_audited_keyspaces, ","),
|
||||
fmt::join(table_entries, ","));
|
||||
}
|
||||
|
||||
}
|
||||
152
audit/audit.hh
152
audit/audit.hh
@@ -1,152 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2017 ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
#pragma once
|
||||
|
||||
#include "seastarx.hh"
|
||||
#include "utils/log.hh"
|
||||
#include "utils/observable.hh"
|
||||
#include "db/consistency_level.hh"
|
||||
#include "locator/token_metadata_fwd.hh"
|
||||
#include <seastar/core/sharded.hh>
|
||||
#include <seastar/util/log.hh>
|
||||
|
||||
#include "enum_set.hh"
|
||||
|
||||
#include <memory>
|
||||
|
||||
namespace db {
|
||||
|
||||
class config;
|
||||
|
||||
}
|
||||
|
||||
namespace cql3 {
|
||||
|
||||
class cql_statement;
|
||||
class query_processor;
|
||||
class query_options;
|
||||
|
||||
}
|
||||
|
||||
namespace service {
|
||||
|
||||
class migration_manager;
|
||||
class query_state;
|
||||
|
||||
}
|
||||
|
||||
namespace locator {
|
||||
|
||||
class shared_token_metadata;
|
||||
|
||||
}
|
||||
|
||||
namespace audit {
|
||||
|
||||
extern logging::logger logger;
|
||||
|
||||
class audit_exception : public std::exception {
|
||||
sstring _what;
|
||||
public:
|
||||
explicit audit_exception(sstring&& what) : _what(std::move(what)) { }
|
||||
const char* what() const noexcept override {
|
||||
return _what.c_str();
|
||||
}
|
||||
};
|
||||
|
||||
enum class statement_category {
|
||||
QUERY, DML, DDL, DCL, AUTH, ADMIN
|
||||
};
|
||||
|
||||
using category_set = enum_set<super_enum<statement_category, statement_category::QUERY,
|
||||
statement_category::DML,
|
||||
statement_category::DDL,
|
||||
statement_category::DCL,
|
||||
statement_category::AUTH,
|
||||
statement_category::ADMIN>>;
|
||||
|
||||
class audit_info final {
|
||||
statement_category _category;
|
||||
sstring _keyspace;
|
||||
sstring _table;
|
||||
sstring _query;
|
||||
public:
|
||||
audit_info(statement_category cat, sstring keyspace, sstring table)
|
||||
: _category(cat)
|
||||
, _keyspace(std::move(keyspace))
|
||||
, _table(std::move(table))
|
||||
{ }
|
||||
void set_query_string(const std::string_view& query_string) {
|
||||
_query = sstring(query_string);
|
||||
}
|
||||
const sstring& keyspace() const { return _keyspace; }
|
||||
const sstring& table() const { return _table; }
|
||||
const sstring& query() const { return _query; }
|
||||
sstring category_string() const;
|
||||
statement_category category() const { return _category; }
|
||||
};
|
||||
|
||||
using audit_info_ptr = std::unique_ptr<audit_info>;
|
||||
|
||||
class storage_helper;
|
||||
|
||||
class audit final : public seastar::async_sharded_service<audit> {
|
||||
locator::shared_token_metadata& _token_metadata;
|
||||
std::set<sstring> _audited_keyspaces;
|
||||
// Maps keyspace name to set of table names in that keyspace
|
||||
std::map<sstring, std::set<sstring>> _audited_tables;
|
||||
category_set _audited_categories;
|
||||
|
||||
sstring _storage_helper_class_name;
|
||||
std::unique_ptr<storage_helper> _storage_helper_ptr;
|
||||
|
||||
const db::config& _cfg;
|
||||
utils::observer<sstring> _cfg_keyspaces_observer;
|
||||
utils::observer<sstring> _cfg_tables_observer;
|
||||
utils::observer<sstring> _cfg_categories_observer;
|
||||
|
||||
template<class T>
|
||||
void update_config(const sstring & new_value, std::function<T(const sstring&)> parse_func, T& cfg_parameter);
|
||||
|
||||
bool should_log_table(const sstring& keyspace, const sstring& name) const;
|
||||
public:
|
||||
static seastar::sharded<audit>& audit_instance() {
|
||||
// FIXME: leaked intentionally to avoid shutdown problems, see #293
|
||||
static seastar::sharded<audit>* audit_inst = new seastar::sharded<audit>();
|
||||
|
||||
return *audit_inst;
|
||||
}
|
||||
|
||||
static audit& local_audit_instance() {
|
||||
return audit_instance().local();
|
||||
}
|
||||
static future<> create_audit(const db::config& cfg, sharded<locator::shared_token_metadata>& stm);
|
||||
static future<> start_audit(const db::config& cfg, sharded<cql3::query_processor>& qp, sharded<service::migration_manager>& mm);
|
||||
static future<> stop_audit();
|
||||
static audit_info_ptr create_audit_info(statement_category cat, const sstring& keyspace, const sstring& table);
|
||||
static audit_info_ptr create_no_audit_info();
|
||||
audit(locator::shared_token_metadata& stm, sstring&& storage_helper_name,
|
||||
std::set<sstring>&& audited_keyspaces,
|
||||
std::map<sstring, std::set<sstring>>&& audited_tables,
|
||||
category_set&& audited_categories,
|
||||
const db::config& cfg);
|
||||
~audit();
|
||||
future<> start(const db::config& cfg, cql3::query_processor& qp, service::migration_manager& mm);
|
||||
future<> stop();
|
||||
future<> shutdown();
|
||||
bool should_log(const audit_info* audit_info) const;
|
||||
bool should_log_login() const { return _audited_categories.contains(statement_category::AUTH); }
|
||||
future<> log(const audit_info* audit_info, service::query_state& query_state, const cql3::query_options& options, bool error);
|
||||
future<> log_login(const sstring& username, socket_address client_ip, bool error) noexcept;
|
||||
};
|
||||
|
||||
future<> inspect(shared_ptr<cql3::cql_statement> statement, service::query_state& query_state, const cql3::query_options& options, bool error);
|
||||
|
||||
future<> inspect_login(const sstring& username, socket_address client_ip, bool error);
|
||||
|
||||
}
|
||||
@@ -1,202 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2017 ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include "audit/audit_cf_storage_helper.hh"
|
||||
|
||||
#include "cql3/query_processor.hh"
|
||||
#include "data_dictionary/keyspace_metadata.hh"
|
||||
#include "utils/UUID_gen.hh"
|
||||
#include "utils/class_registrator.hh"
|
||||
#include "cql3/query_options.hh"
|
||||
#include "cql3/statements/ks_prop_defs.hh"
|
||||
#include "service/migration_manager.hh"
|
||||
#include "service/storage_proxy.hh"
|
||||
|
||||
namespace audit {
|
||||
|
||||
const sstring audit_cf_storage_helper::KEYSPACE_NAME("audit");
|
||||
const sstring audit_cf_storage_helper::TABLE_NAME("audit_log");
|
||||
|
||||
audit_cf_storage_helper::audit_cf_storage_helper(cql3::query_processor& qp, service::migration_manager& mm)
|
||||
: _qp(qp)
|
||||
, _mm(mm)
|
||||
, _table(KEYSPACE_NAME, TABLE_NAME,
|
||||
fmt::format("CREATE TABLE IF NOT EXISTS {}.{} ("
|
||||
"date timestamp, "
|
||||
"node inet, "
|
||||
"event_time timeuuid, "
|
||||
"category text, "
|
||||
"consistency text, "
|
||||
"table_name text, "
|
||||
"keyspace_name text, "
|
||||
"operation text, "
|
||||
"source inet, "
|
||||
"username text, "
|
||||
"error boolean, "
|
||||
"PRIMARY KEY ((date, node), event_time))",
|
||||
KEYSPACE_NAME, TABLE_NAME),
|
||||
fmt::format("INSERT INTO {}.{} ("
|
||||
"date,"
|
||||
"node,"
|
||||
"event_time,"
|
||||
"category,"
|
||||
"consistency,"
|
||||
"table_name,"
|
||||
"keyspace_name,"
|
||||
"operation,"
|
||||
"source,"
|
||||
"username,"
|
||||
"error) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
|
||||
KEYSPACE_NAME, TABLE_NAME))
|
||||
, _dummy_query_state(service::client_state::for_internal_calls(), empty_service_permit())
|
||||
{
|
||||
}
|
||||
|
||||
future<> audit_cf_storage_helper::migrate_audit_table(service::group0_guard group0_guard) {
|
||||
while (true) {
|
||||
auto const ks = _qp.db().try_find_keyspace(KEYSPACE_NAME);
|
||||
if (ks && ks->metadata()->strategy_name() == "org.apache.cassandra.locator.SimpleStrategy") {
|
||||
data_dictionary::database db = _qp.db();
|
||||
cql3::statements::ks_prop_defs old_ks_prop_defs;
|
||||
auto old_ks_metadata = old_ks_prop_defs.as_ks_metadata_update(
|
||||
ks->metadata(), *_qp.proxy().get_token_metadata_ptr(), db.features());
|
||||
std::map<sstring, sstring> strategy_opts;
|
||||
for (const auto &dc: _qp.proxy().get_token_metadata_ptr()->get_topology().get_datacenters())
|
||||
strategy_opts[dc] = "3";
|
||||
|
||||
auto new_ks_metadata = keyspace_metadata::new_keyspace(KEYSPACE_NAME,
|
||||
"org.apache.cassandra.locator.NetworkTopologyStrategy",
|
||||
strategy_opts,
|
||||
std::nullopt, // initial_tablets
|
||||
old_ks_metadata->durable_writes(),
|
||||
old_ks_metadata->get_storage_options(),
|
||||
old_ks_metadata->tables());
|
||||
auto ts = group0_guard.write_timestamp();
|
||||
try {
|
||||
co_await _mm.announce(
|
||||
service::prepare_keyspace_update_announcement(db.real_database(), new_ks_metadata, ts),
|
||||
std::move(group0_guard), format("audit: Alter {} keyspace", KEYSPACE_NAME));
|
||||
break;
|
||||
} catch (::service::group0_concurrent_modification &) {
|
||||
logger.info("Concurrent operation is detected while altering {} keyspace, retrying.", KEYSPACE_NAME);
|
||||
}
|
||||
group0_guard = co_await _mm.start_group0_operation();
|
||||
} else {
|
||||
co_return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
future<> audit_cf_storage_helper::start(const db::config &cfg) {
|
||||
if (this_shard_id() != 0) {
|
||||
co_return;
|
||||
}
|
||||
|
||||
if (auto ks = _qp.db().try_find_keyspace(KEYSPACE_NAME);
|
||||
!ks ||
|
||||
ks->metadata()->strategy_name() == "org.apache.cassandra.locator.SimpleStrategy") {
|
||||
|
||||
auto group0_guard = co_await _mm.start_group0_operation();
|
||||
if (ks = _qp.db().try_find_keyspace(KEYSPACE_NAME); !ks) {
|
||||
// releasing, because table_helper::setup_keyspace creates a raft guard of its own
|
||||
service::release_guard(std::move(group0_guard));
|
||||
co_return co_await table_helper::setup_keyspace(_qp, _mm, KEYSPACE_NAME,
|
||||
"org.apache.cassandra.locator.NetworkTopologyStrategy",
|
||||
"3", _dummy_query_state, {&_table});
|
||||
} else if (ks->metadata()->strategy_name() == "org.apache.cassandra.locator.SimpleStrategy") {
|
||||
// We want to migrate the old (pre-Scylla 6.0) SimpleStrategy to a newer one.
|
||||
// The migrate_audit_table() function will do nothing if it races with another strategy change:
|
||||
// - either by another node doing the same thing in parallel,
|
||||
// - or a user manually changing the strategy of the same table.
|
||||
// Note we only check the strategy, not the replication factor.
|
||||
co_return co_await migrate_audit_table(std::move(group0_guard));
|
||||
} else {
|
||||
co_return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
future<> audit_cf_storage_helper::stop() {
|
||||
return make_ready_future<>();
|
||||
}
|
||||
|
||||
future<> audit_cf_storage_helper::write(const audit_info* audit_info,
|
||||
socket_address node_ip,
|
||||
socket_address client_ip,
|
||||
db::consistency_level cl,
|
||||
const sstring& username,
|
||||
bool error) {
|
||||
return _table.insert(_qp, _mm, _dummy_query_state, make_data, audit_info, node_ip, client_ip, cl, username, error);
|
||||
}
|
||||
|
||||
future<> audit_cf_storage_helper::write_login(const sstring& username,
|
||||
socket_address node_ip,
|
||||
socket_address client_ip,
|
||||
bool error) {
|
||||
return _table.insert(_qp, _mm, _dummy_query_state, make_login_data, node_ip, client_ip, username, error);
|
||||
}
|
||||
|
||||
cql3::query_options audit_cf_storage_helper::make_data(const audit_info* audit_info,
|
||||
socket_address node_ip,
|
||||
socket_address client_ip,
|
||||
db::consistency_level cl,
|
||||
const sstring& username,
|
||||
bool error) {
|
||||
auto time = std::chrono::system_clock::now();
|
||||
auto millis_since_epoch = std::chrono::duration_cast<std::chrono::milliseconds>(time.time_since_epoch()).count();
|
||||
auto ticks_per_day = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::hours(24)).count();
|
||||
auto date = millis_since_epoch / ticks_per_day * ticks_per_day;
|
||||
thread_local static int64_t last_nanos = 0;
|
||||
auto time_id = utils::UUID_gen::get_time_UUID(table_helper::make_monotonic_UUID_tp(last_nanos, time));
|
||||
auto consistency_level = fmt::format("{}", cl);
|
||||
std::vector<cql3::raw_value> values {
|
||||
cql3::raw_value::make_value(timestamp_type->decompose(date)),
|
||||
cql3::raw_value::make_value(inet_addr_type->decompose(node_ip.addr())),
|
||||
cql3::raw_value::make_value(uuid_type->decompose(time_id)),
|
||||
cql3::raw_value::make_value(utf8_type->decompose(audit_info->category_string())),
|
||||
cql3::raw_value::make_value(utf8_type->decompose(sstring(consistency_level))),
|
||||
cql3::raw_value::make_value(utf8_type->decompose(audit_info->table())),
|
||||
cql3::raw_value::make_value(utf8_type->decompose(audit_info->keyspace())),
|
||||
cql3::raw_value::make_value(utf8_type->decompose(audit_info->query())),
|
||||
cql3::raw_value::make_value(inet_addr_type->decompose(client_ip.addr())),
|
||||
cql3::raw_value::make_value(utf8_type->decompose(username)),
|
||||
cql3::raw_value::make_value(boolean_type->decompose(error)),
|
||||
};
|
||||
return cql3::query_options(cql3::default_cql_config, db::consistency_level::ONE, std::nullopt, std::move(values), false, cql3::query_options::specific_options::DEFAULT);
|
||||
}
|
||||
|
||||
cql3::query_options audit_cf_storage_helper::make_login_data(socket_address node_ip,
|
||||
socket_address client_ip,
|
||||
const sstring& username,
|
||||
bool error) {
|
||||
auto time = std::chrono::system_clock::now();
|
||||
auto millis_since_epoch = std::chrono::duration_cast<std::chrono::milliseconds>(time.time_since_epoch()).count();
|
||||
auto ticks_per_day = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::hours(24)).count();
|
||||
auto date = millis_since_epoch / ticks_per_day * ticks_per_day;
|
||||
thread_local static int64_t last_nanos = 0;
|
||||
auto time_id = utils::UUID_gen::get_time_UUID(table_helper::make_monotonic_UUID_tp(last_nanos, time));
|
||||
std::vector<cql3::raw_value> values {
|
||||
cql3::raw_value::make_value(timestamp_type->decompose(date)),
|
||||
cql3::raw_value::make_value(inet_addr_type->decompose(node_ip.addr())),
|
||||
cql3::raw_value::make_value(uuid_type->decompose(time_id)),
|
||||
cql3::raw_value::make_value(utf8_type->decompose(sstring("AUTH"))),
|
||||
cql3::raw_value::make_value(utf8_type->decompose(sstring(""))),
|
||||
cql3::raw_value::make_value(utf8_type->decompose(sstring(""))),
|
||||
cql3::raw_value::make_value(utf8_type->decompose(sstring(""))),
|
||||
cql3::raw_value::make_value(utf8_type->decompose(sstring("LOGIN"))),
|
||||
cql3::raw_value::make_value(inet_addr_type->decompose(client_ip.addr())),
|
||||
cql3::raw_value::make_value(utf8_type->decompose(username)),
|
||||
cql3::raw_value::make_value(boolean_type->decompose(error)),
|
||||
};
|
||||
return cql3::query_options(cql3::default_cql_config, db::consistency_level::ONE, std::nullopt, std::move(values), false, cql3::query_options::specific_options::DEFAULT);
|
||||
}
|
||||
|
||||
using registry = class_registrator<storage_helper, audit_cf_storage_helper, cql3::query_processor&, service::migration_manager&>;
|
||||
static registry registrator1("audit_cf_storage_helper");
|
||||
|
||||
}
|
||||
@@ -1,67 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2017 ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
#pragma once
|
||||
|
||||
#include "audit/audit.hh"
|
||||
#include "table_helper.hh"
|
||||
#include "storage_helper.hh"
|
||||
#include "db/config.hh"
|
||||
#include "service/raft/raft_group0_client.hh"
|
||||
|
||||
namespace cql3 {
|
||||
|
||||
class query_processor;
|
||||
|
||||
}
|
||||
|
||||
namespace service {
|
||||
|
||||
class migration_manager;
|
||||
|
||||
}
|
||||
|
||||
namespace audit {
|
||||
|
||||
class audit_cf_storage_helper : public storage_helper {
|
||||
static const sstring KEYSPACE_NAME;
|
||||
static const sstring TABLE_NAME;
|
||||
cql3::query_processor& _qp;
|
||||
service::migration_manager& _mm;
|
||||
table_helper _table;
|
||||
service::query_state _dummy_query_state;
|
||||
static cql3::query_options make_data(const audit_info* audit_info,
|
||||
socket_address node_ip,
|
||||
socket_address client_ip,
|
||||
db::consistency_level cl,
|
||||
const sstring& username,
|
||||
bool error);
|
||||
static cql3::query_options make_login_data(socket_address node_ip,
|
||||
socket_address client_ip,
|
||||
const sstring& username,
|
||||
bool error);
|
||||
|
||||
future<> migrate_audit_table(service::group0_guard guard);
|
||||
|
||||
public:
|
||||
explicit audit_cf_storage_helper(cql3::query_processor& qp, service::migration_manager& mm);
|
||||
virtual ~audit_cf_storage_helper() {}
|
||||
virtual future<> start(const db::config& cfg) override;
|
||||
virtual future<> stop() override;
|
||||
virtual future<> write(const audit_info* audit_info,
|
||||
socket_address node_ip,
|
||||
socket_address client_ip,
|
||||
db::consistency_level cl,
|
||||
const sstring& username,
|
||||
bool error) override;
|
||||
virtual future<> write_login(const sstring& username,
|
||||
socket_address node_ip,
|
||||
socket_address client_ip,
|
||||
bool error) override;
|
||||
};
|
||||
|
||||
}
|
||||
@@ -1,134 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2017 ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include "audit/audit_syslog_storage_helper.hh"
|
||||
|
||||
#include <sys/socket.h>
|
||||
#include <string.h>
|
||||
#include <fcntl.h>
|
||||
#include <unistd.h>
|
||||
#include <syslog.h>
|
||||
|
||||
#include <seastar/core/coroutine.hh>
|
||||
#include <seastar/core/seastar.hh>
|
||||
#include <seastar/net/api.hh>
|
||||
|
||||
#include <fmt/chrono.h>
|
||||
|
||||
#include "cql3/query_processor.hh"
|
||||
#include "utils/class_registrator.hh"
|
||||
|
||||
namespace cql3 {
|
||||
|
||||
class query_processor;
|
||||
|
||||
}
|
||||
|
||||
namespace audit {
|
||||
|
||||
namespace {
|
||||
|
||||
future<> syslog_send_helper(net::datagram_channel& sender,
|
||||
const socket_address& address,
|
||||
const sstring& msg) {
|
||||
return sender.send(address, net::packet{msg.data(), msg.size()}).handle_exception([address](auto&& exception_ptr) {
|
||||
auto error_msg = seastar::format(
|
||||
"Syslog audit backend failed (sending a message to {} resulted in {}).",
|
||||
address,
|
||||
exception_ptr
|
||||
);
|
||||
logger.error("{}", error_msg);
|
||||
throw audit_exception(std::move(error_msg));
|
||||
});
|
||||
}
|
||||
|
||||
static auto syslog_address_helper(const db::config& cfg)
|
||||
{
|
||||
return cfg.audit_unix_socket_path.is_set()
|
||||
? unix_domain_addr(cfg.audit_unix_socket_path())
|
||||
: unix_domain_addr(_PATH_LOG);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
audit_syslog_storage_helper::audit_syslog_storage_helper(cql3::query_processor& qp, service::migration_manager&) :
|
||||
_syslog_address(syslog_address_helper(qp.db().get_config())),
|
||||
_sender(make_unbound_datagram_channel(AF_UNIX)) {
|
||||
}
|
||||
|
||||
audit_syslog_storage_helper::~audit_syslog_storage_helper() {
|
||||
}
|
||||
|
||||
/*
|
||||
* We don't use openlog and syslog directly because it's already used by logger.
|
||||
* Audit needs to use different ident so than logger but syslog.h uses a global ident
|
||||
* and it's not possible to use more than one in a program.
|
||||
*
|
||||
* To work around it we directly communicate with the socket.
|
||||
*/
|
||||
future<> audit_syslog_storage_helper::start(const db::config& cfg) {
|
||||
if (this_shard_id() != 0) {
|
||||
return make_ready_future();
|
||||
}
|
||||
|
||||
return syslog_send_helper(_sender, _syslog_address, "Initializing syslog audit backend.");
|
||||
}
|
||||
|
||||
future<> audit_syslog_storage_helper::stop() {
|
||||
_sender.shutdown_output();
|
||||
co_return;
|
||||
}
|
||||
|
||||
future<> audit_syslog_storage_helper::write(const audit_info* audit_info,
|
||||
socket_address node_ip,
|
||||
socket_address client_ip,
|
||||
db::consistency_level cl,
|
||||
const sstring& username,
|
||||
bool error) {
|
||||
auto now = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());
|
||||
tm time;
|
||||
localtime_r(&now, &time);
|
||||
sstring msg = seastar::format("<{}>{:%h %e %T} scylla-audit: \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\"",
|
||||
LOG_NOTICE | LOG_USER,
|
||||
time,
|
||||
node_ip,
|
||||
audit_info->category_string(),
|
||||
cl,
|
||||
(error ? "true" : "false"),
|
||||
audit_info->keyspace(),
|
||||
audit_info->query(),
|
||||
client_ip,
|
||||
audit_info->table(),
|
||||
username);
|
||||
|
||||
return syslog_send_helper(_sender, _syslog_address, msg);
|
||||
}
|
||||
|
||||
future<> audit_syslog_storage_helper::write_login(const sstring& username,
|
||||
socket_address node_ip,
|
||||
socket_address client_ip,
|
||||
bool error) {
|
||||
|
||||
auto now = std::chrono::system_clock::to_time_t(std::chrono::system_clock::now());
|
||||
tm time;
|
||||
localtime_r(&now, &time);
|
||||
sstring msg = seastar::format("<{}>{:%h %e %T} scylla-audit: \"{}\", \"AUTH\", \"\", \"\", \"\", \"\", \"{}\", \"{}\", \"{}\"",
|
||||
LOG_NOTICE | LOG_USER,
|
||||
time,
|
||||
node_ip,
|
||||
client_ip,
|
||||
username,
|
||||
(error ? "true" : "false"));
|
||||
|
||||
co_await syslog_send_helper(_sender, _syslog_address, msg.c_str());
|
||||
}
|
||||
|
||||
using registry = class_registrator<storage_helper, audit_syslog_storage_helper, cql3::query_processor&, service::migration_manager&>;
|
||||
static registry registrator1("audit_syslog_storage_helper");
|
||||
|
||||
}
|
||||
@@ -1,44 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2017 ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
#pragma once
|
||||
|
||||
#include <seastar/net/api.hh>
|
||||
|
||||
#include "audit/audit.hh"
|
||||
#include "storage_helper.hh"
|
||||
#include "db/config.hh"
|
||||
|
||||
namespace service {
|
||||
|
||||
class migration_manager;
|
||||
|
||||
};
|
||||
|
||||
namespace audit {
|
||||
|
||||
class audit_syslog_storage_helper : public storage_helper {
|
||||
socket_address _syslog_address;
|
||||
net::datagram_channel _sender;
|
||||
public:
|
||||
explicit audit_syslog_storage_helper(cql3::query_processor&, service::migration_manager&);
|
||||
virtual ~audit_syslog_storage_helper();
|
||||
virtual future<> start(const db::config& cfg) override;
|
||||
virtual future<> stop() override;
|
||||
virtual future<> write(const audit_info* audit_info,
|
||||
socket_address node_ip,
|
||||
socket_address client_ip,
|
||||
db::consistency_level cl,
|
||||
const sstring& username,
|
||||
bool error) override;
|
||||
virtual future<> write_login(const sstring& username,
|
||||
socket_address node_ip,
|
||||
socket_address client_ip,
|
||||
bool error) override;
|
||||
};
|
||||
|
||||
}
|
||||
@@ -1,34 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2017 ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
#pragma once
|
||||
|
||||
#include "audit/audit.hh"
|
||||
#include <seastar/core/future.hh>
|
||||
|
||||
namespace audit {
|
||||
|
||||
class storage_helper {
|
||||
public:
|
||||
using ptr_type = std::unique_ptr<storage_helper>;
|
||||
storage_helper() {}
|
||||
virtual ~storage_helper() {}
|
||||
virtual future<> start(const db::config& cfg) = 0;
|
||||
virtual future<> stop() = 0;
|
||||
virtual future<> write(const audit_info* audit_info,
|
||||
socket_address node_ip,
|
||||
socket_address client_ip,
|
||||
db::consistency_level cl,
|
||||
const sstring& username,
|
||||
bool error) = 0;
|
||||
virtual future<> write_login(const sstring& username,
|
||||
socket_address node_ip,
|
||||
socket_address client_ip,
|
||||
bool error) = 0;
|
||||
};
|
||||
|
||||
}
|
||||
@@ -1,6 +1,4 @@
|
||||
include(add_whole_archive)
|
||||
find_package(OpenLDAP REQUIRED
|
||||
ldap)
|
||||
|
||||
add_library(scylla_auth STATIC)
|
||||
target_sources(scylla_auth
|
||||
@@ -12,7 +10,6 @@ target_sources(scylla_auth
|
||||
certificate_authenticator.cc
|
||||
common.cc
|
||||
default_authorizer.cc
|
||||
ldap_role_manager.cc
|
||||
password_authenticator.cc
|
||||
passwords.cc
|
||||
permission.cc
|
||||
@@ -21,7 +18,6 @@ target_sources(scylla_auth
|
||||
role_or_anonymous.cc
|
||||
roles-metadata.cc
|
||||
sasl_challenge.cc
|
||||
saslauthd_authenticator.cc
|
||||
service.cc
|
||||
standard_role_manager.cc
|
||||
transitional.cc
|
||||
@@ -35,14 +31,12 @@ target_link_libraries(scylla_auth
|
||||
xxHash::xxhash
|
||||
PRIVATE
|
||||
absl::headers
|
||||
OpenLDAP::ldap
|
||||
cql3
|
||||
idl
|
||||
ldap
|
||||
wasmtime_bindings
|
||||
libxcrypt::libxcrypt)
|
||||
|
||||
add_whole_archive(auth scylla_auth)
|
||||
|
||||
check_headers(check-headers scylla_auth
|
||||
GLOB_RECURSE ${CMAKE_CURRENT_SOURCE_DIR}/*.hh)
|
||||
GLOB_RECURSE ${CMAKE_CURRENT_SOURCE_DIR}/*.hh)
|
||||
|
||||
@@ -83,10 +83,6 @@ public:
|
||||
virtual ::shared_ptr<sasl_challenge> new_sasl_challenge() const override {
|
||||
throw std::runtime_error("Should not reach");
|
||||
}
|
||||
|
||||
virtual future<> ensure_superuser_is_created() const override {
|
||||
return make_ready_future<>();
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
@@ -14,8 +14,6 @@
|
||||
|
||||
const sstring auth::authenticator::USERNAME_KEY("username");
|
||||
const sstring auth::authenticator::PASSWORD_KEY("password");
|
||||
const sstring auth::authenticator::SERVICE_KEY("service");
|
||||
const sstring auth::authenticator::REALM_KEY("realm");
|
||||
|
||||
future<std::optional<auth::authenticated_user>> auth::authenticator::authenticate(session_dn_func) const {
|
||||
return make_ready_future<std::optional<auth::authenticated_user>>(std::nullopt);
|
||||
|
||||
@@ -67,12 +67,6 @@ public:
|
||||
///
|
||||
static const sstring PASSWORD_KEY;
|
||||
|
||||
/// Service for SASL authentication.
|
||||
static const sstring SERVICE_KEY;
|
||||
|
||||
/// Realm for SASL authentication.
|
||||
static const sstring REALM_KEY;
|
||||
|
||||
using credentials_map = std::unordered_map<sstring, sstring>;
|
||||
|
||||
virtual ~authenticator() = default;
|
||||
@@ -159,8 +153,6 @@ public:
|
||||
virtual const resource_set& protected_resources() const = 0;
|
||||
|
||||
virtual ::shared_ptr<sasl_challenge> new_sasl_challenge() const = 0;
|
||||
|
||||
virtual future<> ensure_superuser_is_created() const = 0;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
@@ -56,10 +56,6 @@ public:
|
||||
const resource_set& protected_resources() const override;
|
||||
|
||||
::shared_ptr<sasl_challenge> new_sasl_challenge() const override;
|
||||
|
||||
virtual future<> ensure_superuser_is_created() const override {
|
||||
return make_ready_future<>();
|
||||
}
|
||||
private:
|
||||
};
|
||||
|
||||
|
||||
@@ -16,6 +16,8 @@ extern "C" {
|
||||
#include <unistd.h>
|
||||
}
|
||||
|
||||
#include <boost/algorithm/string/join.hpp>
|
||||
#include <boost/range.hpp>
|
||||
#include <seastar/core/seastar.hh>
|
||||
#include <seastar/core/sleep.hh>
|
||||
|
||||
|
||||
@@ -1,345 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2019 ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include "ldap_role_manager.hh"
|
||||
|
||||
#include <boost/algorithm/string/replace.hpp>
|
||||
#include <fmt/format.h>
|
||||
#include <fmt/ranges.h>
|
||||
#include <ldap.h>
|
||||
#include <seastar/core/seastar.hh>
|
||||
#include <seastar/core/sstring.hh>
|
||||
#include <seastar/net/dns.hh>
|
||||
#include <seastar/util/log.hh>
|
||||
#include <seastar/core/coroutine.hh>
|
||||
#include <vector>
|
||||
|
||||
#include "common.hh"
|
||||
#include "cql3/query_processor.hh"
|
||||
#include "exceptions/exceptions.hh"
|
||||
#include "seastarx.hh"
|
||||
#include "service/raft/raft_group0_client.hh"
|
||||
#include "utils/class_registrator.hh"
|
||||
#include "db/config.hh"
|
||||
#include "utils/exponential_backoff_retry.hh"
|
||||
|
||||
namespace {
|
||||
|
||||
logger mylog{"ldap_role_manager"}; // `log` is taken by math.
|
||||
|
||||
struct url_desc_deleter {
|
||||
void operator()(LDAPURLDesc *p) {
|
||||
ldap_free_urldesc(p);
|
||||
}
|
||||
};
|
||||
|
||||
using url_desc_ptr = std::unique_ptr<LDAPURLDesc, url_desc_deleter>;
|
||||
|
||||
url_desc_ptr parse_url(std::string_view url) {
|
||||
LDAPURLDesc *desc = nullptr;
|
||||
if (ldap_url_parse(url.data(), &desc)) {
|
||||
mylog.error("error in ldap_url_parse({})", url);
|
||||
}
|
||||
return url_desc_ptr(desc);
|
||||
}
|
||||
|
||||
/// Extracts attribute \p attr from all entries in \p res.
///
/// Entries lacking the attribute are skipped with a warning; the attribute
/// values of all remaining entries are concatenated into the returned vector.
std::vector<sstring> get_attr_values(LDAP* ld, LDAPMessage* res, const char* attr) {
    std::vector<sstring> values;
    mylog.debug("Analyzing search results");
    for (auto e = ldap_first_entry(ld, res); e; e = ldap_next_entry(ld, e)) {
        // RAII deleters for the two kinds of libldap allocations used below.
        struct deleter {
            void operator()(berval** p) { ldap_value_free_len(p); }
            void operator()(char* p) { ldap_memfree(p); }
        };
        const std::unique_ptr<char, deleter> dname(ldap_get_dn(ld, e));
        mylog.debug("Analyzing entry {}", dname.get());
        const std::unique_ptr<berval*, deleter> vals(ldap_get_values_len(ld, e, attr));
        if (!vals) {
            mylog.warn("LDAP entry {} has no attribute {}", dname.get(), attr);
            continue;
        }
        // vals is a NULL-terminated array of berval pointers; copy each
        // length-delimited value (it need not be NUL-terminated itself).
        for (size_t i = 0; vals.get()[i]; ++i) {
            values.emplace_back(vals.get()[i]->bv_val, vals.get()[i]->bv_len);
        }
    }
    mylog.debug("Done analyzing search results; extracted roles {}", values);
    return values;
}
|
||||
|
||||
// Fully-qualified Java-style name under which this role manager is exposed.
const char* ldap_role_manager_full_name = "com.scylladb.auth.LDAPRoleManager";

} // anonymous namespace

namespace auth {

// Registers ldap_role_manager in the global role_manager factory so it can be
// instantiated by name from configuration.
static const class_registrator<
        role_manager,
        ldap_role_manager,
        cql3::query_processor&,
        ::service::raft_group0_client&,
        ::service::migration_manager&> registration(ldap_role_manager_full_name);
|
||||
|
||||
/// Primary constructor: stores the LDAP query/bind configuration and wires the
/// connection factory to call reconnect() whenever a fresh connection is needed.
ldap_role_manager::ldap_role_manager(
        std::string_view query_template, std::string_view target_attr, std::string_view bind_name, std::string_view bind_password,
        cql3::query_processor& qp, ::service::raft_group0_client& rg0c, ::service::migration_manager& mm)
        : _std_mgr(qp, rg0c, mm), _group0_client(rg0c), _query_template(query_template), _target_attr(target_attr), _bind_name(bind_name)
        , _bind_password(bind_password)
        , _connection_factory(bind(std::mem_fn(&ldap_role_manager::reconnect), std::ref(*this))) {
}

/// Convenience constructor: reads the LDAP settings from the cluster
/// configuration and delegates to the primary constructor. Required by
/// class_registrator<role_manager>.
ldap_role_manager::ldap_role_manager(cql3::query_processor& qp, ::service::raft_group0_client& rg0c, ::service::migration_manager& mm)
    : ldap_role_manager(
            qp.db().get_config().ldap_url_template(),
            qp.db().get_config().ldap_attr_role(),
            qp.db().get_config().ldap_bind_dn(),
            qp.db().get_config().ldap_bind_passwd(),
            qp,
            rg0c,
            mm) {
}
|
||||
|
||||
/// Returns the registered fully-qualified name of this role manager.
std::string_view ldap_role_manager::qualified_java_name() const noexcept {
    return ldap_role_manager_full_name;
}

/// Resources that must not be modified through CQL; delegated to the
/// underlying standard role manager.
const resource_set& ldap_role_manager::protected_resources() const {
    return _std_mgr.protected_resources();
}
|
||||
|
||||
/// Validates the configured LDAP URL template and starts the underlying
/// standard role manager. Fails if the template cannot be parsed.
future<> ldap_role_manager::start() {
    // Just need host and port -- any user should do for the template check.
    if (parse_url(get_url("dummy-user"))) {
        return _std_mgr.start();
    }
    return make_exception_future(
            std::runtime_error(fmt::format("error getting LDAP server address from template {}", _query_template)));
}
|
||||
|
||||
using conn_ptr = lw_shared_ptr<ldap_connection>;

/// Opens a TCP connection to the LDAP server named by the URL template and
/// performs an LDAP simple bind with the configured credentials.
///
/// Returns the bound connection, or an exceptional future on DNS, connect or
/// bind failure. The connection is closed before a bind failure propagates.
future<conn_ptr> ldap_role_manager::connect() {
    const auto desc = parse_url(get_url("dummy-user")); // Just need host and port -- any user should do.
    if (!desc) {
        co_return coroutine::exception(std::make_exception_ptr(std::runtime_error("connect attempted before a successful start")));
    }
    net::inet_address host = co_await net::dns::resolve_name(desc->lud_host);
    const socket_address addr(host, uint16_t(desc->lud_port));
    connected_socket sock = co_await seastar::connect(addr);
    auto conn = make_lw_shared<ldap_connection>(std::move(sock));
    sstring error;
    try {
        ldap_msg_ptr response = co_await conn->simple_bind(_bind_name.c_str(), _bind_password.c_str());
        if (!response || ldap_msgtype(response.get()) != LDAP_RES_BIND) {
            error = format("simple_bind error: {}", conn->get_error());
        }
    } catch (...) {
        // Capture the error instead of rethrowing so we can close the
        // connection before the exception leaves this coroutine.
        error = format("connect error: {}", std::current_exception());
    }
    if (!error.empty()) {
        co_await conn->close();
        co_return coroutine::exception(std::make_exception_ptr(std::runtime_error(std::move(error))));
    }
    co_return std::move(conn);
}
|
||||
|
||||
/// Invokes connect() repeatedly with exponential backoff (1s..32s, abortable
/// via _as) until it succeeds or 5 attempts have been made.
future<conn_ptr> ldap_role_manager::reconnect() {
    unsigned retries_left = 5;
    using namespace std::literals::chrono_literals;
    conn_ptr conn = co_await exponential_backoff_retry::do_until_value(1s, 32s, _as, [this, &retries_left] () -> future<std::optional<conn_ptr>> {
        if (!retries_left) {
            // Yielding a (null) value terminates the backoff loop; the null
            // pointer is translated into an exception below.
            co_return conn_ptr{};
        }
        mylog.trace("reconnect() retrying ({} attempts left)", retries_left);
        --retries_left;
        try {
            co_return co_await connect();
        } catch (...) {
            mylog.error("error in reconnect: {}", std::current_exception());
        }
        // nullopt means "no value yet" -- back off and try again.
        co_return std::nullopt;
    });

    mylog.trace("reconnect() finished backoff, conn={}", reinterpret_cast<void*>(conn.get()));
    if (conn) {
        co_return std::move(conn);
    }
    co_return coroutine::exception(std::make_exception_ptr(std::runtime_error("reconnect failed after 5 attempts")));
}
|
||||
|
||||
/// Aborts any in-flight reconnect backoff, then stops the standard role
/// manager followed by the connection factory.
future<> ldap_role_manager::stop() {
    _as.request_abort();
    co_await _std_mgr.stop();
    co_await _connection_factory.stop();
}
|
||||
|
||||
/// Role creation is delegated to the standard role manager.
future<> ldap_role_manager::create(std::string_view name, const role_config& config, ::service::group0_batch& mc) {
    return _std_mgr.create(name, config, mc);
}

/// Role removal is delegated to the standard role manager.
future<> ldap_role_manager::drop(std::string_view name, ::service::group0_batch& mc) {
    return _std_mgr.drop(name, mc);
}

/// Role alteration is delegated to the standard role manager.
future<> ldap_role_manager::alter(std::string_view name, const role_config_update& config, ::service::group0_batch& mc) {
    return _std_mgr.alter(name, config, mc);
}

/// Granting is disallowed: role membership is managed in the LDAP directory.
future<> ldap_role_manager::grant(std::string_view, std::string_view, ::service::group0_batch& mc) {
    return make_exception_future<>(exceptions::invalid_request_exception("Cannot grant roles with LDAPRoleManager."));
}

/// Revoking is disallowed: role membership is managed in the LDAP directory.
future<> ldap_role_manager::revoke(std::string_view, std::string_view, ::service::group0_batch& mc) {
    return make_exception_future<>(exceptions::invalid_request_exception("Cannot revoke roles with LDAPRoleManager."));
}
|
||||
|
||||
/// Queries the LDAP server for the roles granted to \p grantee_name.
///
/// Expands the URL template with the grantee's name, runs the resulting LDAP
/// search, and keeps only those returned role names that also exist in the
/// local standard role manager. The result always contains the grantee itself.
future<role_set> ldap_role_manager::query_granted(std::string_view grantee_name, recursive_role_query) {
    // Pass the view itself, not .data(): a string_view's data() is not
    // guaranteed to be NUL-terminated, and converting through char* would
    // also discard the view's length.
    const auto url = get_url(grantee_name);
    auto desc = parse_url(url);
    if (!desc) {
        return make_exception_future<role_set>(std::runtime_error(format("Error parsing URL {}", url)));
    }
    return _connection_factory.with_connection([this, desc = std::move(desc), grantee_name_ = sstring(grantee_name)]
    (ldap_connection& conn) -> future<role_set> {
        sstring grantee_name = std::move(grantee_name_);
        ldap_msg_ptr res = co_await conn.search(desc->lud_dn, desc->lud_scope, desc->lud_filter, desc->lud_attrs,
                /*attrsonly=*/0, /*serverctrls=*/nullptr, /*clientctrls=*/nullptr,
                /*timeout=*/nullptr, /*sizelimit=*/0);
        mylog.trace("query_granted: got search results");
        const auto mtype = ldap_msgtype(res.get());
        if (mtype != LDAP_RES_SEARCH_ENTRY && mtype != LDAP_RES_SEARCH_RESULT && mtype != LDAP_RES_SEARCH_REFERENCE) {
            mylog.error("ldap search yielded result {} of type {}", static_cast<const void*>(res.get()), mtype);
            co_return coroutine::exception(std::make_exception_ptr(std::runtime_error("ldap_role_manager: search result has wrong type")));
        }
        std::vector<sstring> values = get_attr_values(conn.get_ldap(), res.get(), _target_attr.c_str());
        auth::role_set valid_roles{grantee_name};

        // Each value is a role to be granted; drop any that the local
        // standard role manager does not know about.
        co_await parallel_for_each(values, [this, &valid_roles] (const sstring& ldap_role) {
            return _std_mgr.exists(ldap_role).then([&valid_roles, &ldap_role] (bool exists) {
                if (exists) {
                    valid_roles.insert(ldap_role);
                } else {
                    mylog.error("unrecognized role received from LDAP: {}", ldap_role);
                }
            });
        });

        co_return std::move(valid_roles);
    });
}
|
||||
|
||||
/// Builds a map from every known role to the roles directly granted to it,
/// by running an LDAP query per role; self-entries are omitted.
future<role_to_directly_granted_map>
ldap_role_manager::query_all_directly_granted() {
    role_to_directly_granted_map result;
    auto roles = co_await query_all();
    for (auto& role: roles) {
        auto granted_set = co_await query_granted(role, recursive_role_query::no);
        for (auto& granted: granted_set) {
            // query_granted() always includes the grantee itself; skip it.
            if (granted != role) {
                result.insert({role, granted});
            }
        }
    }
    co_return result;
}
|
||||
|
||||
/// Listing all roles cannot be answered by LDAP; delegated to the standard
/// role manager's local metadata.
future<role_set> ldap_role_manager::query_all() {
    return _std_mgr.query_all();
}
|
||||
|
||||
/// Auto-creates \p role_name (with login enabled) through a group0 operation.
///
/// Always runs on shard 0. Retries up to 10 times on concurrent group0
/// modification; a role that already exists counts as success.
future<> ldap_role_manager::create_role(std::string_view role_name) {
    return smp::submit_to(0, [this, role_name] () -> future<> {
        int retries = 10;
        while (true) {
            auto guard = co_await _group0_client.start_operation(_as, ::service::raft_timeout{});
            ::service::group0_batch batch(std::move(guard));
            auto cfg = role_config{.can_login = true};
            try {
                co_await create(role_name, cfg, batch);
                co_await std::move(batch).commit(_group0_client, _as, ::service::raft_timeout{});
            } catch (const role_already_exists&) {
                // ok
            } catch (const ::service::group0_concurrent_modification& ex) {
                mylog.warn("Failed to auto-create role \"{}\" due to guard conflict.{}.",
                        role_name, retries ? " Retrying" : " Number of retries exceeded, giving up");
                if (retries--) {
                    continue;
                }
                throw;
            }
            break;
        }
        // make sure to wait until create mutations are applied locally
        (void)(co_await _group0_client.start_operation(_as, ::service::raft_timeout{}));
    });
}
|
||||
|
||||
/// Checks whether \p role_name exists locally, auto-creating it when LDAP
/// already grants it at least one locally-known role.
future<bool> ldap_role_manager::exists(std::string_view role_name) {
    bool exists = co_await _std_mgr.exists(role_name);
    if (exists) {
        co_return true;
    }
    role_set roles = co_await query_granted(role_name, recursive_role_query::yes);
    // A role will get auto-created if it's already assigned any permissions.
    // The role set will always contains at least a single entry (the role itself),
    // so auto-creation is only triggered if at least one more external role is assigned.
    if (roles.size() > 1) {
        mylog.info("Auto-creating user {}", role_name);
        try {
            co_await create_role(role_name);
            exists = true;
        } catch (...) {
            // Best effort: log the failure and report the role as absent.
            mylog.error("Failed to auto-create role {}: {}", role_name, std::current_exception());
            exists = false;
        }
        co_return exists;
    }
    mylog.debug("Role {} will not be auto-created", role_name);
    co_return false;
}
|
||||
|
||||
/// Superuser status is tracked locally; delegated to the standard role manager.
future<bool> ldap_role_manager::is_superuser(std::string_view role_name) {
    return _std_mgr.is_superuser(role_name);
}

/// Login capability is tracked locally; delegated to the standard role manager.
future<bool> ldap_role_manager::can_login(std::string_view role_name) {
    return _std_mgr.can_login(role_name);
}

/// Role attributes are stored locally; delegated to the standard role manager.
future<std::optional<sstring>> ldap_role_manager::get_attribute(
        std::string_view role_name, std::string_view attribute_name) {
    return _std_mgr.get_attribute(role_name, attribute_name);
}

/// Delegated to the standard role manager.
future<role_manager::attribute_vals> ldap_role_manager::query_attribute_for_all(std::string_view attribute_name) {
    return _std_mgr.query_attribute_for_all(attribute_name);
}
|
||||
|
||||
/// Sets attribute \p attribute_name to \p attribute_value on \p role_name;
/// delegated to the standard role manager.
future<> ldap_role_manager::set_attribute(
        std::string_view role_name, std::string_view attribute_name, std::string_view attribute_value, ::service::group0_batch& mc) {
    // Bug fix: attribute_value was previously passed twice, so the attribute
    // name argument was silently dropped. Forward (name, value) correctly,
    // matching remove_attribute()'s delegation pattern.
    return _std_mgr.set_attribute(role_name, attribute_name, attribute_value, mc);
}
|
||||
|
||||
/// Attribute removal is delegated to the standard role manager.
future<> ldap_role_manager::remove_attribute(std::string_view role_name, std::string_view attribute_name, ::service::group0_batch& mc) {
    return _std_mgr.remove_attribute(role_name, attribute_name, mc);
}

/// Macro-expands the configured URL template, replacing every "{USER}"
/// placeholder with \p user.
sstring ldap_role_manager::get_url(std::string_view user) const {
    return boost::replace_all_copy(_query_template, "{USER}", user);
}

/// Returns no grant statements.
future<std::vector<cql3::description>> ldap_role_manager::describe_role_grants() {
    // Since grants are performed by the ldap admin, we shouldn't echo them back
    co_return std::vector<cql3::description>();
}

/// No-op: user provisioning is handled by the LDAP directory.
future<> ldap_role_manager::ensure_superuser_is_created() {
    // ldap is responsible for users
    co_return;
}
|
||||
|
||||
} // namespace auth
|
||||
@@ -1,114 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2019 ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <seastar/core/abort_source.hh>
|
||||
#include <stdexcept>
|
||||
|
||||
#include "ent/ldap/ldap_connection.hh"
|
||||
#include "standard_role_manager.hh"
|
||||
|
||||
namespace auth {
|
||||
|
||||
/// Queries an LDAP server for roles.
///
/// Since LDAP grants and revokes roles, calling grant() and revoke() is disallowed.
///
/// We query LDAP for a list of a particular user's roles, and the results must match roles that exist in the
/// database. Furthermore, the user must have already authenticated to Scylla, meaning it, too, exists in the
/// database. Therefore, some of the role_manager functionality is provided by a standard_role_manager under
/// the hood. For example, listing all roles or checking if the user can login cannot currently be determined
/// by querying LDAP, so they are delegated to the standard_role_manager.
class ldap_role_manager : public role_manager {
    standard_role_manager _std_mgr; ///< Handles everything LDAP cannot answer (local role metadata).
    ::service::raft_group0_client& _group0_client; ///< Used by role auto-creation.
    seastar::sstring _query_template; ///< LDAP URL dictating which query to make.
    seastar::sstring _target_attr; ///< LDAP entry attribute containing the Scylla role name.
    seastar::sstring _bind_name; ///< Username for LDAP simple bind.
    seastar::sstring _bind_password; ///< Password for LDAP simple bind.
    mutable ldap_reuser _connection_factory; // Potentially modified by query_granted().
    seastar::abort_source _as; ///< Aborts reconnect backoff and group0 operations on stop().
public:
    ldap_role_manager(
            std::string_view query_template, ///< LDAP query template as described in Scylla documentation.
            std::string_view target_attr, ///< LDAP entry attribute containing the Scylla role name.
            std::string_view bind_name, ///< LDAP bind credentials.
            std::string_view bind_password, ///< LDAP bind credentials.
            cql3::query_processor& qp, ///< Passed to standard_role_manager.
            ::service::raft_group0_client& rg0c, ///< Passed to standard_role_manager.
            ::service::migration_manager& mm ///< Passed to standard_role_manager.
    );

    /// Retrieves LDAP configuration entries from qp and invokes the other constructor. Required by
    /// class_registrator<role_manager>.
    ldap_role_manager(cql3::query_processor& qp, ::service::raft_group0_client& rg0c, ::service::migration_manager& mm);

    /// Thrown when query-template parsing fails.
    struct url_error : public std::runtime_error {
        using runtime_error::runtime_error;
    };

    std::string_view qualified_java_name() const noexcept override;

    const resource_set& protected_resources() const override;

    future<> start() override;

    future<> stop() override;

    future<> create(std::string_view, const role_config&, ::service::group0_batch& mc) override;

    future<> drop(std::string_view, ::service::group0_batch& mc) override;

    future<> alter(std::string_view, const role_config_update&, ::service::group0_batch& mc) override;

    future<> grant(std::string_view, std::string_view, ::service::group0_batch& mc) override;

    future<> revoke(std::string_view, std::string_view, ::service::group0_batch& mc) override;

    future<role_set> query_granted(std::string_view, recursive_role_query) override;

    future<role_to_directly_granted_map> query_all_directly_granted() override;

    future<role_set> query_all() override;

    future<bool> exists(std::string_view) override;

    future<bool> is_superuser(std::string_view) override;

    future<bool> can_login(std::string_view) override;

    future<std::optional<sstring>> get_attribute(std::string_view, std::string_view) override;

    future<role_manager::attribute_vals> query_attribute_for_all(std::string_view) override;

    future<> set_attribute(std::string_view, std::string_view, std::string_view, ::service::group0_batch& mc) override;

    future<> remove_attribute(std::string_view, std::string_view, ::service::group0_batch& mc) override;

    future<std::vector<cql3::description>> describe_role_grants() override;
private:
    /// Connects to the LDAP server indicated by _query_template and executes LDAP bind using _bind_name and
    /// _bind_password. Returns the resulting ldap_connection.
    future<lw_shared_ptr<ldap_connection>> connect();

    /// Invokes connect() repeatedly with backoff, until it succeeds or retry limit is reached.
    future<seastar::lw_shared_ptr<ldap_connection>> reconnect();

    /// Macro-expands _query_template, returning the result.
    sstring get_url(std::string_view user) const;

    /// Used to auto-create roles returned by ldap.
    future<> create_role(std::string_view role_name);

    future<> ensure_superuser_is_created() override;
};
|
||||
|
||||
} // namespace auth
|
||||
@@ -147,9 +147,6 @@ future<> password_authenticator::start() {
|
||||
_stopped = do_after_system_ready(_as, [this] {
|
||||
return async([this] {
|
||||
if (legacy_mode(_qp)) {
|
||||
if (!_superuser_created_promise.available()) {
|
||||
_superuser_created_promise.set_value();
|
||||
}
|
||||
_migration_manager.wait_for_schema_agreement(_qp.db().real_database(), db::timeout_clock::time_point::max(), &_as).get();
|
||||
|
||||
if (any_nondefault_role_row_satisfies(_qp, &has_salted_hash, _superuser).get()) {
|
||||
@@ -165,11 +162,7 @@ future<> password_authenticator::start() {
|
||||
return;
|
||||
}
|
||||
}
|
||||
utils::get_local_injector().inject("password_authenticator_start_pause", utils::wait_for_message(5min)).get();
|
||||
create_default_if_missing().get();
|
||||
if (!legacy_mode(_qp)) {
|
||||
_superuser_created_promise.set_value();
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
@@ -368,8 +361,4 @@ const resource_set& password_authenticator::protected_resources() const {
|
||||
});
|
||||
}
|
||||
|
||||
future<> password_authenticator::ensure_superuser_is_created() const {
|
||||
return _superuser_created_promise.get_shared_future();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -11,7 +11,6 @@
|
||||
#pragma once
|
||||
|
||||
#include <seastar/core/abort_source.hh>
|
||||
#include <seastar/core/shared_future.hh>
|
||||
|
||||
#include "db/consistency_level_type.hh"
|
||||
#include "auth/authenticator.hh"
|
||||
@@ -42,7 +41,6 @@ class password_authenticator : public authenticator {
|
||||
future<> _stopped;
|
||||
abort_source _as;
|
||||
std::string _superuser;
|
||||
shared_promise<> _superuser_created_promise;
|
||||
|
||||
public:
|
||||
static db::consistency_level consistency_for_user(std::string_view role_name);
|
||||
@@ -82,8 +80,6 @@ public:
|
||||
|
||||
virtual ::shared_ptr<sasl_challenge> new_sasl_challenge() const override;
|
||||
|
||||
virtual future<> ensure_superuser_is_created() const override;
|
||||
|
||||
private:
|
||||
bool legacy_metadata_exists() const;
|
||||
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
#include <iterator>
|
||||
#include <unordered_map>
|
||||
|
||||
#include <boost/algorithm/string/join.hpp>
|
||||
#include <boost/algorithm/string/split.hpp>
|
||||
#include <boost/algorithm/string/classification.hpp>
|
||||
|
||||
@@ -147,7 +148,7 @@ resource::resource(functions_resource_t, std::string_view keyspace, std::string_
|
||||
}
|
||||
|
||||
sstring resource::name() const {
|
||||
return fmt::to_string(fmt::join(_parts, "/"));
|
||||
return boost::algorithm::join(_parts, "/");
|
||||
}
|
||||
|
||||
std::optional<resource> resource::parent() const {
|
||||
|
||||
@@ -68,11 +68,4 @@ future<authenticated_user> plain_sasl_challenge::get_authenticated_user() const
|
||||
return _when_complete(*_username, *_password);
|
||||
}
|
||||
|
||||
const sstring& plain_sasl_challenge::get_username() const {
|
||||
if (!_username) {
|
||||
throw std::logic_error("plain_sasl_challenge::get_username() called without username");
|
||||
}
|
||||
return *_username;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -35,8 +35,6 @@ public:
|
||||
virtual bool is_complete() const = 0;
|
||||
|
||||
virtual future<authenticated_user> get_authenticated_user() const = 0;
|
||||
|
||||
virtual const sstring& get_username() const = 0;
|
||||
};
|
||||
|
||||
class plain_sasl_challenge : public sasl_challenge {
|
||||
@@ -52,8 +50,6 @@ public:
|
||||
|
||||
virtual future<authenticated_user> get_authenticated_user() const override;
|
||||
|
||||
virtual const sstring& get_username() const override;
|
||||
|
||||
private:
|
||||
std::optional<sstring> _username, _password;
|
||||
completion_callback _when_complete;
|
||||
|
||||
@@ -1,202 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2020 ScyllaDB
|
||||
*
|
||||
* Modified by ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include "auth/saslauthd_authenticator.hh"
|
||||
|
||||
#include <algorithm>
|
||||
#include <seastar/core/reactor.hh>
|
||||
#include <seastar/core/temporary_buffer.hh>
|
||||
#include <seastar/net/api.hh>
|
||||
#include <seastar/net/socket_defs.hh>
|
||||
#include <seastar/util/log.hh>
|
||||
#include <system_error>
|
||||
#include "common.hh"
|
||||
#include "cql3/query_processor.hh"
|
||||
#include "db/config.hh"
|
||||
#include "utils/log.hh"
|
||||
#include "seastarx.hh"
|
||||
#include "utils/class_registrator.hh"
|
||||
|
||||
namespace auth {
|
||||
|
||||
static logging::logger mylog("saslauthd_authenticator");

// To ensure correct initialization order, we unfortunately need to use a string literal.
static const class_registrator<
        authenticator,
        saslauthd_authenticator,
        cql3::query_processor&,
        ::service::raft_group0_client&,
        ::service::migration_manager&> saslauthd_auth_reg("com.scylladb.auth.SaslauthdAuthenticator");

/// Reads the saslauthd socket path from configuration; the group0/migration
/// arguments are required by the registrator signature but unused.
saslauthd_authenticator::saslauthd_authenticator(cql3::query_processor& qp, ::service::raft_group0_client&, ::service::migration_manager&)
    : _socket_path(qp.db().get_config().saslauthd_socket_path())
{}
|
||||
|
||||
/// Startup check: warns (but does not fail) if the saslauthd socket file is
/// missing, since saslauthd may simply not be running yet.
future<> saslauthd_authenticator::start() {
    return once_among_shards([this] {
        return file_exists(_socket_path).then([this] (bool exists) {
            if (!exists) {
                mylog.warn("saslauthd socket file {} doesn't exist -- is saslauthd running?", _socket_path);
            }
            return make_ready_future();
        });
    });
}
|
||||
|
||||
/// Nothing to tear down: the socket is opened per authentication attempt.
future<> saslauthd_authenticator::stop() { return make_ready_future(); }

/// Registered fully-qualified name of this authenticator.
std::string_view saslauthd_authenticator::qualified_java_name() const {
    return "com.scylladb.auth.SaslauthdAuthenticator";
}

bool saslauthd_authenticator::require_authentication() const {
    return true;
}

authentication_option_set saslauthd_authenticator::supported_options() const {
    return authentication_option_set{authentication_option::password, authentication_option::options};
}

/// Every supported option can also be altered.
authentication_option_set saslauthd_authenticator::alterable_options() const {
    return supported_options();
}
|
||||
|
||||
namespace {

// Note the saslauthd protocol description:
// https://github.com/cyrusimap/cyrus-sasl/blob/f769dde423e1b3ae8bfb35b826fca3d5f1e1f6fe/saslauthd/saslauthd-main.c#L74

// Each field on the wire is preceded by a 2-byte big-endian length.
constexpr size_t len_size = sizeof(htons(0));

/// Writes s as a length-prefixed field at p; returns the position just past it.
/// NOTE(review): the length is truncated to uint16_t -- assumes credential
/// fields never exceed 65535 bytes; confirm callers bound their sizes.
char* pack(std::string_view s, char* p) {
    uint16_t size = s.size();
    produce_be(p, size);
    memcpy(p, s.data(), size);
    return p + size;
}

/// Serializes the four credential fields into one saslauthd request buffer.
temporary_buffer<char> make_saslauthd_message(const saslauthd_credentials& creds) {
    temporary_buffer<char> message(
            creds.username.size() + creds.password.size() + creds.service.size() + creds.realm.size()
            + 4 * len_size);
    auto p = pack(creds.username, message.get_write());
    p = pack(creds.password, p);
    p = pack(creds.service, p);
    p = pack(creds.realm, p);
    return message;
}

/// An exception handler that reports saslauthd socket IO error.
future<bool> as_authentication_exception(std::exception_ptr ex) {
    return make_exception_future<bool>(
            exceptions::authentication_exception(format("saslauthd socket IO error: {}", ex)));
}

} // anonymous namespace
|
||||
|
||||
/// Authenticates \p creds against the saslauthd daemon listening on the Unix
/// domain socket at \p saslauthd_socket_path.
///
/// Protocol: write the four length-prefixed credential fields, then read a
/// 2-byte big-endian length followed by that many payload bytes; a payload
/// starting with "OK" means success. Socket IO errors surface as
/// authentication_exception.
future<bool> authenticate_with_saslauthd(sstring saslauthd_socket_path, const saslauthd_credentials& creds) {
    socket_address addr((unix_domain_addr(saslauthd_socket_path)));
    // TODO: switch to seastar::connect() when it supports Unix domain sockets.
    return engine().net().connect(addr).then([creds = std::move(creds)] (connected_socket s) {
        return do_with(
                s.input(), s.output(),
                [creds = std::move(creds)] (input_stream<char>& in, output_stream<char>& out) {
            return out.write(make_saslauthd_message(creds)).then([&in, &out] () mutable {
                return out.flush().then([&in] () mutable {
                    // First read the 2-byte response-length header.
                    return in.read_exactly(2).then([&in] (temporary_buffer<char> len) mutable {
                        if (len.size() < 2) {
                            return make_exception_future<bool>(
                                    exceptions::authentication_exception(
                                            "saslauthd closed connection before completing response"));
                        }
                        const auto paylen = read_be<uint16_t>(len.get());
                        return in.read_exactly(paylen).then([paylen] (temporary_buffer<char> resp) {
                            mylog.debug("saslauthd response: {}", std::string_view(resp.get(), resp.size()));
                            if (resp.size() != paylen) {
                                return make_exception_future<bool>(
                                        exceptions::authentication_exception(
                                                // We say "different" here, though we could just as well say
                                                // "shorter". A longer response is cut to size by
                                                // read_exactly().
                                                "saslauthd response length different than promised"));
                            }
                            // Success iff the payload begins with "OK".
                            bool ok = (resp.size() >= 2 && resp[0] == 'O' && resp[1] == 'K');
                            return make_ready_future<bool>(ok);
                        });
                    }).finally([&in] () mutable { return in.close(); });
                }).handle_exception(as_authentication_exception).finally([&out] () mutable {
                    return out.close();
                });
            });
        });
    }).handle_exception_type([] (std::system_error& e) {
        return make_exception_future<bool>(
                exceptions::authentication_exception(format("saslauthd socket connection error: {}", e.what())));
    });
}
|
||||
|
||||
/// Validates the username/password (plus optional service and realm) from
/// \p credentials against saslauthd; throws authentication_exception on
/// missing keys or rejected credentials.
future<authenticated_user> saslauthd_authenticator::authenticate(const credentials_map& credentials) const {
    const auto username_found = credentials.find(USERNAME_KEY);
    if (username_found == credentials.end()) {
        throw exceptions::authentication_exception(format("Required key '{}' is missing", USERNAME_KEY));
    }
    const auto password_found = credentials.find(PASSWORD_KEY);
    if (password_found == credentials.end()) {
        throw exceptions::authentication_exception(format("Required key '{}' is missing", PASSWORD_KEY));
    }
    // Service and realm are optional; empty strings are sent when absent.
    const auto service_found = credentials.find(SERVICE_KEY);
    const auto realm_found = credentials.find(REALM_KEY);

    sstring username = username_found->second;
    return authenticate_with_saslauthd(_socket_path, {username, password_found->second,
                    service_found == credentials.end() ? "" : service_found->second,
                    realm_found == credentials.end() ? "" : realm_found->second}).then([username] (bool ok) {
        if (!ok) {
            throw exceptions::authentication_exception("Incorrect credentials");
        }
        return make_ready_future<authenticated_user>(username);
    });
}
|
||||
|
||||
/// Role creation without credentials is a no-op; creating a password is
/// rejected because saslauthd owns all credential storage.
future<> saslauthd_authenticator::create(std::string_view role_name, const authentication_options& options, ::service::group0_batch& mc) {
    if (!options.credentials) {
        return make_ready_future<>();
    }
    throw exceptions::authentication_exception("Cannot create passwords with SaslauthdAuthenticator");
}

/// Same policy as create(): only a credential-free alter is allowed.
future<> saslauthd_authenticator::alter(std::string_view role_name, const authentication_options& options, ::service::group0_batch& mc) {
    if (!options.credentials) {
        return make_ready_future<>();
    }
    throw exceptions::authentication_exception("Cannot modify passwords with SaslauthdAuthenticator");
}

/// Password deletion is never possible through Scylla with saslauthd.
future<> saslauthd_authenticator::drop(std::string_view name, ::service::group0_batch& mc) {
    throw exceptions::authentication_exception("Cannot delete passwords with SaslauthdAuthenticator");
}

/// No custom options are stored per role.
future<custom_options> saslauthd_authenticator::query_custom_options(std::string_view role_name) const {
    return make_ready_future<custom_options>();
}

/// No resources need protection: nothing is stored in the database.
const resource_set& saslauthd_authenticator::protected_resources() const {
    static const resource_set empty;
    return empty;
}

/// SASL PLAIN challenge whose completion callback re-enters authenticate().
::shared_ptr<sasl_challenge> saslauthd_authenticator::new_sasl_challenge() const {
    return ::make_shared<plain_sasl_challenge>([this](std::string_view username, std::string_view password) {
        return this->authenticate(credentials_map{{USERNAME_KEY, sstring(username)}, {PASSWORD_KEY, sstring(password)}});
    });
}
|
||||
|
||||
} // namespace auth
|
||||
@@ -1,72 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2020 ScyllaDB
|
||||
*
|
||||
* Modified by ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "auth/authenticator.hh"
|
||||
|
||||
namespace cql3 {
|
||||
class query_processor;
|
||||
}
|
||||
|
||||
namespace service {
|
||||
class migration_manager;
|
||||
class raft_group0_client;
|
||||
}
|
||||
|
||||
namespace auth {
|
||||
|
||||
/// Delegates authentication to saslauthd. When this class is asked to authenticate, it passes the credentials
/// to saslauthd, gets its response, and allows or denies authentication based on that response.
class saslauthd_authenticator : public authenticator {
    sstring _socket_path; ///< Path to the domain socket on which saslauthd is listening.
public:
    saslauthd_authenticator(cql3::query_processor&, ::service::raft_group0_client&, ::service::migration_manager&);

    future<> start() override;

    future<> stop() override;

    std::string_view qualified_java_name() const override;

    bool require_authentication() const override;

    authentication_option_set supported_options() const override;

    authentication_option_set alterable_options() const override;

    future<authenticated_user> authenticate(const credentials_map& credentials) const override;

    future<> create(std::string_view role_name, const authentication_options& options, ::service::group0_batch& mc) override;

    future<> alter(std::string_view role_name, const authentication_options& options, ::service::group0_batch& mc) override;

    future<> drop(std::string_view role_name, ::service::group0_batch& mc) override;

    future<custom_options> query_custom_options(std::string_view role_name) const override;

    const resource_set& protected_resources() const override;

    ::shared_ptr<sasl_challenge> new_sasl_challenge() const override;

    /// No-op: saslauthd owns all credentials, so there is nothing to create.
    virtual future<> ensure_superuser_is_created() const override {
        return make_ready_future<>();
    }
};

/// A set of four credential strings that saslauthd expects.
struct saslauthd_credentials {
    sstring username, password, service, realm;
};

/// Sends \p creds to the saslauthd daemon at \p saslauthd_socket_path and
/// resolves to whether it accepted them.
future<bool> authenticate_with_saslauthd(sstring saslauthd_socket_path, const saslauthd_credentials& creds);
|
||||
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
#include "auth/service.hh"
|
||||
|
||||
#include <algorithm>
|
||||
#include <boost/algorithm/string/join.hpp>
|
||||
#include <chrono>
|
||||
|
||||
#include <seastar/core/future-util.hh>
|
||||
@@ -263,8 +264,7 @@ future<> service::stop() {
|
||||
}
|
||||
|
||||
future<> service::ensure_superuser_is_created() {
|
||||
co_await _role_manager->ensure_superuser_is_created();
|
||||
co_await _authenticator->ensure_superuser_is_created();
|
||||
return _role_manager->ensure_superuser_is_created();
|
||||
}
|
||||
|
||||
void service::update_cache_config() {
|
||||
@@ -676,8 +676,7 @@ future<permission_set> get_permissions(const service& ser, const authenticated_u
|
||||
}
|
||||
|
||||
bool is_protected(const service& ser, command_desc cmd) noexcept {
|
||||
if (cmd.type_ == command_desc::type::ALTER_WITH_OPTS ||
|
||||
cmd.type_ == command_desc::type::ALTER_SYSTEM_WITH_ALLOWED_OPTS) {
|
||||
if (cmd.type_ == command_desc::type::ALTER_WITH_OPTS) {
|
||||
return false; // Table attributes are OK to modify; see #7057.
|
||||
}
|
||||
return ser.underlying_role_manager().protected_resources().contains(cmd.resource)
|
||||
@@ -876,6 +875,7 @@ future<> migrate_to_auth_v2(db::system_keyspace& sys_ks, ::service::raft_group0_
|
||||
for (const auto& col : schema->all_columns()) {
|
||||
col_names.push_back(col.name_as_cql_string());
|
||||
}
|
||||
auto col_names_str = boost::algorithm::join(col_names, ", ");
|
||||
sstring val_binders_str = "?";
|
||||
for (size_t i = 1; i < col_names.size(); ++i) {
|
||||
val_binders_str += ", ?";
|
||||
@@ -891,10 +891,10 @@ future<> migrate_to_auth_v2(db::system_keyspace& sys_ks, ::service::raft_group0_
|
||||
}
|
||||
}
|
||||
auto muts = co_await qp.get_mutations_internal(
|
||||
seastar::format("INSERT INTO {}.{} ({}) VALUES ({})",
|
||||
format("INSERT INTO {}.{} ({}) VALUES ({})",
|
||||
db::system_keyspace::NAME,
|
||||
cf_name,
|
||||
fmt::join(col_names, ", "),
|
||||
col_names_str,
|
||||
val_binders_str),
|
||||
internal_distributed_query_state(),
|
||||
ts,
|
||||
|
||||
@@ -224,7 +224,6 @@ struct command_desc {
|
||||
const ::auth::resource& resource; ///< Resource impacted by this command.
|
||||
enum class type {
|
||||
ALTER_WITH_OPTS, ///< Command is ALTER ... WITH ...
|
||||
ALTER_SYSTEM_WITH_ALLOWED_OPTS,
|
||||
OTHER
|
||||
} type_ = type::OTHER;
|
||||
};
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
#include <unordered_set>
|
||||
#include <vector>
|
||||
|
||||
#include <boost/algorithm/string/join.hpp>
|
||||
#include <seastar/core/future-util.hh>
|
||||
#include <seastar/core/on_internal_error.hh>
|
||||
#include <seastar/core/format.hh>
|
||||
@@ -329,7 +330,7 @@ standard_role_manager::alter(std::string_view role_name, const role_config_updat
|
||||
assignments.push_back(sstring("can_login = ") + (*u.can_login ? "true" : "false"));
|
||||
}
|
||||
|
||||
return fmt::to_string(fmt::join(assignments, ", "));
|
||||
return boost::algorithm::join(assignments, ", ");
|
||||
};
|
||||
|
||||
return require_record(_qp, role_name).then([this, role_name, &u, &mc](record) {
|
||||
|
||||
@@ -146,10 +146,6 @@ public:
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
const sstring& get_username() const override {
|
||||
return _sasl->get_username();
|
||||
}
|
||||
|
||||
private:
|
||||
@@ -159,10 +155,6 @@ public:
|
||||
};
|
||||
return ::make_shared<sasl_wrapper>(_authenticator->new_sasl_challenge());
|
||||
}
|
||||
|
||||
virtual future<> ensure_superuser_is_created() const override {
|
||||
return _authenticator->ensure_superuser_is_created();
|
||||
}
|
||||
};
|
||||
|
||||
class transitional_authorizer : public authorizer {
|
||||
|
||||
@@ -1,10 +1,8 @@
|
||||
#!/bin/bash -e
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright (C) 2023-present ScyllaDB
|
||||
# SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
|
||||
trap 'echo "error $? in $0 line $LINENO"' ERR
|
||||
|
||||
here=$(dirname "$0")
|
||||
exec "$here/../tools/cqlsh/bin/cqlsh.py" "$@"
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#!/bin/bash -e
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright (C) 2024-present ScyllaDB
|
||||
#
|
||||
@@ -6,8 +6,6 @@
|
||||
# SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
#
|
||||
|
||||
trap 'echo "error $? in $0 line $LINENO"' ERR
|
||||
|
||||
SCRIPT_PATH=$(dirname $(realpath "$0"))
|
||||
|
||||
INSTALLED_SCYLLA_PATH="${SCRIPT_PATH}/scylla"
|
||||
|
||||
2
bytes.hh
2
bytes.hh
@@ -20,8 +20,6 @@
|
||||
#include "utils/mutable_view.hh"
|
||||
#include "utils/simple_hashers.hh"
|
||||
|
||||
using sstring_view = std::string_view;
|
||||
|
||||
inline bytes to_bytes(bytes&& b) {
|
||||
return std::move(b);
|
||||
}
|
||||
|
||||
@@ -10,7 +10,6 @@
|
||||
|
||||
#include "utils/assert.hh"
|
||||
#include <vector>
|
||||
#include <seastar/core/when_all.hh>
|
||||
#include "row_cache.hh"
|
||||
#include "mutation/mutation_fragment.hh"
|
||||
#include "query-request.hh"
|
||||
@@ -402,7 +402,7 @@ future<cdc::generation_id> generation_service::legacy_make_new_generation(const
|
||||
throw std::runtime_error(
|
||||
format("Can't find endpoint for token {}", end));
|
||||
}
|
||||
const auto ep = _gossiper.get_address_map().get(*endpoint);
|
||||
const auto ep = tmptr->get_endpoint_for_host_id(*endpoint);
|
||||
auto sc = get_shard_count(ep, _gossiper);
|
||||
return {sc > 0 ? sc : 1, get_sharding_ignore_msb(ep, _gossiper)};
|
||||
}
|
||||
@@ -1025,7 +1025,7 @@ future<> generation_service::legacy_handle_cdc_generation(std::optional<cdc::gen
|
||||
if (using_this_gen) {
|
||||
cdc_log.info("Starting to use generation {}", *gen_id);
|
||||
co_await update_streams_description(*gen_id, _sys_ks.local(), get_sys_dist_ks(),
|
||||
[&tm = _token_metadata] { return tm.get()->count_normal_token_owners(); },
|
||||
[tmptr = _token_metadata.get()] { return tmptr->count_normal_token_owners(); },
|
||||
_abort_src);
|
||||
}
|
||||
}
|
||||
@@ -1042,7 +1042,7 @@ void generation_service::legacy_async_handle_cdc_generation(cdc::generation_id g
|
||||
if (using_this_gen) {
|
||||
cdc_log.info("Starting to use generation {}", gen_id);
|
||||
co_await update_streams_description(gen_id, svc->_sys_ks.local(), svc->get_sys_dist_ks(),
|
||||
[&tm = svc->_token_metadata] { return tm.get()->count_normal_token_owners(); },
|
||||
[tmptr = svc->_token_metadata.get()] { return tmptr->count_normal_token_owners(); },
|
||||
svc->_abort_src);
|
||||
}
|
||||
co_return;
|
||||
@@ -1112,9 +1112,7 @@ future<bool> generation_service::legacy_do_handle_cdc_generation(cdc::generation
|
||||
auto sys_dist_ks = get_sys_dist_ks();
|
||||
auto gen = co_await retrieve_generation_data(gen_id, _sys_ks.local(), *sys_dist_ks, { _token_metadata.get()->count_normal_token_owners() });
|
||||
if (!gen) {
|
||||
// This may happen during raft upgrade when a node gossips about a generation that
|
||||
// was propagated through raft and we didn't apply it yet.
|
||||
throw generation_handling_nonfatal_exception(fmt::format(
|
||||
throw std::runtime_error(fmt::format(
|
||||
"Could not find CDC generation {} in distributed system tables (current time: {}),"
|
||||
" even though some node gossiped about it.",
|
||||
gen_id, db_clock::now()));
|
||||
|
||||
16
cdc/log.cc
16
cdc/log.cc
@@ -10,6 +10,7 @@
|
||||
#include <algorithm>
|
||||
|
||||
#include <boost/range/irange.hpp>
|
||||
#include <boost/algorithm/string/predicate.hpp>
|
||||
#include <seastar/core/thread.hh>
|
||||
#include <seastar/core/metrics.hh>
|
||||
|
||||
@@ -40,7 +41,6 @@
|
||||
#include "types/listlike_partial_deserializing_iterator.hh"
|
||||
#include "tracing/trace_state.hh"
|
||||
#include "stats.hh"
|
||||
#include "utils/labels.hh"
|
||||
|
||||
namespace std {
|
||||
|
||||
@@ -68,7 +68,7 @@ void cdc::stats::parts_touched_stats::register_metrics(seastar::metrics::metric_
|
||||
metrics.add_group(cdc_group_name, {
|
||||
sm::make_total_operations(seastar::format("operations_on_{}_performed_{}", part_name, suffix), count[(size_t)part],
|
||||
sm::description(seastar::format("number of {} CDC operations that processed a {}", suffix, part_name)),
|
||||
{cdc_label}).set_skip_when_empty()
|
||||
{})
|
||||
});
|
||||
};
|
||||
|
||||
@@ -91,23 +91,23 @@ cdc::stats::stats() {
|
||||
_metrics.add_group(cdc_group_name, {
|
||||
sm::make_total_operations("operations_" + kind, counters.unsplit_count,
|
||||
sm::description(format("number of {} CDC operations", kind)),
|
||||
{split_label(false), basic_level, cdc_label}).set_skip_when_empty(),
|
||||
{split_label(false)}),
|
||||
|
||||
sm::make_total_operations("operations_" + kind, counters.split_count,
|
||||
sm::description(format("number of {} CDC operations", kind)),
|
||||
{split_label(true), basic_level, cdc_label}).set_skip_when_empty(),
|
||||
{split_label(true)}),
|
||||
|
||||
sm::make_total_operations("preimage_selects_" + kind, counters.preimage_selects,
|
||||
sm::description(format("number of {} preimage queries performed", kind)),
|
||||
{cdc_label}).set_skip_when_empty(),
|
||||
{}),
|
||||
|
||||
sm::make_total_operations("operations_with_preimage_" + kind, counters.with_preimage_count,
|
||||
sm::description(format("number of {} operations that included preimage", kind)),
|
||||
{cdc_label}).set_skip_when_empty(),
|
||||
{}),
|
||||
|
||||
sm::make_total_operations("operations_with_postimage_" + kind, counters.with_postimage_count,
|
||||
sm::description(format("number of {} operations that included postimage", kind)),
|
||||
{cdc_label}).set_skip_when_empty()
|
||||
{})
|
||||
});
|
||||
|
||||
counters.touches.register_metrics(_metrics, kind);
|
||||
@@ -420,7 +420,7 @@ static const sstring cdc_deleted_column_prefix = cdc_meta_column_prefix + "delet
|
||||
static const sstring cdc_deleted_elements_column_prefix = cdc_meta_column_prefix + "deleted_elements_";
|
||||
|
||||
bool is_log_name(const std::string_view& table_name) {
|
||||
return table_name.ends_with(cdc_log_suffix);
|
||||
return boost::ends_with(table_name, cdc_log_suffix);
|
||||
}
|
||||
|
||||
bool is_cdc_metacolumn_name(const sstring& name) {
|
||||
|
||||
@@ -186,7 +186,7 @@ bool cdc::metadata::prepare(db_clock::time_point tp) {
|
||||
}
|
||||
|
||||
auto ts = to_ts(tp);
|
||||
auto [it, emplaced] = _gens.emplace(to_ts(tp), std::nullopt);
|
||||
auto emplaced = _gens.emplace(to_ts(tp), std::nullopt).second;
|
||||
|
||||
if (_last_stream_timestamp != api::missing_timestamp) {
|
||||
auto last_correct_gen = gen_used_at(_last_stream_timestamp);
|
||||
@@ -201,5 +201,5 @@ bool cdc::metadata::prepare(db_clock::time_point tp) {
|
||||
}
|
||||
}
|
||||
|
||||
return !it->second;
|
||||
return emplaced;
|
||||
}
|
||||
|
||||
@@ -45,7 +45,6 @@ struct client_data {
|
||||
std::optional<bool> ssl_enabled;
|
||||
std::optional<sstring> ssl_protocol;
|
||||
std::optional<sstring> username;
|
||||
std::optional<sstring> scheduling_group_name;
|
||||
|
||||
sstring stage_str() const { return to_string(connection_stage); }
|
||||
sstring client_type_str() const { return to_string(ct); }
|
||||
|
||||
@@ -16,13 +16,11 @@
|
||||
#include "mutation/mutation_fragment.hh"
|
||||
#include "mutation/mutation_fragment_v2.hh"
|
||||
|
||||
#include <ranges>
|
||||
|
||||
// Utility for in-order checking of overlap with position ranges.
|
||||
class clustering_ranges_walker {
|
||||
const schema& _schema;
|
||||
const query::clustering_row_ranges& _ranges;
|
||||
std::ranges::subrange<query::clustering_row_ranges::const_iterator> _current_range;
|
||||
boost::iterator_range<query::clustering_row_ranges::const_iterator> _current_range;
|
||||
bool _in_current; // next position is known to be >= _current_start
|
||||
bool _past_current; // next position is known to be >= _current_end
|
||||
bool _using_clustering_range; // Whether current range comes from _current_range
|
||||
@@ -40,7 +38,7 @@ private:
|
||||
if (!_current_range) {
|
||||
return false;
|
||||
}
|
||||
_current_range.advance(1);
|
||||
_current_range.advance_begin(1);
|
||||
}
|
||||
++_change_counter;
|
||||
_using_clustering_range = true;
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
#
|
||||
# Copyright 2024-present ScyllaDB
|
||||
#
|
||||
|
||||
#
|
||||
# SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
#
|
||||
find_package(PkgConfig REQUIRED)
|
||||
|
||||
foreach(component ${OpenLDAP_FIND_COMPONENTS})
|
||||
pkg_search_module(PC_${component} QUIET ${component})
|
||||
find_path (OpenLDAP_${component}_INCLUDE_DIR
|
||||
NAMES lber.h
|
||||
HINTS
|
||||
${PC_${component}_INCLUDEDIR}
|
||||
${PC_${component}_INCLUDE_DIRS})
|
||||
find_library(OpenLDAP_${component}_LIBRARY
|
||||
NAMES ${component}
|
||||
HINTS
|
||||
${PC_${component}_LIBDIR}
|
||||
${PC_${component}_LIBRARY_DIRS})
|
||||
list(APPEND OpenLDAP_INCLUDE_DIRS OpenLDAP_${component}_INCLUDE_DIR)
|
||||
list(APPEND OpenLDAP_LIBRARIES OpenLDAP_${component}_LIBRARY)
|
||||
endforeach()
|
||||
|
||||
include(FindPackageHandleStandardArgs)
|
||||
find_package_handle_standard_args(OpenLDAP
|
||||
DEFAULT_MSG
|
||||
${OpenLDAP_INCLUDE_DIRS}
|
||||
${OpenLDAP_LIBRARIES})
|
||||
|
||||
mark_as_advanced(
|
||||
${OpenLDAP_INCLUDE_DIRS}
|
||||
${OpenLDAP_LIBRARIES})
|
||||
|
||||
if(OpenLDAP_FOUND)
|
||||
foreach(component ${OpenLDAP_FIND_COMPONENTS})
|
||||
if(NOT TARGET OpenLDAP::${component})
|
||||
add_library(OpenLDAP::${component} UNKNOWN IMPORTED)
|
||||
set_target_properties(OpenLDAP::${component} PROPERTIES
|
||||
INTERFACE_INCLUDE_DIRECTORIES "${OpenLDAP_${component}_INCLUDE_DIR}"
|
||||
IMPORTED_LINK_INTERFACE_LANGUAGES "C"
|
||||
IMPORTED_LOCATION "${OpenLDAP_${component}_LIBRARY}")
|
||||
endif()
|
||||
endforeach()
|
||||
endif()
|
||||
@@ -1,53 +0,0 @@
|
||||
#
|
||||
# Copyright 2024-present ScyllaDB
|
||||
#
|
||||
|
||||
#
|
||||
# SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
#
|
||||
|
||||
set(kmip_ver "2.1.0t")
|
||||
|
||||
cmake_host_system_information(
|
||||
RESULT distrib_id QUERY DISTRIB_ID)
|
||||
if(distrib_id MATCHES "centos|fedora|rhel")
|
||||
set(kmip_distrib "rhel84")
|
||||
else()
|
||||
message(FATAL_ERROR "Could not locate kmipc library for ${distrib_id}")
|
||||
endif()
|
||||
|
||||
if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|AARCH64")
|
||||
set(kmip_arch "aarch64")
|
||||
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "amd64|x86_64")
|
||||
set(kmip_arch "64")
|
||||
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "(powerpc|ppc)64le")
|
||||
set(kmip_arch "ppc64le")
|
||||
endif()
|
||||
|
||||
set(kmip_ROOT "${PROJECT_SOURCE_DIR}/kmipc/kmipc-${kmip_ver}-${kmip_distrib}_${kmip_arch}")
|
||||
find_library(kmip_LIBRARY
|
||||
NAMES kmip
|
||||
HINTS ${kmip_ROOT}/lib)
|
||||
|
||||
find_path(kmip_INCLUDE_DIR
|
||||
NAMES kmip.h
|
||||
HINTS ${kmip_ROOT}/include)
|
||||
|
||||
mark_as_advanced(
|
||||
kmip_LIBRARY
|
||||
kmip_INCLUDE_DIR)
|
||||
|
||||
find_package_handle_standard_args(kmip
|
||||
REQUIRED_VARS
|
||||
kmip_LIBRARY
|
||||
kmip_INCLUDE_DIR)
|
||||
|
||||
if(kmip_FOUND)
|
||||
if (NOT TARGET KMIP::kmipc)
|
||||
add_library(KMIP::kmipc UNKNOWN IMPORTED)
|
||||
set_target_properties(KMIP::kmipc PROPERTIES
|
||||
INTERFACE_INCLUDE_DIRECTORIES "${kmip_INCLUDE_DIR}"
|
||||
IMPORTED_LINK_INTERFACE_LANGUAGES "C"
|
||||
IMPORTED_LOCATION "${kmip_LIBRARY}")
|
||||
endif()
|
||||
endif()
|
||||
@@ -1,60 +0,0 @@
|
||||
#
|
||||
# Copyright 2024-present ScyllaDB
|
||||
#
|
||||
|
||||
#
|
||||
# SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
#
|
||||
|
||||
find_package (PkgConfig REQUIRED)
|
||||
|
||||
pkg_search_module (PC_lz4 QUIET liblz4)
|
||||
|
||||
find_library (lz4_STATIC_LIBRARY
|
||||
NAMES liblz4.a
|
||||
HINTS
|
||||
${PC_lz4_STATIC_LIBDIR}
|
||||
${PC_lz4_STATIC_LIBRARY_DIRS})
|
||||
|
||||
find_library (lz4_LIBRARY
|
||||
NAMES lz4
|
||||
HINTS
|
||||
${PC_lz4_LIBDIR}
|
||||
${PC_lz4_LIBRARY_DIRS})
|
||||
|
||||
find_path (lz4_INCLUDE_DIR
|
||||
NAMES lz4.h
|
||||
HINTS
|
||||
${PC_lz4_STATIC_INCLUDEDIR}
|
||||
${PC_lz4_STATIC_INCLUDE_DIRS})
|
||||
|
||||
mark_as_advanced (
|
||||
lz4_STATIC_LIBRARY
|
||||
lz4_LIBRARY
|
||||
lz4_INCLUDE_DIR)
|
||||
|
||||
include (FindPackageHandleStandardArgs)
|
||||
|
||||
find_package_handle_standard_args (lz4
|
||||
REQUIRED_VARS
|
||||
lz4_STATIC_LIBRARY
|
||||
lz4_LIBRARY
|
||||
lz4_INCLUDE_DIR
|
||||
VERSION_VAR PC_lz4_STATIC_VERSION)
|
||||
|
||||
if (lz4_FOUND)
|
||||
if (NOT (TARGET lz4::lz4_static))
|
||||
add_library (lz4::lz4_static UNKNOWN IMPORTED)
|
||||
set_target_properties (lz4::lz4_static
|
||||
PROPERTIES
|
||||
IMPORTED_LOCATION ${lz4_STATIC_LIBRARY}
|
||||
INTERFACE_INCLUDE_DIRECTORIES ${lz4_INCLUDE_DIR})
|
||||
endif ()
|
||||
if (NOT (TARGET lz4::lz4))
|
||||
add_library (lz4::lz4 UNKNOWN IMPORTED)
|
||||
set_target_properties (lz4::lz4
|
||||
PROPERTIES
|
||||
IMPORTED_LOCATION ${lz4_LIBRARY}
|
||||
INTERFACE_INCLUDE_DIRECTORIES ${lz4_INCLUDE_DIR})
|
||||
endif ()
|
||||
endif ()
|
||||
@@ -1,48 +0,0 @@
|
||||
#
|
||||
# Copyright 2023-present ScyllaDB
|
||||
#
|
||||
|
||||
#
|
||||
# SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
#
|
||||
find_package(PkgConfig REQUIRED)
|
||||
|
||||
pkg_check_modules(PC_p11_kit QUIET p11-kit-1)
|
||||
|
||||
find_library(p11-kit_LIBRARY
|
||||
NAMES p11-kit
|
||||
PATH_SUFFIXES p11-kit-1
|
||||
HINTS
|
||||
${PC_p11_kit_LIBDIR}
|
||||
${PC_p11_kit_LIBRARY_DIRS})
|
||||
|
||||
find_path(p11-kit_INCLUDE_DIR
|
||||
NAMES p11-kit/p11-kit.h
|
||||
HINTS
|
||||
${PC_p11_kit_INCLUDEDIR}
|
||||
${PC_p11_kit_INCLUDE_DIRS})
|
||||
|
||||
mark_as_advanced(
|
||||
p11-kit_LIBRARY
|
||||
p11-kit_INCLUDE_DIR)
|
||||
|
||||
include(FindPackageHandleStandardArgs)
|
||||
|
||||
find_package_handle_standard_args(p11-kit
|
||||
REQUIRED_VARS
|
||||
p11-kit_LIBRARY
|
||||
p11-kit_INCLUDE_DIR
|
||||
VERSION_VAR PC_p11_kit_VERSION)
|
||||
|
||||
if(p11-kit_FOUND)
|
||||
set(p11-kit_LIBRARIES ${p11-kit_LIBRARY})
|
||||
set(p11-kit_INCLUDE_DIRS ${p11-kit_INCLUDE_DIR})
|
||||
if(NOT(TARGET p11-kit::p11-kit))
|
||||
add_library(p11-kit::p11-kit UNKNOWN IMPORTED)
|
||||
|
||||
set_target_properties(p11-kit::p11-kit
|
||||
PROPERTIES
|
||||
IMPORTED_LOCATION ${p11-kit_LIBRARY}
|
||||
INTERFACE_INCLUDE_DIRECTORIES ${p11-kit_INCLUDE_DIRS})
|
||||
endif()
|
||||
endif()
|
||||
@@ -8,13 +8,7 @@
|
||||
|
||||
find_package (PkgConfig REQUIRED)
|
||||
|
||||
pkg_search_module (PC_zstd QUIET libzstd)
|
||||
|
||||
find_library (zstd_STATIC_LIBRARY
|
||||
NAMES libzstd.a
|
||||
HINTS
|
||||
${PC_zstd_STATIC_LIBDIR}
|
||||
${PC_zstd_STATIC_LIBRARY_DIRS})
|
||||
pkg_check_modules (PC_zstd QUIET libzstd)
|
||||
|
||||
find_library (zstd_LIBRARY
|
||||
NAMES zstd
|
||||
@@ -25,11 +19,10 @@ find_library (zstd_LIBRARY
|
||||
find_path (zstd_INCLUDE_DIR
|
||||
NAMES zstd.h
|
||||
HINTS
|
||||
${PC_zstd_STATIC_INCLUDEDIR}
|
||||
${PC_zstd_STATIC_INCLUDE_DIRS})
|
||||
${PC_zstd_INCLUDEDIR}
|
||||
${PC_zstd_INCLUDE_DIRS})
|
||||
|
||||
mark_as_advanced (
|
||||
zstd_STATIC_LIBRARY
|
||||
zstd_LIBRARY
|
||||
zstd_INCLUDE_DIR)
|
||||
|
||||
@@ -37,20 +30,13 @@ include (FindPackageHandleStandardArgs)
|
||||
|
||||
find_package_handle_standard_args (zstd
|
||||
REQUIRED_VARS
|
||||
zstd_STATIC_LIBRARY
|
||||
zstd_LIBRARY
|
||||
zstd_INCLUDE_DIR
|
||||
VERSION_VAR PC_zstd_STATIC_VERSION)
|
||||
VERSION_VAR PC_zstd_VERSION)
|
||||
|
||||
if (zstd_FOUND)
|
||||
if (NOT (TARGET zstd::zstd_static))
|
||||
add_library (zstd::zstd_static UNKNOWN IMPORTED)
|
||||
|
||||
set_target_properties (zstd::zstd_static
|
||||
PROPERTIES
|
||||
IMPORTED_LOCATION ${zstd_STATIC_LIBRARY}
|
||||
INTERFACE_INCLUDE_DIRECTORIES ${zstd_INCLUDE_DIR})
|
||||
endif ()
|
||||
set (zstd_LIBRARIES ${zstd_LIBRARY})
|
||||
set (zstd_INCLUDE_DIRS ${zstd_INCLUDE_DIR})
|
||||
if (NOT (TARGET zstd::libzstd))
|
||||
add_library (zstd::libzstd UNKNOWN IMPORTED)
|
||||
|
||||
|
||||
@@ -1,30 +0,0 @@
|
||||
#
|
||||
# Copyright 2024-present ScyllaDB
|
||||
#
|
||||
|
||||
#
|
||||
# SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
#
|
||||
function(enable_lto name)
|
||||
get_target_property(type ${name} TYPE)
|
||||
if(type MATCHES "OBJECT_LIBRARY|STATIC_LIBRARY|SHARED_LIBRARY|EXECUTABLE")
|
||||
target_compile_options(${name} PRIVATE
|
||||
$<$<CONFIG:RelWithDebInfo>:-ffat-lto-objects>)
|
||||
set_property(TARGET ${name} PROPERTY
|
||||
INTERPROCEDURAL_OPTIMIZATION_RELWITHDEBINFO ON)
|
||||
if(type MATCHES "SHARED_LIBRARY|EXECUTABLE")
|
||||
target_link_options(${name}
|
||||
PRIVATE $<$<CONFIG:RelWithDebInfo>:-ffat-lto-objects>)
|
||||
endif()
|
||||
elseif(type STREQUAL "INTERFACE_LIBRARY")
|
||||
if (name MATCHES "^scylla_(.*)$")
|
||||
# Special handling for scylla_* libraries with whole archive linking
|
||||
set(library "${CMAKE_MATCH_1}")
|
||||
enable_lto(${library})
|
||||
# For non-scylla_* INTERFACE libraries, we don't compile them,
|
||||
# hence no need to set the LTO compile options or property
|
||||
endif()
|
||||
else()
|
||||
message(FATAL_ERROR "Unsupported TYPE: ${name}:${type}")
|
||||
endif()
|
||||
endfunction()
|
||||
@@ -1,36 +1,16 @@
|
||||
if(NOT DEFINED Scylla_PARALLEL_LINK_JOBS)
|
||||
if(NOT DEFINED Scylla_RAM_PER_LINK_JOB)
|
||||
# preserve user-provided value
|
||||
set(_default_ram_value 4096)
|
||||
if(Scylla_ENABLE_LTO)
|
||||
# When ThinLTO optimization is enabled, the linker uses all available CPU threads.
|
||||
# To prevent excessive memory usage, we limit parallel link jobs based on available RAM,
|
||||
# as each link job requires significant memory during optimization.
|
||||
set(_default_ram_value 16384)
|
||||
endif()
|
||||
set(Scylla_RAM_PER_LINK_JOB ${_default_ram_value} CACHE STRING
|
||||
"Maximum amount of memory used by each link job (in MiB)")
|
||||
endif()
|
||||
cmake_host_system_information(
|
||||
RESULT _total_mem_mb
|
||||
QUERY AVAILABLE_PHYSICAL_MEMORY)
|
||||
math(EXPR _link_pool_depth "${_total_mem_mb} / ${Scylla_RAM_PER_LINK_JOB}")
|
||||
# Use 2 parallel link jobs to optimize build throughput. The main executable requires
|
||||
# LTO (slower link phase) while tests are linked without LTO (faster link phase).
|
||||
# This allows simultaneous linking of LTO and non-LTO targets, enabling better CPU
|
||||
# utilization by overlapping the slower LTO link with faster test links.
|
||||
if(_link_pool_depth LESS 2)
|
||||
set(_link_pool_depth 2)
|
||||
endif()
|
||||
set(LINK_MEM_PER_JOB 4096 CACHE INTERNAL "Maximum memory used by each link job in (in MiB)")
|
||||
|
||||
set(Scylla_PARALLEL_LINK_JOBS "${_link_pool_depth}" CACHE STRING
|
||||
"Maximum number of concurrent link jobs")
|
||||
cmake_host_system_information(
|
||||
RESULT _total_mem
|
||||
QUERY AVAILABLE_PHYSICAL_MEMORY)
|
||||
math(EXPR _link_pool_depth "${_total_mem} / ${LINK_MEM_PER_JOB}")
|
||||
if(_link_pool_depth EQUAL 0)
|
||||
set(_link_pool_depth 1)
|
||||
endif()
|
||||
|
||||
set_property(
|
||||
GLOBAL
|
||||
APPEND
|
||||
PROPERTY JOB_POOLS
|
||||
link_pool=${Scylla_PARALLEL_LINK_JOBS}
|
||||
link_pool=${_link_pool_depth}
|
||||
submodule_pool=1)
|
||||
set(CMAKE_JOB_POOL_LINK link_pool)
|
||||
|
||||
@@ -83,48 +83,14 @@ function(get_padded_dynamic_linker_option output length)
|
||||
set(${output} "${dynamic_linker_option}=${padded_dynamic_linker}" PARENT_SCOPE)
|
||||
endfunction()
|
||||
|
||||
# We want to strip the absolute build paths from the binary,
|
||||
# so that logs and debuggers show e.g. ./main.cc,
|
||||
# not /var/lib/jenkins/workdir/scylla/main.cc, or something.
|
||||
#
|
||||
# The base way to do that is -ffile-prefix-map=${CMAKE_SOURCE_DIR}/=
|
||||
# But by itself, it results in *both* DW_AT_name and DW_AT_comp_dir being
|
||||
# subject to the substitution.
|
||||
# For example, if table::query() is located
|
||||
# in /home/user/scylla/replica/table.cc,
|
||||
# and the compiler working directory is /home/user/scylla/build,
|
||||
# then after the ffile-prefix-map substitution it will
|
||||
# have DW_AT_comp_dir equal to ./build
|
||||
# and DW_AT_name equal to ./replica/table.cc
|
||||
#
|
||||
# If DW_AT_name is a relative path, gdb looks for the source files in $DW_AT_comp_dir/$DW_AT_name.
|
||||
# This results in e.g. gdb looking for seastar::thread_context::main
|
||||
# in ./build/./replica/table.cc
|
||||
# instead of replica/table.cc as we would like.
|
||||
# To unscrew this, we have to add a rule which will
|
||||
# convert the /absolute/path/to/build to `.`,
|
||||
# which will result in gdb looking in ././replica/table.cc, which is fine.
|
||||
#
|
||||
# The build rule which converts `/absolute/path/to/build/` (note trailing slash)
|
||||
# to `build/` exists just so that any DW_AT_name under build (e.g. in generated sources)
|
||||
# is excluded from the first rule.
|
||||
#
|
||||
# Note that the order of these options is important.
|
||||
# Each is strictly more specific than the previous one.
|
||||
# If they were the other way around, only the most general rule would be used.
|
||||
add_compile_options("-ffile-prefix-map=${CMAKE_SOURCE_DIR}/=")
|
||||
add_compile_options("-ffile-prefix-map=${CMAKE_BINARY_DIR}=.")
|
||||
cmake_path(GET CMAKE_BINARY_DIR FILENAME build_dir_name)
|
||||
add_compile_options("-ffile-prefix-map=${CMAKE_BINARY_DIR}/=${build_dir_name}")
|
||||
|
||||
default_target_arch(target_arch)
|
||||
if(target_arch)
|
||||
add_compile_options("-march=${target_arch}")
|
||||
endif()
|
||||
|
||||
if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
|
||||
add_compile_options("SHELL:-Xclang -fexperimental-assignment-tracking=disabled")
|
||||
endif()
|
||||
add_compile_options("SHELL:-Xclang -fexperimental-assignment-tracking=disabled")
|
||||
|
||||
function(maybe_limit_stack_usage_in_KB stack_usage_threshold_in_KB config)
|
||||
math(EXPR _stack_usage_threshold_in_bytes "${stack_usage_threshold_in_KB} * 1024")
|
||||
@@ -152,100 +118,6 @@ macro(update_cxx_flags flags)
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
set(pgo_opts "")
|
||||
# Clang supports three instrumenttation methods for profile generation:
|
||||
# - -fprofile-instr-generate: this happens in the frontend (AST phase)
|
||||
# - -fprofile-generate: this happens in the middle end. this instruments the LLVM IR
|
||||
# generated by the front-end. and is called the regular PGO.
|
||||
# - -fcs-profile-generate: "cs" is short for Context Sensitive. this instrumentation
|
||||
# method is called CSPGO in comparison with the regular PGO above.
|
||||
# We use IR and CSIR to represent the last two instrumentation methods in the option
|
||||
# of Scylla_BUILD_INSTRUMENTED. the frontend instrumentation is not supported, because
|
||||
# the IR-based instrumentation is superier than the frontend-based instrumentation when
|
||||
# profiling executable for optimization purposes.
|
||||
set(Scylla_BUILD_INSTRUMENTED OFF CACHE STRING
|
||||
"Build ScyllaDB with PGO instrumentation. May be specified as IR, CSIR")
|
||||
if(Scylla_BUILD_INSTRUMENTED)
|
||||
file(TO_NATIVE_PATH "${CMAKE_BINARY_DIR}/${Scylla_BUILD_INSTRUMENTED}" Scylla_PROFILE_DATA_DIR)
|
||||
if(Scylla_BUILD_INSTRUMENTED STREQUAL "IR")
|
||||
# instrument code at IR level, also known as the regular PGO
|
||||
string(APPEND pgo_opts " -fprofile-generate=\"${Scylla_PROFILE_DATA_DIR}\"")
|
||||
elseif(Scylla_BUILD_INSTRUMENTED STREQUAL "CSIR")
|
||||
# instrument code with Context Sensitive IR, also known as CSPGO.
|
||||
string(APPEND pgo_opts " -fcs-profile-generate=\"${Scylla_PROFILE_DATA_DIR}\"")
|
||||
else()
|
||||
message(FATAL_ERROR "Unknown Scylla_BUILD_INSTRUMENTED: ${}")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
set(Scylla_PROFDATA_FILE "" CACHE FILEPATH
|
||||
"Path to the profiling data file to use when compiling.")
|
||||
set(Scylla_PROFDATA_COMPRESSED_FILE "" CACHE FILEPATH
|
||||
"Path to the compressed profiling data file to use when compiling")
|
||||
if(Scylla_PROFDATA_FILE AND Scylla_PROFDATA_COMPRESSED_FILE)
|
||||
message(FATAL_ERROR
|
||||
"Both Scylla_PROFDATA_FILE and Scylla_PROFDATA_COMPRESSED_FILE are specified!")
|
||||
endif()
|
||||
|
||||
function(extract_compressed_file)
|
||||
find_program(XZCAT xzcat
|
||||
REQUIRED)
|
||||
|
||||
cmake_parse_arguments(parsed_args "" "INPUT;OUTPUT" "" ${ARGN})
|
||||
set(input ${parsed_args_INPUT})
|
||||
|
||||
get_filename_component(ext "${input}" LAST_EXT)
|
||||
get_filename_component(stem "${input}" NAME_WLE)
|
||||
set(output "${CMAKE_BINARY_DIR}/${stem}")
|
||||
if(ext STREQUAL ".xz")
|
||||
execute_process(
|
||||
COMMAND ${XZCAT} "${input}"
|
||||
OUTPUT_FILE "${output}"
|
||||
WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}")
|
||||
else()
|
||||
message(FATAL_ERROR "Unknown compression format: ${ext}")
|
||||
endif()
|
||||
set(${parsed_args_OUTPUT} ${output} PARENT_SCOPE)
|
||||
endfunction(extract_compressed_file)
|
||||
|
||||
if(Scylla_PROFDATA_FILE)
|
||||
if(NOT EXISTS "${Scylla_PROFDATA_FILE}")
|
||||
message(FATAL_ERROR
|
||||
"Specified Scylla_PROFDATA_FILE (${Scylla_PROFDATA_FILE}) does not exist")
|
||||
endif()
|
||||
set(profdata_file "${Scylla_PROFDATA_FILE}")
|
||||
elseif(Scylla_PROFDATA_COMPRESSED_FILE)
|
||||
# read the header to see if the file is fetched by LFS upon checkout
|
||||
file(READ "${Scylla_PROFDATA_COMPRESSED_FILE}" file_header LIMIT 7)
|
||||
if(file_header MATCHES "version")
|
||||
message(FATAL_ERROR "Please install git-lfs for using profdata stored in Git LFS")
|
||||
endif()
|
||||
extract_compressed_file(
|
||||
INPUT "${Scylla_PROFDATA_COMPRESSED_FILE}"
|
||||
OUTPUT "profdata_file")
|
||||
endif()
|
||||
|
||||
if(profdata_file)
|
||||
if(Scylla_BUILD_INSTRUMENTED STREQUAL "IR")
|
||||
# -fprofile-use is not allowed with -fprofile-generate
|
||||
message(WARNING "Only CSIR supports using and generating profdata at the same time.")
|
||||
unset(pgo_opts)
|
||||
endif()
|
||||
# When building with PGO, -Wbackend-plugin generates a warning for every
|
||||
# function which changed its control flow graph since the profile was
|
||||
# taken.
|
||||
# We allow stale profiles, so these warnings are just noise to us.
|
||||
# Let's silence them.
|
||||
string(APPEND CMAKE_CXX_FLAGS " -Wno-backend-plugin")
|
||||
string(APPEND CMAKE_CXX_FLAGS " -fprofile-use=\"${profdata_file}\"")
|
||||
endif()
|
||||
|
||||
if(pgo_opts)
|
||||
string(APPEND CMAKE_CXX_FLAGS "${pgo_opts}")
|
||||
string(APPEND CMAKE_EXE_LINKER_FLAGS "${pgo_opts}")
|
||||
string(APPEND CMAKE_SHARED_LINKER_FLAGS "${pgo_opts}")
|
||||
endif()
|
||||
|
||||
# Force SHA1 build-id generation
|
||||
add_link_options("LINKER:--build-id=sha1")
|
||||
include(CheckLinkerFlag)
|
||||
@@ -284,15 +156,3 @@ else()
|
||||
get_padded_dynamic_linker_option(dynamic_linker_option 511)
|
||||
endif()
|
||||
add_link_options("${dynamic_linker_option}")
|
||||
|
||||
if(Scylla_ENABLE_LTO)
|
||||
include(CheckIPOSupported)
|
||||
block()
|
||||
set(CMAKE_EXE_LINKER_FLAGS "${linker_flag}")
|
||||
set(CMAKE_TRY_COMPILE_PLATFORM_VARIABLES CMAKE_EXE_LINKER_FLAGS)
|
||||
check_ipo_supported(RESULT ipo_supported OUTPUT error)
|
||||
if(NOT ipo_supported)
|
||||
message(FATAL_ERROR "LTO is not supported: ${error}")
|
||||
endif()
|
||||
endblock()
|
||||
endif()
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user