Compare commits

..

1 Commits

Author SHA1 Message Date
Amnon Heiman
607ca719d7 Enable prometheus_allow_protobuf by default
Change the prometheus_allow_protobuf configuration to true by default.
This allows the ScyllaDB server to serve the Prometheus protobuf format (enabling
native histogram support) when requested by the monitoring server.

Update config help text/docs to reflect protobuf support (drop
“experimental” wording).

Add cluster tests to validate the default is enabled, can be overridden,
and /metrics returns protobuf when requested via Accept header (and
falls back to text when disabled).

Fixes #27817
Co-authored-by: mykaul <mykaul@scylladb.com>

Signed-off-by: Amnon Heiman <amnon@scylladb.com>
2026-01-19 09:40:49 +02:00
4404 changed files with 40107 additions and 80218 deletions

4
.github/CODEOWNERS vendored
View File

@@ -92,10 +92,6 @@ test/boost/querier_cache_test.cc @denesb
# PYTEST-BASED CQL TESTS # PYTEST-BASED CQL TESTS
test/cqlpy/* @nyh test/cqlpy/* @nyh
# TEST FRAMEWORK
test/pylib/* @xtrey
test.py @xtrey
# RAFT # RAFT
raft/* @kbr-scylla @gleb-cloudius @kostja raft/* @kbr-scylla @gleb-cloudius @kostja
test/raft/* @kbr-scylla @gleb-cloudius @kostja test/raft/* @kbr-scylla @gleb-cloudius @kostja

View File

@@ -55,26 +55,22 @@ ninja build/<mode>/test/boost/<test_name>
ninja build/<mode>/scylla ninja build/<mode>/scylla
# Run all tests in a file # Run all tests in a file
./test.py --mode=<mode> test/<suite>/<test_name>.py ./test.py --mode=<mode> <test_path>
# Run a single test case from a file # Run a single test case from a file
./test.py --mode=<mode> test/<suite>/<test_name>.py::<test_function_name> ./test.py --mode=<mode> <test_path>::<test_function_name>
# Run all tests in a directory
./test.py --mode=<mode> test/<suite>/
# Examples # Examples
./test.py --mode=dev test/alternator/ ./test.py --mode=dev alternator/
./test.py --mode=dev test/cluster/test_raft_voters.py::test_raft_limited_voters_retain_coordinator ./test.py --mode=dev cluster/test_raft_voters::test_raft_limited_voters_retain_coordinator
./test.py --mode=dev test/cqlpy/test_json.py
# Optional flags # Optional flags
./test.py --mode=dev test/cluster/test_raft_no_quorum.py -v # Verbose output ./test.py --mode=dev cluster/test_raft_no_quorum -v # Verbose output
./test.py --mode=dev test/cluster/test_raft_no_quorum.py --repeat 5 # Repeat test 5 times ./test.py --mode=dev cluster/test_raft_no_quorum --repeat 5 # Repeat test 5 times
``` ```
**Important:** **Important:**
- Use full path with `.py` extension (e.g., `test/cluster/test_raft_no_quorum.py`, not `cluster/test_raft_no_quorum`) - Use path without `.py` extension (e.g., `cluster/test_raft_no_quorum`, not `cluster/test_raft_no_quorum.py`)
- To run a single test case, append `::<test_function_name>` to the file path - To run a single test case, append `::<test_function_name>` to the file path
- Add `-v` for verbose output - Add `-v` for verbose output
- Add `--repeat <num>` to repeat a test multiple times - Add `--repeat <num>` to repeat a test multiple times

View File

@@ -1,6 +1,6 @@
version: 2 version: 2
updates: updates:
- package-ecosystem: "uv" - package-ecosystem: "pip"
directory: "/docs" directory: "/docs"
schedule: schedule:
interval: "daily" interval: "daily"

View File

@@ -4,7 +4,7 @@
# Copyright (C) 2024-present ScyllaDB # Copyright (C) 2024-present ScyllaDB
# #
# #
# SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 # SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
# #
import argparse import argparse

View File

@@ -10,9 +10,6 @@ on:
types: [labeled, unlabeled] types: [labeled, unlabeled]
branches: [master, next, enterprise] branches: [master, next, enterprise]
env:
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
jobs: jobs:
check-commit: check-commit:
runs-on: ubuntu-latest runs-on: ubuntu-latest
@@ -33,7 +30,7 @@ jobs:
echo "DEFAULT_BRANCH=master" >> $GITHUB_ENV echo "DEFAULT_BRANCH=master" >> $GITHUB_ENV
fi fi
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 uses: actions/checkout@v4
with: with:
repository: ${{ github.repository }} repository: ${{ github.repository }}
ref: ${{ env.DEFAULT_BRANCH }} ref: ${{ env.DEFAULT_BRANCH }}

View File

@@ -5,18 +5,12 @@ on:
types: [opened, reopened, edited] types: [opened, reopened, edited]
branches: [branch-*] branches: [branch-*]
env:
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
jobs: jobs:
check-fixes-prefix: check-fixes-prefix:
runs-on: ubuntu-latest runs-on: ubuntu-latest
permissions:
contents: read
issues: write
steps: steps:
- name: Check PR body for "Fixes" prefix patterns - name: Check PR body for "Fixes" prefix patterns
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 uses: actions/github-script@v7
with: with:
script: | script: |
const body = context.payload.pull_request.body; const body = context.payload.pull_request.body;
@@ -24,7 +18,7 @@ jobs:
// Regular expression pattern to check for "Fixes" prefix // Regular expression pattern to check for "Fixes" prefix
// Adjusted to dynamically insert the repository full name // Adjusted to dynamically insert the repository full name
const pattern = `Fixes:? ((?:#|${repo.replace('/', '\\/')}#|https://github\\.com/${repo.replace('/', '\\/')}/issues/)(\\d+)|(?:https://scylladb\\.atlassian\\.net/browse/)?([A-Z]+-\\d+))`; const pattern = `Fixes:? ((?:#|${repo.replace('/', '\\/')}#|https://github\\.com/${repo.replace('/', '\\/')}/issues/)(\\d+)|([A-Z]+-\\d+))`;
const regex = new RegExp(pattern); const regex = new RegExp(pattern);
if (!regex.test(body)) { if (!regex.test(body)) {

View File

@@ -12,9 +12,6 @@ on:
description: 'the md5sum for scylla executable' description: 'the md5sum for scylla executable'
value: ${{ jobs.build.outputs.md5sum }} value: ${{ jobs.build.outputs.md5sum }}
env:
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
jobs: jobs:
read-toolchain: read-toolchain:
uses: ./.github/workflows/read-toolchain.yaml uses: ./.github/workflows/read-toolchain.yaml
@@ -27,7 +24,7 @@ jobs:
outputs: outputs:
md5sum: ${{ steps.checksum.outputs.md5sum }} md5sum: ${{ steps.checksum.outputs.md5sum }}
steps: steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - uses: actions/checkout@v4
with: with:
submodules: recursive submodules: recursive
- name: Generate the building system - name: Generate the building system

View File

@@ -1,53 +0,0 @@
name: Backport with Jira Integration
on:
push:
branches:
- master
- next-*.*
- branch-*.*
pull_request_target:
types: [labeled, closed]
branches:
- master
- next
- next-*.*
- branch-*.*
jobs:
backport-on-push:
if: github.event_name == 'push'
uses: scylladb/github-automation/.github/workflows/backport-with-jira.yaml@main
with:
event_type: 'push'
base_branch: ${{ github.ref }}
commits: ${{ github.event.before }}..${{ github.sha }}
secrets:
gh_token: ${{ secrets.AUTO_BACKPORT_TOKEN }}
jira_auth: ${{ secrets.USER_AND_KEY_FOR_JIRA_AUTOMATION }}
backport-on-label:
if: github.event_name == 'pull_request_target' && github.event.action == 'labeled'
uses: scylladb/github-automation/.github/workflows/backport-with-jira.yaml@main
with:
event_type: 'labeled'
base_branch: refs/heads/${{ github.event.pull_request.base.ref }}
pull_request_number: ${{ github.event.pull_request.number }}
head_commit: ${{ github.event.pull_request.base.sha }}
label_name: ${{ github.event.label.name }}
pr_state: ${{ github.event.pull_request.state }}
secrets:
gh_token: ${{ secrets.AUTO_BACKPORT_TOKEN }}
jira_auth: ${{ secrets.USER_AND_KEY_FOR_JIRA_AUTOMATION }}
backport-chain:
if: github.event_name == 'pull_request_target' && github.event.action == 'closed' && github.event.pull_request.merged == true
uses: scylladb/github-automation/.github/workflows/backport-with-jira.yaml@main
with:
event_type: 'chain'
base_branch: refs/heads/${{ github.event.pull_request.base.ref }}
pull_request_number: ${{ github.event.pull_request.number }}
pr_body: ${{ github.event.pull_request.body }}
secrets:
gh_token: ${{ secrets.AUTO_BACKPORT_TOKEN }}
jira_auth: ${{ secrets.USER_AND_KEY_FOR_JIRA_AUTOMATION }}

View File

@@ -1,8 +1,8 @@
name: Sync Jira Based on PR Events name: Sync Jira Based on PR Events
on: on:
pull_request_target: pull_request_target:
types: [opened, edited, ready_for_review, review_requested, labeled, unlabeled, closed] types: [opened, ready_for_review, review_requested, labeled, unlabeled, closed]
permissions: permissions:
contents: read contents: read
@@ -10,9 +10,32 @@ permissions:
issues: write issues: write
jobs: jobs:
jira-sync: jira-sync-pr-opened:
uses: scylladb/github-automation/.github/workflows/main_pr_events_jira_sync.yml@main if: github.event.action == 'opened'
with: uses: scylladb/github-automation/.github/workflows/main_jira_sync_pr_opened.yml@main
caller_action: ${{ github.event.action }} secrets:
caller_jira_auth: ${{ secrets.USER_AND_KEY_FOR_JIRA_AUTOMATION }}
jira-sync-in-review:
if: github.event.action == 'ready_for_review' || github.event.action == 'review_requested'
uses: scylladb/github-automation/.github/workflows/main_jira_sync_in_review.yml@main
secrets:
caller_jira_auth: ${{ secrets.USER_AND_KEY_FOR_JIRA_AUTOMATION }}
jira-sync-add-label:
if: github.event.action == 'labeled'
uses: scylladb/github-automation/.github/workflows/main_jira_sync_add_label.yml@main
secrets:
caller_jira_auth: ${{ secrets.USER_AND_KEY_FOR_JIRA_AUTOMATION }}
jira-status-remove-label:
if: github.event.action == 'unlabeled'
uses: scylladb/github-automation/.github/workflows/main_jira_sync_remove_label.yml@main
secrets:
caller_jira_auth: ${{ secrets.USER_AND_KEY_FOR_JIRA_AUTOMATION }}
jira-status-pr-closed:
if: github.event.action == 'closed'
uses: scylladb/github-automation/.github/workflows/main_jira_sync_pr_closed.yml@main
secrets: secrets:
caller_jira_auth: ${{ secrets.USER_AND_KEY_FOR_JIRA_AUTOMATION }} caller_jira_auth: ${{ secrets.USER_AND_KEY_FOR_JIRA_AUTOMATION }}

View File

@@ -1,22 +0,0 @@
name: Sync Jira Based on PR Milestone Events
on:
pull_request_target:
types: [milestoned, demilestoned]
permissions:
contents: read
pull-requests: read
jobs:
jira-sync-milestone-set:
if: github.event.action == 'milestoned'
uses: scylladb/github-automation/.github/workflows/main_jira_sync_pr_milestone_set.yml@main
secrets:
caller_jira_auth: ${{ secrets.USER_AND_KEY_FOR_JIRA_AUTOMATION }}
jira-sync-milestone-removed:
if: github.event.action == 'demilestoned'
uses: scylladb/github-automation/.github/workflows/main_jira_sync_pr_milestone_removed.yml@main
secrets:
caller_jira_auth: ${{ secrets.USER_AND_KEY_FOR_JIRA_AUTOMATION }}

View File

@@ -2,13 +2,13 @@ name: Call Jira release creation for new milestone
on: on:
milestone: milestone:
types: [created, closed] types: [created]
jobs: jobs:
sync-milestone-to-jira: sync-milestone-to-jira:
uses: scylladb/github-automation/.github/workflows/main_sync_milestone_to_jira_release.yml@main uses: scylladb/github-automation/.github/workflows/main_sync_milestone_to_jira_release.yml@main
with: with:
# Comma-separated list of Jira project keys # Comma-separated list of Jira project keys
jira_project_keys: "SCYLLADB,CUSTOMER,SMI,RELENG,VECTOR" jira_project_keys: "SCYLLADB,CUSTOMER"
secrets: secrets:
caller_jira_auth: ${{ secrets.USER_AND_KEY_FOR_JIRA_AUTOMATION }} caller_jira_auth: ${{ secrets.USER_AND_KEY_FOR_JIRA_AUTOMATION }}

View File

@@ -7,11 +7,6 @@ on:
- synchronize - synchronize
- reopened - reopened
permissions:
contents: read
pull-requests: write
statuses: write
jobs: jobs:
validate_pr_author_email: validate_pr_author_email:
uses: scylladb/github-automation/.github/workflows/validate_pr_author_email.yml@main uses: scylladb/github-automation/.github/workflows/validate_pr_author_email.yml@main

View File

@@ -7,9 +7,8 @@ on:
env: env:
HEADER_CHECK_LINES: 10 HEADER_CHECK_LINES: 10
LICENSE: "LicenseRef-ScyllaDB-Source-Available-1.1" LICENSE: "LicenseRef-ScyllaDB-Source-Available-1.0"
CHECKED_EXTENSIONS: ".cc .hh .py" CHECKED_EXTENSIONS: ".cc .hh .py"
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
jobs: jobs:
check-license-headers: check-license-headers:
@@ -20,7 +19,7 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
@@ -41,7 +40,7 @@ jobs:
- name: Comment on PR if check fails - name: Comment on PR if check fails
if: failure() if: failure()
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 uses: actions/github-script@v7
with: with:
script: | script: |
const license = '${{ env.LICENSE }}'; const license = '${{ env.LICENSE }}';

View File

@@ -9,7 +9,6 @@ env:
# use the development branch explicitly # use the development branch explicitly
CLANG_VERSION: 21 CLANG_VERSION: 21
BUILD_DIR: build BUILD_DIR: build
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
permissions: {} permissions: {}
@@ -33,7 +32,7 @@ jobs:
steps: steps:
- run: | - run: |
sudo dnf -y install git sudo dnf -y install git
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Install build dependencies - name: Install build dependencies

View File

@@ -18,7 +18,6 @@ env:
BUILD_TYPE: RelWithDebInfo BUILD_TYPE: RelWithDebInfo
BUILD_DIR: build BUILD_DIR: build
CLANG_TIDY_CHECKS: '-*,bugprone-use-after-move' CLANG_TIDY_CHECKS: '-*,bugprone-use-after-move'
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
permissions: {} permissions: {}
@@ -43,7 +42,7 @@ jobs:
IMAGE: ${{ needs.read-toolchain.image }} IMAGE: ${{ needs.read-toolchain.image }}
run: | run: |
echo ${{ needs.read-toolchain.image }} echo ${{ needs.read-toolchain.image }}
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- run: | - run: |

View File

@@ -1,65 +0,0 @@
name: Close issues created by Scylla associates
on:
issues:
types: [opened, reopened]
permissions:
issues: write
env:
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
jobs:
comment-and-close:
runs-on: ubuntu-latest
steps:
- name: Comment and close if author email is scylladb.com
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
const issue = context.payload.issue;
const actor = context.actor;
// Get user data (only public email is available)
const { data: user } = await github.rest.users.getByUsername({
username: actor,
});
const email = user.email || "";
console.log(`Actor: ${actor}, public email: ${email || "<none>"}`);
// Only continue if email exists and ends with @scylladb.com
if (!email || !email.toLowerCase().endsWith("@scylladb.com")) {
console.log("User is not a scylladb.com email (or email not public); skipping.");
return;
}
const owner = context.repo.owner;
const repo = context.repo.repo;
const issue_number = issue.number;
const body = "Issues in this repository are closed automatically. Scylla associates should use Jira to manage issues.\nPlease move this issue to Jira https://scylladb.atlassian.net/jira/software/c/projects/SCYLLADB/list";
// Add the comment
await github.rest.issues.createComment({
owner,
repo,
issue_number,
body,
});
console.log(`Comment added to #${issue_number}`);
// Close the issue
await github.rest.issues.update({
owner,
repo,
issue_number,
state: "closed",
state_reason: "not_planned"
});
console.log(`Issue #${issue_number} closed.`);

View File

@@ -4,15 +4,13 @@ on:
branches: branches:
- master - master
permissions: {} permissions: {}
env:
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
jobs: jobs:
codespell: codespell:
name: Check for spelling errors name: Check for spelling errors
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - uses: actions/checkout@v4
- uses: codespell-project/actions-codespell@8f01853be192eb0f849a5c7d721450e7a467c579 # v2.2 - uses: codespell-project/actions-codespell@master
with: with:
only_warn: 1 only_warn: 1
ignore_words_list: "ans,datas,fo,ser,ue,crate,nd,reenable,strat,stap,te,raison,iif,tread" ignore_words_list: "ans,datas,fo,ser,ue,crate,nd,reenable,strat,stap,te,raison,iif,tread"

View File

@@ -1,38 +0,0 @@
name: Compare Build Systems
on:
pull_request:
branches:
- master
paths:
- 'configure.py'
- '**/CMakeLists.txt'
- 'cmake/**'
- 'scripts/compare_build_systems.py'
workflow_dispatch:
permissions:
contents: read
# cancel the in-progress run upon a repush
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
read-toolchain:
uses: ./.github/workflows/read-toolchain.yaml
compare:
name: Compare configure.py vs CMake
needs:
- read-toolchain
runs-on: ubuntu-latest
container: ${{ needs.read-toolchain.outputs.image }}
steps:
- uses: actions/checkout@v4
with:
submodules: true
- name: Compare build systems
run: |
git config --global --add safe.directory $GITHUB_WORKSPACE
python3 scripts/compare_build_systems.py --ci

View File

@@ -12,16 +12,13 @@ on:
schedule: schedule:
- cron: '0 10 * * 1' # Runs every Monday at 10:00am - cron: '0 10 * * 1' # Runs every Monday at 10:00am
env:
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
jobs: jobs:
notify_conflict_prs: notify_conflict_prs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Notify PR Authors of Conflicts - name: Notify PR Authors of Conflicts
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 uses: actions/github-script@v7
with: with:
script: | script: |
console.log("Starting conflict reminder script..."); console.log("Starting conflict reminder script...");

View File

@@ -13,9 +13,6 @@ on:
permissions: permissions:
contents: read contents: read
env:
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
jobs: jobs:
lint: lint:
runs-on: ubuntu-latest runs-on: ubuntu-latest
@@ -24,12 +21,12 @@ jobs:
security-events: write security-events: write
steps: steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Differential ShellCheck - name: Differential ShellCheck
uses: redhat-plumbers-in-action/differential-shellcheck@d965e66ec0b3b2f821f75c8eff9b12442d9a7d1e # v5.5.6 uses: redhat-plumbers-in-action/differential-shellcheck@v5
with: with:
severity: warning severity: warning
token: ${{ secrets.GITHUB_TOKEN }} token: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -5,7 +5,6 @@ name: "Docs / Publish"
env: env:
FLAG: ${{ github.repository == 'scylladb/scylla-enterprise' && 'enterprise' || 'opensource' }} FLAG: ${{ github.repository == 'scylladb/scylla-enterprise' && 'enterprise' || 'opensource' }}
DEFAULT_BRANCH: ${{ github.repository == 'scylladb/scylla-enterprise' && 'enterprise' || 'master' }} DEFAULT_BRANCH: ${{ github.repository == 'scylladb/scylla-enterprise' && 'enterprise' || 'master' }}
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
on: on:
push: push:
@@ -20,23 +19,19 @@ on:
jobs: jobs:
release: release:
permissions: permissions:
pages: write
id-token: write
contents: write contents: write
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 uses: actions/checkout@v4
with: with:
ref: ${{ env.DEFAULT_BRANCH }} ref: ${{ env.DEFAULT_BRANCH }}
persist-credentials: false persist-credentials: false
fetch-depth: 0 fetch-depth: 0
- name: Set up Python - name: Set up Python
uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 uses: actions/setup-python@v5
with: with:
python-version: "3.12" python-version: "3.10"
- name: Install uv
uses: astral-sh/setup-uv@cec208311dfd045dd5311c1add060b2062131d57 # v8.0.0
- name: Set up env - name: Set up env
run: make -C docs FLAG="${{ env.FLAG }}" setupenv run: make -C docs FLAG="${{ env.FLAG }}" setupenv
- name: Build docs - name: Build docs

View File

@@ -7,7 +7,6 @@ permissions:
env: env:
FLAG: ${{ github.repository == 'scylladb/scylla-enterprise' && 'enterprise' || 'opensource' }} FLAG: ${{ github.repository == 'scylladb/scylla-enterprise' && 'enterprise' || 'opensource' }}
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
on: on:
pull_request: pull_request:
@@ -23,16 +22,14 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 uses: actions/checkout@v4
with: with:
persist-credentials: false persist-credentials: false
fetch-depth: 0 fetch-depth: 0
- name: Set up Python - name: Set up Python
uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 uses: actions/setup-python@v5
with: with:
python-version: "3.12" python-version: "3.10"
- name: Install uv
uses: astral-sh/setup-uv@cec208311dfd045dd5311c1add060b2062131d57 # v8.0.0
- name: Set up env - name: Set up env
run: make -C docs FLAG="${{ env.FLAG }}" setupenv run: make -C docs FLAG="${{ env.FLAG }}" setupenv
- name: Build docs - name: Build docs

View File

@@ -3,9 +3,6 @@ name: Docs / Validate metrics
permissions: permissions:
contents: read contents: read
env:
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
on: on:
pull_request: pull_request:
branches: branches:
@@ -24,12 +21,12 @@ jobs:
steps: steps:
- name: Checkout code - name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Set up Python - name: Set up Python
uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 uses: actions/setup-python@v6
with: with:
python-version: '3.10' python-version: '3.10'

View File

@@ -13,10 +13,8 @@ env:
# supposed to be processed by idl-compiler.py, so we don't check them using the cleaner # supposed to be processed by idl-compiler.py, so we don't check them using the cleaner
CLEANER_DIRS: test/unit exceptions alternator api auth cdc compaction db dht gms index lang message mutation mutation_writer node_ops raft redis replica service CLEANER_DIRS: test/unit exceptions alternator api auth cdc compaction db dht gms index lang message mutation mutation_writer node_ops raft redis replica service
SEASTAR_BAD_INCLUDE_OUTPUT_PATH: build/seastar-bad-include.log SEASTAR_BAD_INCLUDE_OUTPUT_PATH: build/seastar-bad-include.log
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
permissions: permissions: {}
contents: read
# cancel the in-progress run upon a repush # cancel the in-progress run upon a repush
concurrency: concurrency:
@@ -33,9 +31,11 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
container: ${{ needs.read-toolchain.outputs.image }} container: ${{ needs.read-toolchain.outputs.image }}
steps: steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- run: |
sudo dnf -y install clang-tools-extra
- name: Generate compilation database - name: Generate compilation database
run: | run: |
cmake \ cmake \
@@ -90,7 +90,7 @@ jobs:
| tee "$SEASTAR_BAD_INCLUDE_OUTPUT_PATH" | tee "$SEASTAR_BAD_INCLUDE_OUTPUT_PATH"
- run: | - run: |
echo "::remove-matcher owner=seastar-bad-include::" echo "::remove-matcher owner=seastar-bad-include::"
- uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0 - uses: actions/upload-artifact@v4
with: with:
name: Logs name: Logs
path: | path: |

View File

@@ -7,7 +7,6 @@ on:
env: env:
DEFAULT_BRANCH: 'master' DEFAULT_BRANCH: 'master'
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
jobs: jobs:
mark-ready: mark-ready:
@@ -18,7 +17,7 @@ jobs:
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 uses: actions/checkout@v4
with: with:
repository: ${{ github.repository }} repository: ${{ github.repository }}
ref: ${{ env.DEFAULT_BRANCH }} ref: ${{ env.DEFAULT_BRANCH }}

View File

@@ -5,8 +5,6 @@ on:
branches: branches:
- master - master
- next - next
env:
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
jobs: jobs:
label: label:
if: github.event.pull_request.draft == false if: github.event.pull_request.draft == false
@@ -17,7 +15,7 @@ jobs:
steps: steps:
- name: Wait for label to be added - name: Wait for label to be added
run: sleep 1m run: sleep 1m
- uses: mheap/github-action-required-labels@0ac283b4e65c1fb28ce6079dea5546ceca98ccbe # v5.5.2 - uses: mheap/github-action-required-labels@v5
with: with:
mode: minimum mode: minimum
count: 1 count: 1

View File

@@ -7,9 +7,6 @@ on:
description: "the toolchain docker image" description: "the toolchain docker image"
value: ${{ jobs.read-toolchain.outputs.image }} value: ${{ jobs.read-toolchain.outputs.image }}
env:
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
jobs: jobs:
read-toolchain: read-toolchain:
runs-on: ubuntu-latest runs-on: ubuntu-latest
@@ -18,7 +15,7 @@ jobs:
outputs: outputs:
image: ${{ steps.read.outputs.image }} image: ${{ steps.read.outputs.image }}
steps: steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - uses: actions/checkout@v4
with: with:
sparse-checkout: tools/toolchain/image sparse-checkout: tools/toolchain/image
sparse-checkout-cone-mode: false sparse-checkout-cone-mode: false

View File

@@ -13,7 +13,6 @@ concurrency:
env: env:
BUILD_DIR: build BUILD_DIR: build
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
jobs: jobs:
read-toolchain: read-toolchain:
@@ -30,12 +29,12 @@ jobs:
- RelWithDebInfo - RelWithDebInfo
- Dev - Dev
steps: steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- run: | - run: |
rm -rf seastar rm -rf seastar
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - uses: actions/checkout@v4
with: with:
repository: scylladb/seastar repository: scylladb/seastar
submodules: true submodules: true

View File

@@ -7,9 +7,6 @@ on:
issues: issues:
types: [labeled, unlabeled] types: [labeled, unlabeled]
env:
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
jobs: jobs:
label-sync: label-sync:
if: ${{ github.repository == 'scylladb/scylladb' }} if: ${{ github.repository == 'scylladb/scylladb' }}
@@ -24,7 +21,7 @@ jobs:
GITHUB_CONTEXT: ${{ toJson(github) }} GITHUB_CONTEXT: ${{ toJson(github) }}
run: echo "$GITHUB_CONTEXT" run: echo "$GITHUB_CONTEXT"
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 uses: actions/checkout@v4
with: with:
sparse-checkout: | sparse-checkout: |
.github/scripts/sync_labels.py .github/scripts/sync_labels.py

View File

@@ -1,6 +1,4 @@
name: Trigger Scylla CI Route name: Trigger Scylla CI Route
permissions:
contents: read
on: on:
issue_comment: issue_comment:
@@ -11,56 +9,16 @@ on:
jobs: jobs:
trigger-jenkins: trigger-jenkins:
if: (github.event_name == 'issue_comment' && github.event.comment.user.login != 'scylladbbot') || github.event.label.name == 'conflicts' if: (github.event.comment.user.login != 'scylladbbot' && contains(github.event.comment.body, '@scylladbbot') && contains(github.event.comment.body, 'trigger-ci')) || github.event.label.name == 'conflicts'
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Verify Org Membership
id: verify_author
env:
EVENT_NAME: ${{ github.event_name }}
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
PR_ASSOCIATION: ${{ github.event.pull_request.author_association }}
COMMENT_AUTHOR: ${{ github.event.comment.user.login }}
COMMENT_ASSOCIATION: ${{ github.event.comment.author_association }}
shell: bash
run: |
if [[ "$EVENT_NAME" == "pull_request_target" ]]; then
AUTHOR="$PR_AUTHOR"
ASSOCIATION="$PR_ASSOCIATION"
else
AUTHOR="$COMMENT_AUTHOR"
ASSOCIATION="$COMMENT_ASSOCIATION"
fi
if [[ "$ASSOCIATION" == "MEMBER" || "$ASSOCIATION" == "OWNER" ]]; then
echo "member=true" >> $GITHUB_OUTPUT
else
echo "::warning::${AUTHOR} is not a member of scylladb (association: ${ASSOCIATION}); skipping CI trigger."
echo "member=false" >> $GITHUB_OUTPUT
fi
- name: Validate Comment Trigger
if: github.event_name == 'issue_comment'
id: verify_comment
env:
COMMENT_BODY: ${{ github.event.comment.body }}
shell: bash
run: |
CLEAN_BODY=$(echo "$COMMENT_BODY" | grep -v '^[[:space:]]*>')
if echo "$CLEAN_BODY" | grep -qi '@scylladbbot' && echo "$CLEAN_BODY" | grep -qi 'trigger-ci'; then
echo "trigger=true" >> $GITHUB_OUTPUT
else
echo "trigger=false" >> $GITHUB_OUTPUT
fi
- name: Trigger Scylla-CI-Route Jenkins Job - name: Trigger Scylla-CI-Route Jenkins Job
if: steps.verify_author.outputs.member == 'true' && (github.event_name == 'pull_request_target' || steps.verify_comment.outputs.trigger == 'true')
env: env:
JENKINS_USER: ${{ secrets.JENKINS_USERNAME }} JENKINS_USER: ${{ secrets.JENKINS_USERNAME }}
JENKINS_API_TOKEN: ${{ secrets.JENKINS_TOKEN }} JENKINS_API_TOKEN: ${{ secrets.JENKINS_TOKEN }}
JENKINS_URL: "https://jenkins.scylladb.com" JENKINS_URL: "https://jenkins.scylladb.com"
PR_NUMBER: "${{ github.event.issue.number || github.event.pull_request.number }}"
PR_REPO_NAME: "${{ github.event.repository.full_name }}"
run: | run: |
PR_NUMBER=${{ github.event.issue.number }}
PR_REPO_NAME=${{ github.event.repository.full_name }}
curl -X POST "$JENKINS_URL/job/releng/job/Scylla-CI-Route/buildWithParameters?PR_NUMBER=$PR_NUMBER&PR_REPO_NAME=$PR_REPO_NAME" \ curl -X POST "$JENKINS_URL/job/releng/job/Scylla-CI-Route/buildWithParameters?PR_NUMBER=$PR_NUMBER&PR_REPO_NAME=$PR_REPO_NAME" \
--user "$JENKINS_USER:$JENKINS_API_TOKEN" --fail --user "$JENKINS_USER:$JENKINS_API_TOKEN" --fail -i -v

View File

@@ -5,10 +5,7 @@ on:
types: [opened, reopened, synchronize] types: [opened, reopened, synchronize]
issue_comment: issue_comment:
types: [created] types: [created]
env:
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
jobs: jobs:
trigger-ci: trigger-ci:
runs-on: ubuntu-latest runs-on: ubuntu-latest
@@ -18,7 +15,7 @@ jobs:
GITHUB_CONTEXT: ${{ toJson(github) }} GITHUB_CONTEXT: ${{ toJson(github) }}
run: echo "$GITHUB_CONTEXT" run: echo "$GITHUB_CONTEXT"
- name: Checkout PR code - name: Checkout PR code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 uses: actions/checkout@v3
with: with:
fetch-depth: 0 # Needed to access full history fetch-depth: 0 # Needed to access full history
ref: ${{ github.event.pull_request.head.ref }} ref: ${{ github.event.pull_request.head.ref }}

View File

@@ -1,8 +1,5 @@
name: Trigger next gating name: Trigger next gating
permissions:
contents: read
on: on:
push: push:
branches: branches:

View File

@@ -4,16 +4,13 @@ on:
schedule: schedule:
- cron: '10 8 * * *' # Runs daily at 8 AM - cron: '10 8 * * *' # Runs daily at 8 AM
env:
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
jobs: jobs:
reminder: reminder:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Send reminders - name: Send reminders
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 uses: actions/github-script@v7
with: with:
script: | script: |
const labelFilters = ['P0', 'P1', 'Field-Tier1','status/release blocker', 'status/regression']; const labelFilters = ['P0', 'P1', 'Field-Tier1','status/release blocker', 'status/regression'];

View File

@@ -2,12 +2,6 @@ cmake_minimum_required(VERSION 3.27)
project(scylla) project(scylla)
# Disable CMake's automatic -fcolor-diagnostics injection (CMake 3.24+ adds
# it for Clang+Ninja). configure.py does not add any color diagnostics flags,
# so we clear the internal CMake variable to prevent injection.
set(CMAKE_CXX_COMPILE_OPTIONS_COLOR_DIAGNOSTICS "")
set(CMAKE_C_COMPILE_OPTIONS_COLOR_DIAGNOSTICS "")
list(APPEND CMAKE_MODULE_PATH list(APPEND CMAKE_MODULE_PATH
${CMAKE_CURRENT_SOURCE_DIR}/cmake ${CMAKE_CURRENT_SOURCE_DIR}/cmake
${CMAKE_CURRENT_SOURCE_DIR}/seastar/cmake) ${CMAKE_CURRENT_SOURCE_DIR}/seastar/cmake)
@@ -57,16 +51,6 @@ set(CMAKE_CXX_EXTENSIONS ON CACHE INTERNAL "")
set(CMAKE_CXX_SCAN_FOR_MODULES OFF CACHE INTERNAL "") set(CMAKE_CXX_SCAN_FOR_MODULES OFF CACHE INTERNAL "")
set(CMAKE_VISIBILITY_INLINES_HIDDEN ON) set(CMAKE_VISIBILITY_INLINES_HIDDEN ON)
# Global defines matching configure.py
# Since gcc 13, libgcc doesn't need the exception workaround
add_compile_definitions(SEASTAR_NO_EXCEPTION_HACK)
# Hacks needed to expose internal APIs for xxhash dependencies
add_compile_definitions(XXH_PRIVATE_API)
# SEASTAR_TESTING_MAIN is added later (after add_subdirectory(seastar) and
# add_subdirectory(abseil)) to avoid leaking into the seastar subdirectory.
# If SEASTAR_TESTING_MAIN is defined globally before seastar, it causes a
# duplicate 'main' symbol in seastar_testing.
if(is_multi_config) if(is_multi_config)
find_package(Seastar) find_package(Seastar)
# this is atypical compared to standard ExternalProject usage: # this is atypical compared to standard ExternalProject usage:
@@ -112,33 +96,12 @@ else()
set(Seastar_EXCLUDE_APPS_FROM_ALL ON CACHE BOOL "" FORCE) set(Seastar_EXCLUDE_APPS_FROM_ALL ON CACHE BOOL "" FORCE)
set(Seastar_EXCLUDE_TESTS_FROM_ALL ON CACHE BOOL "" FORCE) set(Seastar_EXCLUDE_TESTS_FROM_ALL ON CACHE BOOL "" FORCE)
set(Seastar_IO_URING ON CACHE BOOL "" FORCE) set(Seastar_IO_URING ON CACHE BOOL "" FORCE)
set(Seastar_SCHEDULING_GROUPS_COUNT 24 CACHE STRING "" FORCE) set(Seastar_SCHEDULING_GROUPS_COUNT 21 CACHE STRING "" FORCE)
set(Seastar_UNUSED_RESULT_ERROR ON CACHE BOOL "" FORCE) set(Seastar_UNUSED_RESULT_ERROR ON CACHE BOOL "" FORCE)
# Match configure.py's build_seastar_shared_libs: Debug and Dev
# build Seastar as a shared library, others build it static.
if(CMAKE_BUILD_TYPE STREQUAL "Debug" OR CMAKE_BUILD_TYPE STREQUAL "Dev")
set(BUILD_SHARED_LIBS ON CACHE BOOL "" FORCE)
else()
set(BUILD_SHARED_LIBS OFF CACHE BOOL "" FORCE)
endif()
add_subdirectory(seastar) add_subdirectory(seastar)
target_compile_definitions (seastar
# Coverage mode sets cmake_build_type='Debug' for Seastar PRIVATE
# (configure.py:515), so Seastar's pkg-config output includes sanitizer SEASTAR_NO_EXCEPTION_HACK)
# link flags in seastar_libs_coverage (configure.py:2514,2649).
# Seastar's own CMake only activates sanitizer targets for Debug/Sanitize
# configs, so we inject link options on the seastar target for Coverage.
# Using PUBLIC ensures they propagate to all targets linking Seastar
# (but not standalone tools like patchelf), matching configure.py's
# behavior. Compile-time flags and defines are handled globally in
# cmake/mode.Coverage.cmake.
if(CMAKE_BUILD_TYPE STREQUAL "Coverage")
target_link_options(seastar
PUBLIC
-fsanitize=address
-fsanitize=undefined
-fsanitize=vptr)
endif()
endif() endif()
set(ABSL_PROPAGATE_CXX_STD ON CACHE BOOL "" FORCE) set(ABSL_PROPAGATE_CXX_STD ON CACHE BOOL "" FORCE)
@@ -148,10 +111,8 @@ if(Scylla_ENABLE_LTO)
endif() endif()
find_package(Sanitizers QUIET) find_package(Sanitizers QUIET)
# Match configure.py:2192 — abseil gets sanitizer flags with -fno-sanitize=vptr
# to exclude vptr checks which are incompatible with abseil's usage.
list(APPEND absl_cxx_flags list(APPEND absl_cxx_flags
$<$<CONFIG:Debug,Sanitize>:$<TARGET_PROPERTY:Sanitizers::address,INTERFACE_COMPILE_OPTIONS>;$<TARGET_PROPERTY:Sanitizers::undefined_behavior,INTERFACE_COMPILE_OPTIONS>;-fno-sanitize=vptr>) $<$<CONFIG:Debug,Sanitize>:$<TARGET_PROPERTY:Sanitizers::address,INTERFACE_COMPILE_OPTIONS>;$<TARGET_PROPERTY:Sanitizers::undefined_behavior,INTERFACE_COMPILE_OPTIONS>>)
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
list(APPEND ABSL_GCC_FLAGS ${absl_cxx_flags}) list(APPEND ABSL_GCC_FLAGS ${absl_cxx_flags})
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
@@ -176,38 +137,9 @@ add_library(absl::headers ALIAS absl-headers)
# unfortunately. # unfortunately.
set_target_properties(absl_strerror PROPERTIES EXCLUDE_FROM_ALL TRUE) set_target_properties(absl_strerror PROPERTIES EXCLUDE_FROM_ALL TRUE)
# Now that seastar and abseil subdirectories are fully processed, add
# SEASTAR_TESTING_MAIN globally. This matches configure.py's global define
# without leaking into seastar (which would cause duplicate main symbols).
add_compile_definitions(SEASTAR_TESTING_MAIN)
# System libraries dependencies # System libraries dependencies
find_package(Boost REQUIRED find_package(Boost REQUIRED
COMPONENTS filesystem program_options system thread regex unit_test_framework) COMPONENTS filesystem program_options system thread regex unit_test_framework)
# When using shared Boost libraries, define BOOST_ALL_DYN_LINK (matching configure.py)
if(NOT Boost_USE_STATIC_LIBS)
add_compile_definitions(BOOST_ALL_DYN_LINK)
endif()
# CMake's Boost package config adds per-component defines like
# BOOST_UNIT_TEST_FRAMEWORK_DYN_LINK, BOOST_REGEX_DYN_LINK, etc. on the
# imported targets. configure.py only uses BOOST_ALL_DYN_LINK (which covers
# all components), so strip the per-component defines to align the two build
# systems.
foreach(_boost_target
Boost::unit_test_framework
Boost::regex
Boost::filesystem
Boost::program_options
Boost::system
Boost::thread)
if(TARGET ${_boost_target})
# Completely remove all INTERFACE_COMPILE_DEFINITIONS from the Boost target.
# This prevents per-component *_DYN_LINK and *_NO_LIB defines from
# propagating. BOOST_ALL_DYN_LINK (set globally) covers all components.
set_property(TARGET ${_boost_target} PROPERTY INTERFACE_COMPILE_DEFINITIONS)
endif()
endforeach()
target_link_libraries(Boost::regex target_link_libraries(Boost::regex
INTERFACE INTERFACE
ICU::i18n ICU::i18n
@@ -264,10 +196,6 @@ if (Scylla_USE_PRECOMPILED_HEADER)
message(STATUS "Using precompiled header for Scylla - remember to add `sloppiness = pch_defines,time_macros` to ccache.conf, if you're using ccache.") message(STATUS "Using precompiled header for Scylla - remember to add `sloppiness = pch_defines,time_macros` to ccache.conf, if you're using ccache.")
target_precompile_headers(scylla-precompiled-header PRIVATE "stdafx.hh") target_precompile_headers(scylla-precompiled-header PRIVATE "stdafx.hh")
target_compile_definitions(scylla-precompiled-header PRIVATE SCYLLA_USE_PRECOMPILED_HEADER) target_compile_definitions(scylla-precompiled-header PRIVATE SCYLLA_USE_PRECOMPILED_HEADER)
# Match configure.py: -fpch-validate-input-files-content tells the compiler
# to check content of stdafx.hh if timestamps don't match (important for
# ccache/git workflows where timestamps may not be preserved).
add_compile_options(-fpch-validate-input-files-content)
endif() endif()
else() else()
set(Scylla_USE_PRECOMPILED_HEADER_USE OFF) set(Scylla_USE_PRECOMPILED_HEADER_USE OFF)
@@ -372,6 +300,7 @@ add_subdirectory(locator)
add_subdirectory(message) add_subdirectory(message)
add_subdirectory(mutation) add_subdirectory(mutation)
add_subdirectory(mutation_writer) add_subdirectory(mutation_writer)
add_subdirectory(node_ops)
add_subdirectory(readers) add_subdirectory(readers)
add_subdirectory(replica) add_subdirectory(replica)
add_subdirectory(raft) add_subdirectory(raft)

View File

@@ -1,8 +1,8 @@
## **SCYLLADB SOFTWARE LICENSE AGREEMENT** ## **SCYLLADB SOFTWARE LICENSE AGREEMENT**
| Version: | 1.1 | | Version: | 1.0 |
| :---- | :---- | | :---- | :---- |
| Last updated: | April 12, 2026 | | Last updated: | December 18, 2024 |
**Your Acceptance** **Your Acceptance**
@@ -12,48 +12,20 @@ The terms "**You**" or "**Licensee**" refer to any individual accessing or using
**Grant of License** **Grant of License**
* **Definitions:** * **Software Definitions:** Software means the ScyllaDB software provided by Licensor, including the source code, object code, and any accompanying documentation or tools, or any part thereof, as made available under this Agreement.
1. **Software:** Software means the ScyllaDB software provided by Licensor, including the source code, object code, and any accompanying documentation or tools, or any part thereof, as made available under this Agreement. * **Grant of License:** Subject to the terms and conditions of this Agreement, Licensor grants You a limited, non-exclusive, revocable, non-sublicensable, non-transferable, royalty free license to Use the Software, in each case solely for the purposes of:
2. **Commercial Customer**: means any legal entity (including its Affiliates) that has entered into a transaction with Licensor, or an authorized reseller/distributor, for the provision of any ScyllaDB products or services. This includes, without limitation: (a) Scope of Service: Any paid subscription, enterprise license, "BYOA" or Database-as-a-Service (DBaaS) offering, technical support, professional services, consulting, or training. (b) Scale and Volume: Any deployment regardless of size, capacity, or performance metrics (c) Payment Method: Any compensation model, including but not limited to, fixed-fee, consumption-based (On-Demand), committed spend, third-party marketplace credits (e.g., AWS, GCP, Azure), or promotional credits and discounts.
* **Grant of License:** Subject to the terms and conditions of this Agreement, including the Eligibility and Exclusive Use Restrictions clause, Licensor grants You a limited, non-exclusive, revocable, non-sublicensable, non-transferable, royalty free license to Use the Software, in each case solely for the purposes of:
1) Copying, distributing, evaluating (including performing benchmarking or comparative tests or evaluations , subject to the limitations below) and improving the Software and ScyllaDB; and 1) Copying, distributing, evaluating (including performing benchmarking or comparative tests or evaluations , subject to the limitations below) and improving the Software and ScyllaDB; and
2) create a modified version of the Software (each, a "**Licensed Work**"); provided however, that each such Licensed Work keeps all or substantially all of the functions and features of the Software, and/or using all or substantially all of the source code of the Software. You hereby agree that all the Licensed Work are, upon creation, considered Licensed Work of the Licensor, shall be the sole property of the Licensor and its assignees, and the Licensor and its assignees shall be the sole owner of all rights of any kind or nature, in connection with such Licensed Work. You hereby irrevocably and unconditionally assign to the Licensor all the Licensed Work and any part thereof. This License applies separately for each version of the Licensed Work, which shall be considered "Software" for the purpose of this Agreement. 2) create a modified version of the Software (each, a "**Licensed Work**"); provided however, that each such Licensed Work keeps all or substantially all of the functions and features of the Software, and/or using all or substantially all of the source code of the Software. You hereby agree that all the Licensed Work are, upon creation, considered Licensed Work of the Licensor, shall be the sole property of the Licensor and its assignees, and the Licensor and its assignees shall be the sole owner of all rights of any kind or nature, in connection with such Licensed Work. You hereby irrevocably and unconditionally assign to the Licensor all the Licensed Work and any part thereof. This License applies separately for each version of the Licensed Work, which shall be considered "Software" for the purpose of this Agreement.
* **Eligibility and Exclusive Use Restrictions**
i. Restricted to "Never Customers" Only. The license granted under this Agreement is strictly limited to Never Customers. For purposes of this Agreement, a "Never Customer" is an entity (including its Affiliates) that does not have, and has not had within the previous twelve (12) months, a paid commercial subscription, professional services agreement, or any other commercial relationship with Licensor. Satisfaction of the Never Customer criteria is a strict condition precedent to the effectiveness of this License. **License Limitations, Restrictions and Obligations:** The license grant above is subject to the following limitations, restrictions, and obligations. If Licensees Use of the Software does not comply with the above license grant or the terms of this section (including exceeding the Usage Limit set forth below), Licensee must: (i) refrain from any Use of the Software; and (ii) purchase a [commercial paid license](https://www.scylladb.com/scylladb-proprietary-software-license-agreement/) from the Licensor.
ii. Total Prohibition for Existing Commercial Customers. If You (or any of Your Affiliates) are an existing Commercial Customer of Licensor within the last twelve (12) months, no license is deemed to have been offered or extended to You, and any download or installation of the Software by You is unauthorized. This prohibition applies to all deployments, including but not limited to:
(a) existing commercial workloads;
(b) any new use cases, new applications, or new workloads
iii. **No "Hybrid" Usage**. Licensee is expressly prohibited from combining free tier usage under this Agreement with paid commercial units.
If You are a Commercial Customer, all use of the Software across Your entire organization (and any of your Affiliates) must be governed by a valid, paid commercial agreement. Use of the Software under this license by a Commercial Customer (which is not a "Never Customer") shall:
(a) Void this license *ab initio*;
(b) Be deemed a material breach of both this Agreement and any existing commercial terms; and
(c) Entitle Licensor to invoice Licensee for such unauthorized usage at Licensor's standard list prices, retroactive to the date of first use.
Notwithstanding anything to the contrary in the Eligibility or License Limitations sections above a Commercial Customer may use the Software exclusively for non-production purposes, including Continuous Integration (CI), automated testing, and quality assurance environments, provided that such use at all times remains compliant with the Usage Limit.
iv. **Verification**. Licensor reserves the right to audit Licensee's environment and corporate identity to ensure compliance with these eligibility criteria.
For the purposes of this Agreement an "**Affiliate**" means any entity that directly or indirectly controls, is controlled by, or is under common control with a party, where "control" means ownership of more than 50% of the voting stock or decision-making authority
**License Limitations, Restrictions and Obligations:** The license grant above is subject to the following limitations, restrictions, and obligations. If Licensees Use of the Software does not comply with the above license grant or the terms of this section (including exceeding the Usage Limit set forth below), Licensee must: (i) refrain from any Use of the Software; and (ii) unless Licensee is a Never Customer, purchase a [commercial paid license](https://www.scylladb.com/scylladb-proprietary-software-license-agreement/) from the Licensor.
* **Updates:** You shall be solely responsible for providing all equipment, systems, assets, access, and ancillary goods and services needed to access and Use the Software. Licensor may modify or update the Software at any time, without notification, in its sole and absolute discretion. After the effective date of each such update, Licensor shall bear no obligation to run, provide or support legacy versions of the Software. * **Updates:** You shall be solely responsible for providing all equipment, systems, assets, access, and ancillary goods and services needed to access and Use the Software. Licensor may modify or update the Software at any time, without notification, in its sole and absolute discretion. After the effective date of each such update, Licensor shall bear no obligation to run, provide or support legacy versions of the Software.
* **"Usage Limit":** Licensee's total overall available storage across all deployments and clusters of the Software and the Licensed Work under this License shall not exceed 10TB and/or an upper limit of 50 VCPUs (hyper threads). * **"Usage Limit":** Licensee's total overall available storage across all deployments and clusters of the Software and the Licensed Work under this License shall not exceed 10TB and/or an upper limit of 50 VCPUs (hyper threads).
* **IP Markings:** Licensee must retain all copyright, trademark, and other proprietary notices contained in the Software. You will not modify, delete, alter, remove, or obscure any intellectual property, including without limitations licensing, copyright, trademark, or any other notices of Licensor in the Software. * **IP Markings:** Licensee must retain all copyright, trademark, and other proprietary notices contained in the Software. You will not modify, delete, alter, remove, or obscure any intellectual property, including without limitations licensing, copyright, trademark, or any other notices of Licensor in the Software.
* **License Reproduction:** You must conspicuously display this Agreement on each copy of the Software. If You receive the Software from a third party, this Agreement still applies to Your Use of the Software. You will be responsible for any breach of this Agreement by any such third-party. * **License Reproduction:** You must conspicuously display this Agreement on each copy of the Software. If You receive the Software from a third party, this Agreement still applies to Your Use of the Software. You will be responsible for any breach of this Agreement by any such third-party.
* Distribution of any Licensed Works is permitted, provided that: (i) You must include in any Licensed Work prominent notices stating that You have modified the Software, (ii) You include a copy of this Agreement with the Licensed Work, and (iii) You clearly identify all modifications made in the Licensed Work and provides attribution to the Licensor as the original author(s) of the Software. * Distribution of any Licensed Works is permitted, provided that: (i) You must include in any Licensed Work prominent notices stating that You have modified the Software, (ii) You include a copy of this Agreement with the Licensed Work, and (iii) You clearly identify all modifications made in the Licensed Work and provides attribution to the Licensor as the original author(s) of the Software.
* **Commercial Use Restrictions:** Licensee may not offer the Software as a software-as-a-service (SaaS) or commercial database-as-a-service (dBaaS) offering. Licensee may not use the Software to compete with Licensor's existing or future products or services. If your Use of the Software does not comply with the requirements currently in effect as described in this License, you must purchase a commercial license from the Licensor, its Affiliated entities, or you must refrain from using the Software and all Licensed Work. Furthermore, if You make any written claim of patent infringement relating to the Software, Your patent license for the Software granted under this Agreement terminates immediately. * **Commercial Use Restrictions:** Licensee may not offer the Software as a software-as-a-service (SaaS) or commercial database-as-a-service (dBaaS) offering. Licensee may not use the Software to compete with Licensor's existing or future products or services. If your Use of the Software does not comply with the requirements currently in effect as described in this License, you must purchase a commercial license from the Licensor, its affiliated entities, or you must refrain from using the Software and all Licensed Work. Furthermore, if You make any written claim of patent infringement relating to the Software, Your patent license for the Software granted under this Agreement terminates immediately.
* Notwithstanding anything to the contrary, under the License granted hereunder, You shall not and shall not permit others to: (i) transfer the Software or any portions thereof to any other party except as expressly permitted herein; (ii) attempt to circumvent or overcome any technological protection measures incorporated into the Software; (iii) incorporate the Software into the structure, machinery or controls of any aircraft, other aerial device, military vehicle, hovercraft, waterborne craft or any medical equipment of any kind; or (iv) use the Software or any part thereof in any unlawful, harmful or illegal manner, or in a manner which infringes third parties rights in any way, including intellectual property rights. * Notwithstanding anything to the contrary, under the License granted hereunder, You shall not and shall not permit others to: (i) transfer the Software or any portions thereof to any other party except as expressly permitted herein; (ii) attempt to circumvent or overcome any technological protection measures incorporated into the Software; (iii) incorporate the Software into the structure, machinery or controls of any aircraft, other aerial device, military vehicle, hovercraft, waterborne craft or any medical equipment of any kind; or (iv) use the Software or any part thereof in any unlawful, harmful or illegal manner, or in a manner which infringes third parties rights in any way, including intellectual property rights.
**Monitoring; Audit** **Monitoring; Audit**
@@ -69,14 +41,14 @@ For the purposes of this Agreement an "**Affiliate**" means any entity that dire
**Indemnity; Disclaimer; Limitation of Liability** **Indemnity; Disclaimer; Limitation of Liability**
* **Indemnity:** Licensee hereby agrees to indemnify, defend and hold harmless Licensor and its Affiliates from any losses or damages incurred due to a third party claim arising out of: (i) Licensee's breach of this Agreement; (ii) Licensee's negligence, willful misconduct or violation of law, or (iii) Licensee's products or services. * **Indemnity:** Licensee hereby agrees to indemnify, defend and hold harmless Licensor and its affiliates from any losses or damages incurred due to a third party claim arising out of: (i) Licensee's breach of this Agreement; (ii) Licensee's negligence, willful misconduct or violation of law, or (iii) Licensee's products or services.
* DISCLAIMER OF WARRANTIES: LICENSEE AGREES THAT LICENSOR HAS MADE NO EXPRESS WARRANTIES REGARDING THE SOFTWARE AND THAT THE SOFTWARE IS BEING PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND. LICENSOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THE SOFTWARE, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION, ANY IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR PURPOSE; TITLE; MERCHANTABILITY; OR NON-INFRINGEMENT OF THIRD PARTY RIGHTS. LICENSOR DOES NOT WARRANT THAT THE SOFTWARE WILL OPERATE UNINTERRUPTED OR ERROR FREE, OR THAT ALL ERRORS WILL BE CORRECTED. LICENSOR DOES NOT GUARANTEE ANY PARTICULAR RESULTS FROM THE USE OF THE SOFTWARE, AND DOES NOT WARRANT THAT THE SOFTWARE IS FIT FOR ANY PARTICULAR PURPOSE. * DISCLAIMER OF WARRANTIES: LICENSEE AGREES THAT LICENSOR HAS MADE NO EXPRESS WARRANTIES REGARDING THE SOFTWARE AND THAT THE SOFTWARE IS BEING PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND. LICENSOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THE SOFTWARE, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION, ANY IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR PURPOSE; TITLE; MERCHANTABILITY; OR NON-INFRINGEMENT OF THIRD PARTY RIGHTS. LICENSOR DOES NOT WARRANT THAT THE SOFTWARE WILL OPERATE UNINTERRUPTED OR ERROR FREE, OR THAT ALL ERRORS WILL BE CORRECTED. LICENSOR DOES NOT GUARANTEE ANY PARTICULAR RESULTS FROM THE USE OF THE SOFTWARE, AND DOES NOT WARRANT THAT THE SOFTWARE IS FIT FOR ANY PARTICULAR PURPOSE.
* LIMITATION OF LIABILITY: TO THE FULLEST EXTENT PERMISSIBLE UNDER APPLICABLE LAW, IN NO EVENT WILL LICENSOR AND/OR ITS AFFILIATES, EMPLOYEES, OFFICERS AND DIRECTORS BE LIABLE TO LICENSEE FOR (I) ANY LOSS OF USE OR DATA; INTERRUPTION OF BUSINESS; OR ANY INDIRECT; SPECIAL; INCIDENTAL; OR CONSEQUENTIAL DAMAGES OF ANY KIND (INCLUDING LOST PROFITS); AND (II) ANY DIRECT DAMAGES EXCEEDING THE TOTAL AMOUNT OF ONE THOUSAND US DOLLARS ($1,000). THE FOREGOING PROVISIONS LIMITING THE LIABILITY OF LICENSOR SHALL APPLY REGARDLESS OF THE FORM OR CAUSE OF ACTION, WHETHER IN STRICT LIABILITY, CONTRACT OR TORT. * LIMITATION OF LIABILITY: TO THE FULLEST EXTENT PERMISSIBLE UNDER APPLICABLE LAW, IN NO EVENT WILL LICENSOR AND/OR ITS AFFILIATES, EMPLOYEES, OFFICERS AND DIRECTORS BE LIABLE TO LICENSEE FOR (I) ANY LOSS OF USE OR DATA; INTERRUPTION OF BUSINESS; OR ANY INDIRECT; SPECIAL; INCIDENTAL; OR CONSEQUENTIAL DAMAGES OF ANY KIND (INCLUDING LOST PROFITS); AND (II) ANY DIRECT DAMAGES EXCEEDING THE TOTAL AMOUNT OF ONE THOUSAND US DOLLARS ($1,000). THE FOREGOING PROVISIONS LIMITING THE LIABILITY OF LICENSOR SHALL APPLY REGARDLESS OF THE FORM OR CAUSE OF ACTION, WHETHER IN STRICT LIABILITY, CONTRACT OR TORT.
**Proprietary Rights; No Other Rights** **Proprietary Rights; No Other Rights**
* **Ownership:** Licensor retains sole and exclusive ownership of all rights, interests and title in the Software and any scripts, processes, techniques, methodologies, inventions, know-how, concepts, formatting, arrangements, visual attributes, ideas, database rights, copyrights, patents, trade secrets, and other intellectual property related thereto, and all derivatives, enhancements, modifications and improvements thereof. Except for the limited license rights granted herein, Licensee has no rights in or to the Software and/ or Licensors trademarks, logo, or branding and You acknowledge that such Software, trademarks, logo, or branding is the sole property of Licensor. * **Ownership:** Licensor retains sole and exclusive ownership of all rights, interests and title in the Software and any scripts, processes, techniques, methodologies, inventions, know-how, concepts, formatting, arrangements, visual attributes, ideas, database rights, copyrights, patents, trade secrets, and other intellectual property related thereto, and all derivatives, enhancements, modifications and improvements thereof. Except for the limited license rights granted herein, Licensee has no rights in or to the Software and/ or Licensors trademarks, logo, or branding and You acknowledge that such Software, trademarks, logo, or branding is the sole property of Licensor.
* **Feedback:** Licensee is not required to provide any suggestions, enhancement requests, recommendations or other feedback regarding the Software ("Feedback"). If, notwithstanding this policy, Licensee submits Feedback, Licensee understands and acknowledges that such Feedback is not submitted in confidence and Licensor assumes no obligation, expressed or implied, by considering it. All right in any trademark or logo of Licensor or its Affiliates and You shall make no claim of right to the Software or any part thereof to be supplied by Licensor hereunder and acknowledges that as between Licensor and You, such Software is the sole proprietary, title and interest in and to Licensor. Such Feedback shall be assigned to, and shall become the sole and exclusive property of, Licensor upon its creation. * **Feedback:** Licensee is not required to provide any suggestions, enhancement requests, recommendations or other feedback regarding the Software ("Feedback"). If, notwithstanding this policy, Licensee submits Feedback, Licensee understands and acknowledges that such Feedback is not submitted in confidence and Licensor assumes no obligation, expressed or implied, by considering it. All right in any trademark or logo of Licensor or its affiliates and You shall make no claim of right to the Software or any part thereof to be supplied by Licensor hereunder and acknowledges that as between Licensor and You, such Software is the sole proprietary, title and interest in and to Licensor. Such Feedback shall be assigned to, and shall become the sole and exclusive property of, Licensor upon its creation.
* Except for the rights expressly granted to You under this Agreement, You are not granted any other licenses or rights in the Software or otherwise. This Agreement constitutes the entire agreement between You and the Licensor with respect to the subject matter hereof and supersedes all prior or contemporaneous communications, representations, or agreements, whether oral or written. * Except for the rights expressly granted to You under this Agreement, You are not granted any other licenses or rights in the Software or otherwise. This Agreement constitutes the entire agreement between You and the Licensor with respect to the subject matter hereof and supersedes all prior or contemporaneous communications, representations, or agreements, whether oral or written.
* **Third-Party Software:** Customer acknowledges that the Software may contain open and closed source components (“OSS Components”) that are governed separately by certain licenses, in each case as further provided by Company upon request. Any applicable OSS Component license is solely between Licensee and the applicable licensor of the OSS Component and Licensee shall comply with the applicable OSS Component license. * **Third-Party Software:** Customer acknowledges that the Software may contain open and closed source components (“OSS Components”) that are governed separately by certain licenses, in each case as further provided by Company upon request. Any applicable OSS Component license is solely between Licensee and the applicable licensor of the OSS Component and Licensee shall comply with the applicable OSS Component license.
* If any provision of this Agreement is held to be invalid or unenforceable, such provision shall be struck and the remaining provisions shall remain in full force and effect. * If any provision of this Agreement is held to be invalid or unenforceable, such provision shall be struck and the remaining provisions shall remain in full force and effect.
@@ -84,7 +56,7 @@ For the purposes of this Agreement an "**Affiliate**" means any entity that dire
**Miscellaneous** **Miscellaneous**
* **Miscellaneous:** This Agreement may be modified at any time by Licensor, and constitutes the entire agreement between the parties with respect to the subject matter hereof. Licensee may not assign or subcontract its rights or obligations under this Agreement. This Agreement does not, and shall not be construed to create any relationship, partnership, joint venture, employer-employee, agency, or franchisor-franchisee relationship between the parties. * **Miscellaneous:** This Agreement may be modified at any time by Licensor, and constitutes the entire agreement between the parties with respect to the subject matter hereof. Licensee may not assign or subcontract its rights or obligations under this Agreement. This Agreement does not, and shall not be construed to create any relationship, partnership, joint venture, employer-employee, agency, or franchisor-franchisee relationship between the parties.
* **Modifications**: Licensor reserves the right to modify this Agreement at any time. Changes will be effective upon posting to the Website or within the Software repository. Continued use of the Software after such changes constitutes acceptance.
* **Governing Law & Jurisdiction:** This Agreement shall be governed and construed in accordance with the laws of Israel, without giving effect to their respective conflicts of laws provisions, and the competent courts situated in Tel Aviv, Israel, shall have sole and exclusive jurisdiction over the parties and any conflict and/or dispute arising out of, or in connection to, this Agreement * **Governing Law & Jurisdiction:** This Agreement shall be governed and construed in accordance with the laws of Israel, without giving effect to their respective conflicts of laws provisions, and the competent courts situated in Tel Aviv, Israel, shall have sole and exclusive jurisdiction over the parties and any conflict and/or dispute arising out of, or in connection to, this Agreement
\[*End of ScyllaDB Software License Agreement*\] \[*End of ScyllaDB Software License Agreement*\]

View File

@@ -43,7 +43,7 @@ For further information, please see:
[developer documentation]: HACKING.md [developer documentation]: HACKING.md
[build documentation]: docs/dev/building.md [build documentation]: docs/dev/building.md
[docker image build documentation]: dist/docker/redhat/README.md [docker image build documentation]: dist/docker/debian/README.md
## Running Scylla ## Running Scylla

View File

@@ -78,7 +78,7 @@ fi
# Default scylla product/version tags # Default scylla product/version tags
PRODUCT=scylla PRODUCT=scylla
VERSION=2026.2.0-dev VERSION=2026.1.0-dev
if test -f version if test -f version
then then

2
abseil

Submodule abseil updated: 255c84dadd...d7aaad83b4

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#include "absl-flat_hash_map.hh" #include "absl-flat_hash_map.hh"

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#pragma once #pragma once

View File

@@ -9,8 +9,6 @@ target_sources(alternator
controller.cc controller.cc
server.cc server.cc
executor.cc executor.cc
executor_read.cc
executor_util.cc
stats.cc stats.cc
serialization.cc serialization.cc
expressions.cc expressions.cc

View File

@@ -1,253 +0,0 @@
/*
* Copyright 2019-present ScyllaDB
*/
/*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
*/
#pragma once
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <unordered_map>
#include <variant>
#include "utils/rjson.hh"
#include "utils/overloaded_functor.hh"
#include "alternator/error.hh"
#include "alternator/expressions_types.hh"
namespace alternator {
// An attribute_path_map object is used to hold data for various attribute
// paths (parsed::path) in a hierarchy of attribute paths. Each attribute path
// has a root attribute, and then modified by member and index operators -
// for example in "a.b[2].c" we have "a" as the root, then ".b" member, then
// "[2]" index, and finally ".c" member.
// Data can be added to an attribute_path_map using the add() function, but
// requires that attributes with data not be *overlapping* or *conflicting*:
//
// 1. Two attribute paths which are identical or an ancestor of one another
//    are considered *overlapping* and not allowed. If a.b.c has data,
//    we can't add more data in a.b.c or any of its descendants like a.b.c.d.
//
// 2. Two attribute paths which need the same parent to have both a member and
//    an index are considered *conflicting* and not allowed. E.g., if a.b has
//    data, you can't add a[1]. The meaning of adding both would be that the
//    attribute a is both a map and an array, which isn't sensible.
//
// These two requirements are common to the two places where Alternator uses
// this abstraction to describe how a hierarchical item is to be transformed:
//
// 1. In ProjectionExpression: for filtering from a full top-level attribute
//    only the parts for which the user asked in ProjectionExpression.
//
// 2. In UpdateExpression: for taking the previous value of a top-level
//    attribute, and modifying it based on the instructions the user
//    wrote in UpdateExpression.
template<typename T>
class attribute_path_map_node {
public:
    // The payload type stored at a node which holds actual data.
    using data_t = T;
    // Children reached through a member operator (".name").
    // We need the extra unique_ptr<> here because libstdc++ unordered_map
    // doesn't work with incomplete types :-(
    using members_t = std::unordered_map<std::string, std::unique_ptr<attribute_path_map_node<T>>>;
    // Children reached through an index operator ("[i]").
    // The indexes list is sorted because DynamoDB requires handling writes
    // beyond the end of a list in index order.
    using indexes_t = std::map<unsigned, std::unique_ptr<attribute_path_map_node<T>>>;
    // The prohibition on "overlap" and "conflict" explained above means
    // that only one of data, members or indexes is non-empty.
    std::optional<std::variant<data_t, members_t, indexes_t>> _content;
    bool is_empty() const { return !_content; }
    bool has_value() const { return _content && std::holds_alternative<data_t>(*_content); }
    bool has_members() const { return _content && std::holds_alternative<members_t>(*_content); }
    bool has_indexes() const { return _content && std::holds_alternative<indexes_t>(*_content); }
    // Each getter below assumes the corresponding has_*() check is true;
    // calling it otherwise makes std::get throw std::bad_variant_access.
    members_t& get_members() { return std::get<members_t>(*_content); }
    const members_t& get_members() const { return std::get<members_t>(*_content); }
    indexes_t& get_indexes() { return std::get<indexes_t>(*_content); }
    const indexes_t& get_indexes() const { return std::get<indexes_t>(*_content); }
    T& get_value() { return std::get<T>(*_content); }
    const T& get_value() const { return std::get<T>(*_content); }
};

// A map from a top-level attribute's name to the hierarchy of sub-paths
// (and data) requested under it.
template<typename T>
using attribute_path_map = std::unordered_map<std::string, attribute_path_map_node<T>>;

using attrs_to_get_node = attribute_path_map_node<std::monostate>;

// attrs_to_get lists which top-level attributes are needed, and possibly also
// which part of the top-level attribute is really needed (when nested
// attribute paths appeared in the query).
// Most code actually uses optional<attrs_to_get>. There, a disengaged
// optional means we should get all attributes, not specific ones.
using attrs_to_get = attribute_path_map<std::monostate>;
// Takes a given JSON value and drops its parts which weren't asked to be
// kept. It modifies the given JSON value, or returns false to signify that
// the entire object should be dropped.
// Note that the JSON value is assumed to be encoded using the DynamoDB
// conventions - i.e., it is really a map whose key has a type string,
// and the value is the real object.
template<typename T>
bool hierarchy_filter(rjson::value& val, const attribute_path_map_node<T>& h) {
    if (!val.IsObject() || val.MemberCount() != 1) {
        // This shouldn't happen. We shouldn't have stored malformed objects.
        // But today Alternator does not validate the structure of nested
        // documents before storing them, so this can happen on read.
        throw api_error::internal(format("Malformed value object read: {}", val));
    }
    // The single member's name is the DynamoDB type tag (e.g. "M" for map,
    // "L" for list), and its value holds the actual content.
    const char* type = val.MemberBegin()->name.GetString();
    rjson::value& v = val.MemberBegin()->value;
    if (h.has_members()) {
        // h asks to keep only specific map members (".name" path steps).
        const auto& members = h.get_members();
        if (type[0] != 'M' || !v.IsObject()) {
            // If v is not an object (dictionary, map), none of the members
            // can match.
            return false;
        }
        rjson::value newv = rjson::empty_object();
        for (auto it = v.MemberBegin(); it != v.MemberEnd(); ++it) {
            std::string attr = rjson::to_string(it->name);
            auto x = members.find(attr);
            if (x != members.end()) {
                if (x->second) {
                    // Only a part of this attribute is to be kept - recurse
                    // to filter the sub-hierarchy.
                    if (hierarchy_filter(it->value, *x->second)) {
                        // because newv started empty and attr are unique
                        // (keys of v), we can use add() here
                        rjson::add_with_string_name(newv, attr, std::move(it->value));
                    }
                } else {
                    // The entire attribute is to be kept
                    rjson::add_with_string_name(newv, attr, std::move(it->value));
                }
            }
        }
        if (newv.MemberCount() == 0) {
            // Nothing survived the filter - drop the whole map.
            return false;
        }
        v = newv;
    } else if (h.has_indexes()) {
        // h asks to keep only specific list elements ("[i]" path steps).
        const auto& indexes = h.get_indexes();
        if (type[0] != 'L' || !v.IsArray()) {
            // If v is not a list, no index can match.
            return false;
        }
        rjson::value newv = rjson::empty_array();
        // NOTE: GetArray() returns a view object, so a[i] refers to v's
        // elements and can be moved from below.
        const auto& a = v.GetArray();
        for (unsigned i = 0; i < v.Size(); i++) {
            auto x = indexes.find(i);
            if (x != indexes.end()) {
                if (x->second) {
                    // Only part of this element is to be kept - recurse.
                    if (hierarchy_filter(a[i], *x->second)) {
                        rjson::push_back(newv, std::move(a[i]));
                    }
                } else {
                    // The entire attribute is to be kept
                    rjson::push_back(newv, std::move(a[i]));
                }
            }
        }
        if (newv.Size() == 0) {
            return false;
        }
        v = newv;
    }
    // A node with neither members nor indexes keeps the value untouched.
    return true;
}
// Add a path to an attribute_path_map. Throws a validation error if the path
// "overlaps" with one already in the filter (one is a sub-path of the other)
// or "conflicts" with it (both a member and index is requested).
template<typename T>
void attribute_path_map_add(const char* source, attribute_path_map<T>& map, const parsed::path& p, T value = {}) {
using node = attribute_path_map_node<T>;
// The first step is to look for the top-level attribute (p.root()):
auto it = map.find(p.root());
if (it == map.end()) {
if (p.has_operators()) {
it = map.emplace(p.root(), node {std::nullopt}).first;
} else {
(void) map.emplace(p.root(), node {std::move(value)}).first;
// Value inserted for top-level node. We're done.
return;
}
} else if(!p.has_operators()) {
// If p is top-level and we already have it or a part of it
// in map, it's a forbidden overlapping path.
throw api_error::validation(fmt::format(
"Invalid {}: two document paths overlap at {}", source, p.root()));
} else if (it->second.has_value()) {
// If we're here, it != map.end() && p.has_operators && it->second.has_value().
// This means the top-level attribute already has a value, and we're
// trying to add a non-top-level value. It's an overlap.
throw api_error::validation(fmt::format("Invalid {}: two document paths overlap at {}", source, p.root()));
}
node* h = &it->second;
// The second step is to walk h from the top-level node to the inner node
// where we're supposed to insert the value:
for (const auto& op : p.operators()) {
std::visit(overloaded_functor {
[&] (const std::string& member) {
if (h->is_empty()) {
*h = node {typename node::members_t()};
} else if (h->has_indexes()) {
throw api_error::validation(format("Invalid {}: two document paths conflict at {}", source, p));
} else if (h->has_value()) {
throw api_error::validation(format("Invalid {}: two document paths overlap at {}", source, p));
}
typename node::members_t& members = h->get_members();
auto it = members.find(member);
if (it == members.end()) {
it = members.insert({member, std::make_unique<node>()}).first;
}
h = it->second.get();
},
[&] (unsigned index) {
if (h->is_empty()) {
*h = node {typename node::indexes_t()};
} else if (h->has_members()) {
throw api_error::validation(format("Invalid {}: two document paths conflict at {}", source, p));
} else if (h->has_value()) {
throw api_error::validation(format("Invalid {}: two document paths overlap at {}", source, p));
}
typename node::indexes_t& indexes = h->get_indexes();
auto it = indexes.find(index);
if (it == indexes.end()) {
it = indexes.insert({index, std::make_unique<node>()}).first;
}
h = it->second.get();
}
}, op);
}
// Finally, insert the value in the node h.
if (h->is_empty()) {
*h = node {std::move(value)};
} else {
throw api_error::validation(format("Invalid {}: two document paths overlap at {}", source, p));
}
}
// A very simplified version of the above function, handling only the special
// case of a top-level attribute with no nested path. Besides being simpler,
// it also reports a different error: a "duplicate attribute" instead of
// "overlapping paths". DynamoDB makes the same distinction (errors in
// AttributesToGet refer to duplicates, not overlaps, but errors in
// ProjectionExpression refer to overlap - even if it's an exact duplicate).
template<typename T>
void attribute_path_map_add(const char* source, attribute_path_map<T>& map, const std::string& attr, T value = {}) {
    using node = attribute_path_map_node<T>;
    // Reject a second mention of the same top-level attribute.
    if (map.find(attr) != map.end()) {
        throw api_error::validation(fmt::format(
            "Invalid {}: Duplicate attribute: {}", source, attr));
    }
    map.emplace(attr, node {std::move(value)});
}
} // namespace alternator

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#include "alternator/error.hh" #include "alternator/error.hh"
@@ -13,8 +13,7 @@
#include <string_view> #include <string_view>
#include "alternator/auth.hh" #include "alternator/auth.hh"
#include <fmt/format.h> #include <fmt/format.h>
#include "db/consistency_level_type.hh" #include "auth/password_authenticator.hh"
#include "db/system_keyspace.hh"
#include "service/storage_proxy.hh" #include "service/storage_proxy.hh"
#include "alternator/executor.hh" #include "alternator/executor.hh"
#include "cql3/selection/selection.hh" #include "cql3/selection/selection.hh"
@@ -26,8 +25,8 @@ namespace alternator {
static logging::logger alogger("alternator-auth"); static logging::logger alogger("alternator-auth");
future<std::string> get_key_from_roles(service::storage_proxy& proxy, std::string username) { future<std::string> get_key_from_roles(service::storage_proxy& proxy, auth::service& as, std::string username) {
schema_ptr schema = proxy.data_dictionary().find_schema(db::system_keyspace::NAME, "roles"); schema_ptr schema = proxy.data_dictionary().find_schema(auth::get_auth_ks_name(as.query_processor()), "roles");
partition_key pk = partition_key::from_single_value(*schema, utf8_type->decompose(username)); partition_key pk = partition_key::from_single_value(*schema, utf8_type->decompose(username));
dht::partition_range_vector partition_ranges{dht::partition_range(dht::decorate_key(*schema, pk))}; dht::partition_range_vector partition_ranges{dht::partition_range(dht::decorate_key(*schema, pk))};
std::vector<query::clustering_range> bounds{query::clustering_range::make_open_ended_both_sides()}; std::vector<query::clustering_range> bounds{query::clustering_range::make_open_ended_both_sides()};
@@ -40,7 +39,7 @@ future<std::string> get_key_from_roles(service::storage_proxy& proxy, std::strin
auto partition_slice = query::partition_slice(std::move(bounds), {}, query::column_id_vector{salted_hash_col->id, can_login_col->id}, selection->get_query_options()); auto partition_slice = query::partition_slice(std::move(bounds), {}, query::column_id_vector{salted_hash_col->id, can_login_col->id}, selection->get_query_options());
auto command = ::make_lw_shared<query::read_command>(schema->id(), schema->version(), partition_slice, auto command = ::make_lw_shared<query::read_command>(schema->id(), schema->version(), partition_slice,
proxy.get_max_result_size(partition_slice), query::tombstone_limit(proxy.get_tombstone_limit())); proxy.get_max_result_size(partition_slice), query::tombstone_limit(proxy.get_tombstone_limit()));
auto cl = db::consistency_level::LOCAL_ONE; auto cl = auth::password_authenticator::consistency_for_user(username);
service::client_state client_state{service::client_state::internal_tag()}; service::client_state client_state{service::client_state::internal_tag()};
service::storage_proxy::coordinator_query_result qr = co_await proxy.query(schema, std::move(command), std::move(partition_ranges), cl, service::storage_proxy::coordinator_query_result qr = co_await proxy.query(schema, std::move(command), std::move(partition_ranges), cl,

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#pragma once #pragma once
@@ -20,6 +20,6 @@ namespace alternator {
using key_cache = utils::loading_cache<std::string, std::string, 1>; using key_cache = utils::loading_cache<std::string, std::string, 1>;
future<std::string> get_key_from_roles(service::storage_proxy& proxy, std::string username); future<std::string> get_key_from_roles(service::storage_proxy& proxy, auth::service& as, std::string username);
} }

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#include <string_view> #include <string_view>
@@ -618,7 +618,7 @@ conditional_operator_type get_conditional_operator(const rjson::value& req) {
// Check if the existing values of the item (previous_item) match the // Check if the existing values of the item (previous_item) match the
// conditions given by the Expected and ConditionalOperator parameters // conditions given by the Expected and ConditionalOperator parameters
// (if they exist) in the request (an UpdateItem, PutItem or DeleteItem). // (if they exist) in the request (an UpdateItem, PutItem or DeleteItem).
// This function can throw a ValidationException API error if there // This function can throw an ValidationException API error if there
// are errors in the format of the condition itself. // are errors in the format of the condition itself.
bool verify_expected(const rjson::value& req, const rjson::value* previous_item) { bool verify_expected(const rjson::value& req, const rjson::value* previous_item) {
const rjson::value* expected = rjson::find(req, "Expected"); const rjson::value* expected = rjson::find(req, "Expected");

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
/* /*

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#include "consumed_capacity.hh" #include "consumed_capacity.hh"
@@ -45,7 +45,7 @@ bool consumed_capacity_counter::should_add_capacity(const rjson::value& request)
} }
void consumed_capacity_counter::add_consumed_capacity_to_response_if_needed(rjson::value& response) const noexcept { void consumed_capacity_counter::add_consumed_capacity_to_response_if_needed(rjson::value& response) const noexcept {
if (_should_add_to_response) { if (_should_add_to_reponse) {
auto consumption = rjson::empty_object(); auto consumption = rjson::empty_object();
rjson::add(consumption, "CapacityUnits", get_consumed_capacity_units()); rjson::add(consumption, "CapacityUnits", get_consumed_capacity_units());
rjson::add(response, "ConsumedCapacity", std::move(consumption)); rjson::add(response, "ConsumedCapacity", std::move(consumption));

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#pragma once #pragma once
@@ -28,9 +28,9 @@ namespace alternator {
class consumed_capacity_counter { class consumed_capacity_counter {
public: public:
consumed_capacity_counter() = default; consumed_capacity_counter() = default;
consumed_capacity_counter(bool should_add_to_response) : _should_add_to_response(should_add_to_response){} consumed_capacity_counter(bool should_add_to_reponse) : _should_add_to_reponse(should_add_to_reponse){}
bool operator()() const noexcept { bool operator()() const noexcept {
return _should_add_to_response; return _should_add_to_reponse;
} }
consumed_capacity_counter& operator +=(uint64_t bytes); consumed_capacity_counter& operator +=(uint64_t bytes);
@@ -44,7 +44,7 @@ public:
uint64_t _total_bytes = 0; uint64_t _total_bytes = 0;
static bool should_add_capacity(const rjson::value& request); static bool should_add_capacity(const rjson::value& request);
protected: protected:
bool _should_add_to_response = false; bool _should_add_to_reponse = false;
}; };
class rcu_consumed_capacity_counter : public consumed_capacity_counter { class rcu_consumed_capacity_counter : public consumed_capacity_counter {

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#include <seastar/core/with_scheduling_group.hh> #include <seastar/core/with_scheduling_group.hh>
@@ -18,7 +18,6 @@
#include "service/memory_limiter.hh" #include "service/memory_limiter.hh"
#include "auth/service.hh" #include "auth/service.hh"
#include "service/qos/service_level_controller.hh" #include "service/qos/service_level_controller.hh"
#include "vector_search/vector_store_client.hh"
using namespace seastar; using namespace seastar;
@@ -32,12 +31,10 @@ controller::controller(
sharded<service::storage_service>& ss, sharded<service::storage_service>& ss,
sharded<service::migration_manager>& mm, sharded<service::migration_manager>& mm,
sharded<db::system_distributed_keyspace>& sys_dist_ks, sharded<db::system_distributed_keyspace>& sys_dist_ks,
sharded<db::system_keyspace>& sys_ks,
sharded<cdc::generation_service>& cdc_gen_svc, sharded<cdc::generation_service>& cdc_gen_svc,
sharded<service::memory_limiter>& memory_limiter, sharded<service::memory_limiter>& memory_limiter,
sharded<auth::service>& auth_service, sharded<auth::service>& auth_service,
sharded<qos::service_level_controller>& sl_controller, sharded<qos::service_level_controller>& sl_controller,
sharded<vector_search::vector_store_client>& vsc,
const db::config& config, const db::config& config,
seastar::scheduling_group sg) seastar::scheduling_group sg)
: protocol_server(sg) : protocol_server(sg)
@@ -46,12 +43,10 @@ controller::controller(
, _ss(ss) , _ss(ss)
, _mm(mm) , _mm(mm)
, _sys_dist_ks(sys_dist_ks) , _sys_dist_ks(sys_dist_ks)
, _sys_ks(sys_ks)
, _cdc_gen_svc(cdc_gen_svc) , _cdc_gen_svc(cdc_gen_svc)
, _memory_limiter(memory_limiter) , _memory_limiter(memory_limiter)
, _auth_service(auth_service) , _auth_service(auth_service)
, _sl_controller(sl_controller) , _sl_controller(sl_controller)
, _vsc(vsc)
, _config(config) , _config(config)
{ {
} }
@@ -96,8 +91,8 @@ future<> controller::start_server() {
auto get_timeout_in_ms = [] (const db::config& cfg) -> utils::updateable_value<uint32_t> { auto get_timeout_in_ms = [] (const db::config& cfg) -> utils::updateable_value<uint32_t> {
return cfg.alternator_timeout_in_ms; return cfg.alternator_timeout_in_ms;
}; };
_executor.start(std::ref(_gossiper), std::ref(_proxy), std::ref(_ss), std::ref(_mm), std::ref(_sys_dist_ks), std::ref(_sys_ks), _executor.start(std::ref(_gossiper), std::ref(_proxy), std::ref(_ss), std::ref(_mm), std::ref(_sys_dist_ks),
sharded_parameter(get_cdc_metadata, std::ref(_cdc_gen_svc)), std::ref(_vsc), _ssg.value(), sharded_parameter(get_cdc_metadata, std::ref(_cdc_gen_svc)), _ssg.value(),
sharded_parameter(get_timeout_in_ms, std::ref(_config))).get(); sharded_parameter(get_timeout_in_ms, std::ref(_config))).get();
_server.start(std::ref(_executor), std::ref(_proxy), std::ref(_gossiper), std::ref(_auth_service), std::ref(_sl_controller)).get(); _server.start(std::ref(_executor), std::ref(_proxy), std::ref(_gossiper), std::ref(_auth_service), std::ref(_sl_controller)).get();
// Note: from this point on, if start_server() throws for any reason, // Note: from this point on, if start_server() throws for any reason,

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#pragma once #pragma once
@@ -22,7 +22,6 @@ class memory_limiter;
namespace db { namespace db {
class system_distributed_keyspace; class system_distributed_keyspace;
class system_keyspace;
class config; class config;
} }
@@ -44,10 +43,6 @@ namespace qos {
class service_level_controller; class service_level_controller;
} }
namespace vector_search {
class vector_store_client;
}
namespace alternator { namespace alternator {
// This is the official DynamoDB API version. // This is the official DynamoDB API version.
@@ -66,12 +61,10 @@ class controller : public protocol_server {
sharded<service::storage_service>& _ss; sharded<service::storage_service>& _ss;
sharded<service::migration_manager>& _mm; sharded<service::migration_manager>& _mm;
sharded<db::system_distributed_keyspace>& _sys_dist_ks; sharded<db::system_distributed_keyspace>& _sys_dist_ks;
sharded<db::system_keyspace>& _sys_ks;
sharded<cdc::generation_service>& _cdc_gen_svc; sharded<cdc::generation_service>& _cdc_gen_svc;
sharded<service::memory_limiter>& _memory_limiter; sharded<service::memory_limiter>& _memory_limiter;
sharded<auth::service>& _auth_service; sharded<auth::service>& _auth_service;
sharded<qos::service_level_controller>& _sl_controller; sharded<qos::service_level_controller>& _sl_controller;
sharded<vector_search::vector_store_client>& _vsc;
const db::config& _config; const db::config& _config;
std::vector<socket_address> _listen_addresses; std::vector<socket_address> _listen_addresses;
@@ -86,12 +79,10 @@ public:
sharded<service::storage_service>& ss, sharded<service::storage_service>& ss,
sharded<service::migration_manager>& mm, sharded<service::migration_manager>& mm,
sharded<db::system_distributed_keyspace>& sys_dist_ks, sharded<db::system_distributed_keyspace>& sys_dist_ks,
sharded<db::system_keyspace>& sys_ks,
sharded<cdc::generation_service>& cdc_gen_svc, sharded<cdc::generation_service>& cdc_gen_svc,
sharded<service::memory_limiter>& memory_limiter, sharded<service::memory_limiter>& memory_limiter,
sharded<auth::service>& auth_service, sharded<auth::service>& auth_service,
sharded<qos::service_level_controller>& sl_controller, sharded<qos::service_level_controller>& sl_controller,
sharded<vector_search::vector_store_client>& vsc,
const db::config& config, const db::config& config,
seastar::scheduling_group sg); seastar::scheduling_group sg);

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#pragma once #pragma once

File diff suppressed because it is too large Load Diff

View File

@@ -3,15 +3,13 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#pragma once #pragma once
#include <seastar/core/future.hh> #include <seastar/core/future.hh>
#include "audit/audit.hh"
#include "seastarx.hh" #include "seastarx.hh"
#include <seastar/core/future.hh>
#include <seastar/core/sharded.hh> #include <seastar/core/sharded.hh>
#include <seastar/util/noncopyable_function.hh> #include <seastar/util/noncopyable_function.hh>
@@ -22,23 +20,15 @@
#include "db/config.hh" #include "db/config.hh"
#include "alternator/error.hh" #include "alternator/error.hh"
#include "alternator/attribute_path.hh" #include "stats.hh"
#include "alternator/stats.hh"
#include "alternator/executor_util.hh"
#include "utils/rjson.hh" #include "utils/rjson.hh"
#include "utils/updateable_value.hh" #include "utils/updateable_value.hh"
#include "utils/simple_value_with_expiry.hh"
#include "tracing/trace_state.hh" #include "tracing/trace_state.hh"
namespace db { namespace db {
class system_distributed_keyspace; class system_distributed_keyspace;
class system_keyspace;
}
namespace audit {
class audit_info_alternator;
} }
namespace query { namespace query {
@@ -56,10 +46,6 @@ namespace service {
class storage_service; class storage_service;
} }
namespace vector_search {
class vector_store_client;
}
namespace cdc { namespace cdc {
class metadata; class metadata;
} }
@@ -72,13 +58,82 @@ class gossiper;
class schema_builder; class schema_builder;
namespace alternator { namespace alternator {
enum class table_status; enum class table_status;
class rmw_operation; class rmw_operation;
class put_or_delete_item; class put_or_delete_item;
schema_ptr get_table(service::storage_proxy& proxy, const rjson::value& request);
bool is_alternator_keyspace(const sstring& ks_name);
// Wraps the db::get_tags_of_table and throws if the table is missing the tags extension.
const std::map<sstring, sstring>& get_tags_of_table_or_throw(schema_ptr schema);
// An attribute_path_map object is used to hold data for various attributes
// paths (parsed::path) in a hierarchy of attribute paths. Each attribute path
// has a root attribute, and then modified by member and index operators -
// for example in "a.b[2].c" we have "a" as the root, then ".b" member, then
// "[2]" index, and finally ".c" member.
// Data can be added to an attribute_path_map using the add() function, but
// requires that attributes with data not be *overlapping* or *conflicting*:
//
// 1. Two attribute paths which are identical or an ancestor of one another
// are considered *overlapping* and not allowed. If a.b.c has data,
// we can't add more data in a.b.c or any of its descendants like a.b.c.d.
//
// 2. Two attribute paths which need the same parent to have both a member and
// an index are considered *conflicting* and not allowed. E.g., if a.b has
// data, you can't add a[1]. The meaning of adding both would be that the
// attribute a is both a map and an array, which isn't sensible.
//
// These two requirements are common to the two places where Alternator uses
// this abstraction to describe how a hierarchical item is to be transformed:
//
// 1. In ProjectExpression: for filtering from a full top-level attribute
// only the parts for which user asked in ProjectionExpression.
//
// 2. In UpdateExpression: for taking the previous value of a top-level
// attribute, and modifying it based on the instructions in the user
// wrote in UpdateExpression.
template<typename T>
class attribute_path_map_node {
public:
using data_t = T;
// We need the extra unique_ptr<> here because libstdc++ unordered_map
// doesn't work with incomplete types :-(
using members_t = std::unordered_map<std::string, std::unique_ptr<attribute_path_map_node<T>>>;
// The indexes list is sorted because DynamoDB requires handling writes
// beyond the end of a list in index order.
using indexes_t = std::map<unsigned, std::unique_ptr<attribute_path_map_node<T>>>;
// The prohibition on "overlap" and "conflict" explained above means
// That only one of data, members or indexes is non-empty.
std::optional<std::variant<data_t, members_t, indexes_t>> _content;
bool is_empty() const { return !_content; }
bool has_value() const { return _content && std::holds_alternative<data_t>(*_content); }
bool has_members() const { return _content && std::holds_alternative<members_t>(*_content); }
bool has_indexes() const { return _content && std::holds_alternative<indexes_t>(*_content); }
// get_members() assumes that has_members() is true
members_t& get_members() { return std::get<members_t>(*_content); }
const members_t& get_members() const { return std::get<members_t>(*_content); }
indexes_t& get_indexes() { return std::get<indexes_t>(*_content); }
const indexes_t& get_indexes() const { return std::get<indexes_t>(*_content); }
T& get_value() { return std::get<T>(*_content); }
const T& get_value() const { return std::get<T>(*_content); }
};
template<typename T>
using attribute_path_map = std::unordered_map<std::string, attribute_path_map_node<T>>;
using attrs_to_get_node = attribute_path_map_node<std::monostate>;
// attrs_to_get lists which top-level attribute are needed, and possibly also
// which part of the top-level attribute is really needed (when nested
// attribute paths appeared in the query).
// Most code actually uses optional<attrs_to_get>. There, a disengaged
// optional means we should get all attributes, not specific ones.
using attrs_to_get = attribute_path_map<std::monostate>;
namespace parsed { namespace parsed {
class expression_cache; class expression_cache;
} }
@@ -89,12 +144,9 @@ class executor : public peering_sharded_service<executor> {
service::storage_proxy& _proxy; service::storage_proxy& _proxy;
service::migration_manager& _mm; service::migration_manager& _mm;
db::system_distributed_keyspace& _sdks; db::system_distributed_keyspace& _sdks;
db::system_keyspace& _system_keyspace;
cdc::metadata& _cdc_metadata; cdc::metadata& _cdc_metadata;
vector_search::vector_store_client& _vsc;
utils::updateable_value<bool> _enforce_authorization; utils::updateable_value<bool> _enforce_authorization;
utils::updateable_value<bool> _warn_authorization; utils::updateable_value<bool> _warn_authorization;
seastar::sharded<audit::audit>& _audit;
// An smp_service_group to be used for limiting the concurrency when // An smp_service_group to be used for limiting the concurrency when
// forwarding Alternator request between shards - if necessary for LWT. // forwarding Alternator request between shards - if necessary for LWT.
smp_service_group _ssg; smp_service_group _ssg;
@@ -119,6 +171,7 @@ public:
// is written in chunks to the output_stream. This allows for efficient // is written in chunks to the output_stream. This allows for efficient
// handling of large responses without needing to allocate a large buffer // handling of large responses without needing to allocate a large buffer
// in memory. // in memory.
using body_writer = noncopyable_function<future<>(output_stream<char>&&)>;
using request_return_type = std::variant<std::string, body_writer, api_error>; using request_return_type = std::variant<std::string, body_writer, api_error>;
stats _stats; stats _stats;
// The metric_groups object holds this stat object's metrics registered // The metric_groups object holds this stat object's metrics registered
@@ -133,60 +186,53 @@ public:
service::storage_service& ss, service::storage_service& ss,
service::migration_manager& mm, service::migration_manager& mm,
db::system_distributed_keyspace& sdks, db::system_distributed_keyspace& sdks,
db::system_keyspace& system_keyspace,
cdc::metadata& cdc_metadata, cdc::metadata& cdc_metadata,
vector_search::vector_store_client& vsc,
smp_service_group ssg, smp_service_group ssg,
utils::updateable_value<uint32_t> default_timeout_in_ms); utils::updateable_value<uint32_t> default_timeout_in_ms);
~executor(); ~executor();
future<request_return_type> create_table(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info); future<request_return_type> create_table(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
future<request_return_type> describe_table(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info); future<request_return_type> describe_table(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
future<request_return_type> delete_table(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info); future<request_return_type> delete_table(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
future<request_return_type> update_table(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info); future<request_return_type> update_table(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
future<request_return_type> put_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info); future<request_return_type> put_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
future<request_return_type> get_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info); future<request_return_type> get_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
future<request_return_type> delete_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info); future<request_return_type> delete_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
future<request_return_type> update_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info); future<request_return_type> update_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
future<request_return_type> list_tables(client_state& client_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info); future<request_return_type> list_tables(client_state& client_state, service_permit permit, rjson::value request);
future<request_return_type> scan(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info); future<request_return_type> scan(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
future<request_return_type> describe_endpoints(client_state& client_state, service_permit permit, rjson::value request, std::string host_header, std::unique_ptr<audit::audit_info_alternator>& audit_info); future<request_return_type> describe_endpoints(client_state& client_state, service_permit permit, rjson::value request, std::string host_header);
future<request_return_type> batch_write_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info); future<request_return_type> batch_write_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
future<request_return_type> batch_get_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info); future<request_return_type> batch_get_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
future<request_return_type> query(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info); future<request_return_type> query(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
future<request_return_type> tag_resource(client_state& client_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info); future<request_return_type> tag_resource(client_state& client_state, service_permit permit, rjson::value request);
future<request_return_type> untag_resource(client_state& client_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info); future<request_return_type> untag_resource(client_state& client_state, service_permit permit, rjson::value request);
future<request_return_type> list_tags_of_resource(client_state& client_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info); future<request_return_type> list_tags_of_resource(client_state& client_state, service_permit permit, rjson::value request);
future<request_return_type> update_time_to_live(client_state& client_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info); future<request_return_type> update_time_to_live(client_state& client_state, service_permit permit, rjson::value request);
future<request_return_type> describe_time_to_live(client_state& client_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info); future<request_return_type> describe_time_to_live(client_state& client_state, service_permit permit, rjson::value request);
future<request_return_type> list_streams(client_state& client_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info); future<request_return_type> list_streams(client_state& client_state, service_permit permit, rjson::value request);
future<request_return_type> describe_stream(client_state& client_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info); future<request_return_type> describe_stream(client_state& client_state, service_permit permit, rjson::value request);
future<request_return_type> get_shard_iterator(client_state& client_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info); future<request_return_type> get_shard_iterator(client_state& client_state, service_permit permit, rjson::value request);
future<request_return_type> get_records(client_state& client_state, tracing::trace_state_ptr, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info); future<request_return_type> get_records(client_state& client_state, tracing::trace_state_ptr, service_permit permit, rjson::value request);
future<request_return_type> describe_continuous_backups(client_state& client_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info); future<request_return_type> describe_continuous_backups(client_state& client_state, service_permit permit, rjson::value request);
future<> start(); future<> start();
future<> stop(); future<> stop();
static sstring table_name(const schema&);
static db::timeout_clock::time_point default_timeout(); static db::timeout_clock::time_point default_timeout();
private: private:
static thread_local utils::updateable_value<uint32_t> s_default_timeout_in_ms; static thread_local utils::updateable_value<uint32_t> s_default_timeout_in_ms;
public:
static schema_ptr find_table(service::storage_proxy&, std::string_view table_name);
static schema_ptr find_table(service::storage_proxy&, const rjson::value& request);
private:
friend class rmw_operation; friend class rmw_operation;
// Helper to set up auditing for an Alternator operation. Checks whether static void describe_key_schema(rjson::value& parent, const schema&, std::unordered_map<std::string,std::string> * = nullptr, const std::map<sstring, sstring> *tags = nullptr);
// the operation should be audited (via will_log()) and if so, allocates
// and populates audit_info. No allocation occurs when auditing is disabled.
void maybe_audit(std::unique_ptr<audit::audit_info_alternator>& audit_info,
audit::statement_category category,
std::string_view ks_name,
std::string_view table_name,
std::string_view operation_name,
const rjson::value& request,
std::optional<db::consistency_level> cl = std::nullopt);
future<rjson::value> fill_table_description(schema_ptr schema, table_status tbl_status, service::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit); future<rjson::value> fill_table_description(schema_ptr schema, table_status tbl_status, service::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit);
future<executor::request_return_type> create_table_on_shard0(service::client_state&& client_state, tracing::trace_state_ptr trace_state, rjson::value request, bool enforce_authorization, future<executor::request_return_type> create_table_on_shard0(service::client_state&& client_state, tracing::trace_state_ptr trace_state, rjson::value request, bool enforce_authorization, bool warn_authorization, const db::tablets_mode_t::mode tablets_mode);
bool warn_authorization, const db::tablets_mode_t::mode tablets_mode, std::unique_ptr<audit::audit_info_alternator>& audit_info);
future<> do_batch_write( future<> do_batch_write(
std::vector<std::pair<schema_ptr, put_or_delete_item>> mutation_builders, std::vector<std::pair<schema_ptr, put_or_delete_item>> mutation_builders,
@@ -199,34 +245,60 @@ private:
tracing::trace_state_ptr trace_state, service_permit permit); tracing::trace_state_ptr trace_state, service_permit permit);
public: public:
static void describe_key_schema(rjson::value& parent, const schema& schema, std::unordered_map<std::string,std::string>&, const std::map<sstring, sstring> *tags = nullptr);
static std::optional<rjson::value> describe_single_item(schema_ptr,
const query::partition_slice&,
const cql3::selection::selection&,
const query::result&,
const std::optional<attrs_to_get>&,
uint64_t* = nullptr);
// Converts a multi-row selection result to JSON compatible with DynamoDB.
// For each row, this method calls item_callback, which takes the size of
// the item as the parameter.
static future<std::vector<rjson::value>> describe_multi_item(schema_ptr schema,
const query::partition_slice&& slice,
shared_ptr<cql3::selection::selection> selection,
foreign_ptr<lw_shared_ptr<query::result>> query_result,
shared_ptr<const std::optional<attrs_to_get>> attrs_to_get,
noncopyable_function<void(uint64_t)> item_callback = {});
static void describe_single_item(const cql3::selection::selection&,
const std::vector<managed_bytes_opt>&,
const std::optional<attrs_to_get>&,
rjson::value&,
uint64_t* item_length_in_bytes = nullptr,
bool = false);
static bool add_stream_options(const rjson::value& stream_spec, schema_builder&, service::storage_proxy& sp); static bool add_stream_options(const rjson::value& stream_spec, schema_builder&, service::storage_proxy& sp);
static void supplement_table_info(rjson::value& descr, const schema& schema, service::storage_proxy& sp); static void supplement_table_info(rjson::value& descr, const schema& schema, service::storage_proxy& sp);
static void supplement_table_stream_info(rjson::value& descr, const schema& schema, const service::storage_proxy& sp); static void supplement_table_stream_info(rjson::value& descr, const schema& schema, const service::storage_proxy& sp);
}; };
// returns table creation time in seconds since epoch for `db_clock` // is_big() checks approximately if the given JSON value is "bigger" than
double get_table_creation_time(const schema &schema); // the given big_size number of bytes. The goal is to *quickly* detect
// oversized JSON that, for example, is too large to be serialized to a
// contiguous string - we don't need an accurate size for that. Moreover,
// as soon as we detect that the JSON is indeed "big", we can return true
// and don't need to continue calculating its exact size.
// For simplicity, we use a recursive implementation. This is fine because
// Alternator limits the depth of JSONs it reads from inputs, and doesn't
// add more than a couple of levels in its own output construction.
bool is_big(const rjson::value& val, int big_size = 100'000);
// result of parsing ARN (Amazon Resource Name) // Check CQL's Role-Based Access Control (RBAC) permission (MODIFY,
// ARN format is `arn:<partition>:<service>:<region>:<account-id>:<resource-type>/<resource-id>/<postfix>` // SELECT, DROP, etc.) on the given table. When permission is denied an
// we ignore partition, service and account-id // appropriate user-readable api_error::access_denied is thrown.
// resource-type must be string "table" future<> verify_permission(bool enforce_authorization, bool warn_authorization, const service::client_state&, const schema_ptr&, auth::permission, alternator::stats& stats);
// resource-id will be returned as table_name
// region will be returned as keyspace_name /**
// postfix is a string after resource-id and will be returned as is (whole), including separator. * Make return type for serializing the object "streamed",
struct arn_parts { * i.e. direct to HTTP output stream. Note: only useful for
std::string_view keyspace_name; * (very) large objects as there are overhead issues with this
std::string_view table_name; * as well, but for massive lists of return objects this can
std::string_view postfix; * help avoid large allocations/many re-allocs
}; */
// arn - arn to parse executor::body_writer make_streamed(rjson::value&&);
// arn_field_name - identifier of the ARN, used only when reporting an error (in error messages), for example "Incorrect resource identifier `<arn_field_name>`"
// type_name - used only when reporting an error (in error messages), for example "... is not a valid <type_name> ARN ..."
// expected_postfix - optional filter of postfix value (part of ARN after resource-id, including separator, see comments for struct arn_parts).
// If is empty - then postfix value must be empty as well
// if not empty - postfix value must start with expected_postfix, but might be longer
arn_parts parse_arn(std::string_view arn, std::string_view arn_field_name, std::string_view type_name, std::string_view expected_postfix);
// The format is ks1|ks2|ks3... and table1|table2|table3...
sstring print_names_for_audit(const std::set<sstring>& names);
} }

File diff suppressed because it is too large Load Diff

View File

@@ -1,559 +0,0 @@
/*
* Copyright 2019-present ScyllaDB
*/
/*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
*/
#include "alternator/executor_util.hh"
#include "alternator/executor.hh"
#include "alternator/error.hh"
#include "auth/resource.hh"
#include "auth/service.hh"
#include "cdc/log.hh"
#include "data_dictionary/data_dictionary.hh"
#include "db/tags/utils.hh"
#include "replica/database.hh"
#include "cql3/selection/selection.hh"
#include "cql3/result_set.hh"
#include "serialization.hh"
#include "service/storage_proxy.hh"
#include "types/map.hh"
#include <fmt/format.h>
namespace alternator {
extern logging::logger elogger; // from executor.cc
// Fetch an optional integer attribute from a JSON object.
// Returns a disengaged optional when the attribute is absent, and throws
// a DynamoDB-style ValidationException when it is present but not an
// integer. Note that the error message includes the whole containing
// object, to give the user context.
std::optional<int> get_int_attribute(const rjson::value& value, std::string_view attribute_name) {
    const rjson::value* found = rjson::find(value, attribute_name);
    if (!found) {
        return std::nullopt;
    }
    if (found->IsInt()) {
        return found->GetInt();
    }
    throw api_error::validation(fmt::format("Expected integer value for attribute {}, got: {}",
            attribute_name, value));
}
// Fetch an optional string attribute from a JSON object.
// Returns default_return when the attribute is absent, and throws a
// DynamoDB-style ValidationException when it is present but not a string.
std::string get_string_attribute(const rjson::value& value, std::string_view attribute_name, const char* default_return) {
    const rjson::value* found = rjson::find(value, attribute_name);
    if (!found) {
        return default_return;
    }
    if (found->IsString()) {
        return rjson::to_string(*found);
    }
    throw api_error::validation(fmt::format("Expected string value for attribute {}, got: {}",
            attribute_name, value));
}
// Fetch an optional boolean attribute from a JSON object.
// Returns default_return when the attribute is absent, and throws a
// DynamoDB-style ValidationException when it is present but not a boolean.
bool get_bool_attribute(const rjson::value& value, std::string_view attribute_name, bool default_return) {
    const rjson::value* found = rjson::find(value, attribute_name);
    if (!found) {
        return default_return;
    }
    if (found->IsBool()) {
        return found->GetBool();
    }
    throw api_error::validation(fmt::format("Expected boolean value for attribute {}, got: {}",
            attribute_name, value));
}
// Extract the "TableName" field from a request, if present.
// Returns nullopt when the field is missing; throws a validation error
// when it exists but is not a string.
std::optional<std::string> find_table_name(const rjson::value& request) {
    const rjson::value* tn = rjson::find(request, "TableName");
    if (tn == nullptr) {
        return std::nullopt;
    }
    if (!tn->IsString()) {
        throw api_error::validation("Non-string TableName field in request");
    }
    return rjson::to_string(*tn);
}
// Like find_table_name(), but a missing "TableName" field is an error:
// throws a validation error instead of returning nullopt.
std::string get_table_name(const rjson::value& request) {
    if (auto name = find_table_name(request)) {
        return std::move(*name);
    }
    throw api_error::validation("Missing TableName field in request");
}
// Resolve the table named in the request's "TableName" field.
// Returns nullptr when the field is missing; otherwise delegates to the
// string_view overload (which throws if the table does not exist).
schema_ptr find_table(service::storage_proxy& proxy, const rjson::value& request) {
    if (auto table_name = find_table_name(request)) {
        return find_table(proxy, *table_name);
    }
    return nullptr;
}
// Look up the schema of an Alternator table by its DynamoDB name.
// Alternator stores each table in its own keyspace named
// "<KEYSPACE_NAME_PREFIX><table name>". Throws resource_not_found when
// the table does not exist - or a validation error first, if the name
// itself is malformed (matching DynamoDB's behavior).
schema_ptr find_table(service::storage_proxy& proxy, std::string_view table_name) {
    const sstring ks_name = sstring(executor::KEYSPACE_NAME_PREFIX) + sstring(table_name);
    try {
        return proxy.data_dictionary().find_schema(ks_name, table_name);
    } catch (data_dictionary::no_such_column_family&) {
        // DynamoDB returns validation error even when table does not exist
        // and the table name is invalid.
        validate_table_name(table_name);
        throw api_error::resource_not_found(
                fmt::format("Requested resource not found: Table: {} not found", table_name));
    }
}
// Resolve the table named in the request, treating a missing "TableName"
// field as an error (unlike find_table(), which returns nullptr).
schema_ptr get_table(service::storage_proxy& proxy, const rjson::value& request) {
    schema_ptr schema = find_table(proxy, request);
    if (schema) {
        return schema;
    }
    // find_table() returns nullptr only when "TableName" was absent -
    // syntax errors and nonexistent tables already threw. Call
    // get_table_name() just to generate its canonical missing-field
    // exception. Slow path, but it only runs on bad requests.
    get_table_name(request);
    return schema;
}
// Return the CQL type of the column that Alternator uses to store an
// item's non-key attributes: a map from attribute name (text) to
// serialized value (blob). The third get_instance() argument is true -
// presumably selecting a multi-cell map; confirm against map_type_impl.
// The instance is cached per shard (thread_local) so get_instance() runs
// only once per shard.
map_type attrs_type() {
    static thread_local auto t = map_type_impl::get_instance(utf8_type, bytes_type, true);
    return t;
}
// Return the tags map of the given table, throwing a validation error if
// the table has no valid tagging information. Note the error message
// prints the keyspace name (which, for Alternator tables, embeds the
// table name).
const std::map<sstring, sstring>& get_tags_of_table_or_throw(schema_ptr schema) {
    auto tags_ptr = db::get_tags_of_table(schema);
    if (!tags_ptr) {
        throw api_error::validation(format("Table {} does not have valid tagging information", schema->ks_name()));
    }
    return *tags_ptr;
}
// Check whether a keyspace belongs to Alternator. Alternator keyspaces
// are recognized purely by their name prefix.
bool is_alternator_keyspace(std::string_view ks_name) {
    const std::string_view prefix = executor::KEYSPACE_NAME_PREFIX;
    return ks_name.substr(0, prefix.size()) == prefix;
}
// This tag is set on a GSI when the user did not specify a range key, causing
// Alternator to add the base table's range key as a spurious range key. It is
// used by describe_key_schema() to suppress reporting that key.
extern const sstring SPURIOUS_RANGE_KEY_ADDED_TO_GSI_AND_USER_DIDNT_SPECIFY_RANGE_KEY_TAG_KEY;
// Add a DynamoDB "KeySchema" array to `parent`, describing the given
// schema's key columns: every partition-key column as a HASH key, and at
// most the first clustering column as a RANGE key. If attribute_types is
// non-null, the CQL-to-DynamoDB type name of each reported key column is
// recorded there. If `tags` marks the range key as spurious (added by
// Alternator to a GSI whose user didn't request one), no RANGE key is
// reported at all.
void describe_key_schema(rjson::value& parent, const schema& schema, std::unordered_map<std::string, std::string>* attribute_types, const std::map<sstring, sstring>* tags) {
    rjson::value key_schema = rjson::empty_array();
    // Append one key entry, and optionally record its type.
    auto emit_key = [&] (const column_definition& cdef, const char* key_type) {
        rjson::value key = rjson::empty_object();
        rjson::add(key, "AttributeName", rjson::from_string(cdef.name_as_text()));
        rjson::add(key, "KeyType", key_type);
        rjson::push_back(key_schema, std::move(key));
        if (attribute_types) {
            (*attribute_types)[cdef.name_as_text()] = type_to_string(cdef.type);
        }
    };
    for (const column_definition& cdef : schema.partition_key_columns()) {
        emit_key(cdef, "HASH");
    }
    const bool ignore_range_keys_as_spurious = tags != nullptr && tags->contains(SPURIOUS_RANGE_KEY_ADDED_TO_GSI_AND_USER_DIDNT_SPECIFY_RANGE_KEY_TAG_KEY);
    if (!ignore_range_keys_as_spurious) {
        // NOTE: the user-requested key (there can be at most one) always
        // comes first. Any further clustering columns were added
        // internally, not requested by the user, so they are ignored.
        auto ck_columns = schema.clustering_key_columns();
        if (ck_columns.begin() != ck_columns.end()) {
            emit_key(*ck_columns.begin(), "RANGE");
        }
    }
    rjson::add(parent, "KeySchema", std::move(key_schema));
}
// Check if the given string has valid characters for a table name, i.e.
// only a-z, A-Z, 0-9, _ (underscore), - (dash), . (dot). Note that this
// function does not check the length of the name - use
// validate_table_name() to validate both the characters and the length.
static bool valid_table_name_chars(std::string_view name) {
    return std::all_of(name.begin(), name.end(), [] (char c) {
        return (c >= 'a' && c <= 'z') ||
               (c >= 'A' && c <= 'Z') ||
               (c >= '0' && c <= '9') ||
               c == '_' || c == '-' || c == '.';
    });
}
// Build the internal name of a materialized view backing an index:
// "<table><delim><index>". Validates the index name's length (at least 3
// characters) and character set, and - when validate_len is true - the
// total length of the combined name. Throws a DynamoDB-style validation
// error on any violation.
std::string view_name(std::string_view table_name, std::string_view index_name, const std::string& delim, bool validate_len) {
    if (index_name.length() < 3) {
        throw api_error::validation("IndexName must be at least 3 characters long");
    }
    if (!valid_table_name_chars(index_name)) {
        throw api_error::validation(
            fmt::format("IndexName '{}' must satisfy regular expression pattern: [a-zA-Z0-9_.-]+", index_name));
    }
    std::string combined;
    combined.reserve(table_name.size() + delim.size() + index_name.size());
    combined.append(table_name).append(delim).append(index_name);
    if (validate_len && combined.length() > max_auxiliary_table_name_length) {
        // The user-visible limit excludes the internal delimiter.
        throw api_error::validation(
            fmt::format("The total length of TableName ('{}') and IndexName ('{}') cannot exceed {} characters",
                table_name, index_name, max_auxiliary_table_name_length - delim.size()));
    }
    return combined;
}
// Internal name of the materialized view backing a GSI: "<table>:<index>".
std::string gsi_name(std::string_view table_name, std::string_view index_name, bool validate_len) {
    const std::string gsi_delim = ":";
    return view_name(table_name, index_name, gsi_delim, validate_len);
}
// Internal name of the materialized view backing an LSI: "<table>!:<index>".
std::string lsi_name(std::string_view table_name, std::string_view index_name, bool validate_len) {
    const std::string lsi_delim = "!:";
    return view_name(table_name, index_name, lsi_delim, validate_len);
}
// Verify that a key object has exactly as many members as the schema has
// key columns: one for the partition key, plus one more when the table
// has a sort (clustering) key. Throws a validation error otherwise.
void check_key(const rjson::value& key, const schema_ptr& schema) {
    const auto expected_members = schema->clustering_key_size() == 0 ? 1 : 2;
    if (key.MemberCount() != expected_members) {
        throw api_error::validation("Given key attribute not in schema");
    }
}
// Verify that every member of the given JSON object (e.g. a request's
// ExpressionAttributeNames) appears in the `used` set. Throws a
// validation error naming the first unused member. A null `field` is
// treated as an absent (and therefore trivially fully-used) object.
// field_name and operation only appear in the error message.
void verify_all_are_used(const rjson::value* field,
        const std::unordered_set<std::string>& used, const char* field_name, const char* operation) {
    if (field == nullptr) {
        return;
    }
    for (auto it = field->MemberBegin(); it != field->MemberEnd(); ++it) {
        if (used.contains(rjson::to_string(it->name))) {
            continue;
        }
        throw api_error::validation(
                format("{} has spurious '{}', not used in {}",
                        field_name, rjson::to_string_view(it->name), operation));
    }
}
// Record and report an authorization failure. Always increments the
// authorization_failures counter; beyond that, behavior depends on the
// two flags:
//   enforce=true,  warn=true:  log a warning, then throw access_denied.
//   enforce=true,  warn=false: throw access_denied (no log).
//   enforce=false, warn=true:  log what *would* be enforced; no throw.
//   enforce=false, warn=false: counter only.
// Note that if enforce_authorization is false, this function returns
// without throwing. So a caller that doesn't want to continue after an
// authorization_error must explicitly return after calling this function.
static void authorization_error(stats& stats, bool enforce_authorization, bool warn_authorization, std::string msg) {
    stats.authorization_failures++;
    if (enforce_authorization) {
        if (warn_authorization) {
            elogger.warn("alternator_warn_authorization=true: {}", msg);
        }
        // msg is moved into the exception, so the log above must come first.
        throw api_error::access_denied(std::move(msg));
    } else {
        if (warn_authorization) {
            elogger.warn("If you set alternator_enforce_authorization=true the following will be enforced: {}", msg);
        }
    }
}
// Check CQL's Role-Based Access Control (RBAC) permission (MODIFY,
// SELECT, DROP, etc.) for the current user on the given table. Depending
// on the enforce/warn flags, a denied permission throws an
// api_error::access_denied, logs a warning, or both - see
// authorization_error(). When both flags are off this is a fast no-op.
future<> verify_permission(
        bool enforce_authorization,
        bool warn_authorization,
        const service::client_state& client_state,
        const schema_ptr& schema,
        auth::permission permission_to_check,
        stats& stats) {
    if (!enforce_authorization && !warn_authorization) {
        co_return;
    }
    // Unfortunately, the fix for issue #23218 did not modify the function
    // that we use here - check_has_permissions(). So if we want to allow
    // writes to internal tables (from try_get_internal_table()) only to a
    // superuser, we need to explicitly check it here.
    if (permission_to_check == auth::permission::MODIFY && is_internal_keyspace(schema->ks_name())) {
        if (!client_state.user() ||
            !client_state.user()->name ||
            !co_await client_state.get_auth_service()->underlying_role_manager().is_superuser(*client_state.user()->name)) {
            sstring username = "<anonymous>";
            if (client_state.user() && client_state.user()->name) {
                username = client_state.user()->name.value();
            }
            authorization_error(stats, enforce_authorization, warn_authorization, fmt::format(
                "Write access denied on internal table {}.{} to role {} because it is not a superuser",
                schema->ks_name(), schema->cf_name(), username));
            // authorization_error() only throws when enforcement is on;
            // in warn-only mode we must not fall through to the regular
            // permission check below, so return explicitly.
            co_return;
        }
    }
    auto resource = auth::make_data_resource(schema->ks_name(), schema->cf_name());
    if (!client_state.user() || !client_state.user()->name ||
        !co_await client_state.check_has_permission(auth::command_desc(permission_to_check, resource))) {
        sstring username = "<anonymous>";
        if (client_state.user() && client_state.user()->name) {
            username = client_state.user()->name.value();
        }
        // Using exceptions for errors makes this function faster in the
        // success path (when the operation is allowed).
        authorization_error(stats, enforce_authorization, warn_authorization, fmt::format(
            "{} access on table {}.{} is denied to role {}, client address {}",
            auth::permissions::to_string(permission_to_check),
            schema->ks_name(), schema->cf_name(), username, client_state.get_client_address()));
    }
}
// Similar to verify_permission() above, but just for CREATE operations.
// Those do not operate on any specific table, so require the CREATE
// permission on ALL KEYSPACES (the root "data" resource) instead of on a
// specific table. Like verify_permission(), this is a fast no-op when
// both flags are off, and delegates denial handling (throw and/or warn)
// to authorization_error().
future<> verify_create_permission(bool enforce_authorization, bool warn_authorization, const service::client_state& client_state, stats& stats) {
    if (!enforce_authorization && !warn_authorization) {
        co_return;
    }
    // The bare data resource kind denotes "ALL KEYSPACES".
    auto resource = auth::resource(auth::resource_kind::data);
    if (!co_await client_state.check_has_permission(auth::command_desc(auth::permission::CREATE, resource))) {
        sstring username = "<anonymous>";
        if (client_state.user() && client_state.user()->name) {
            username = client_state.user()->name.value();
        }
        authorization_error(stats, enforce_authorization, warn_authorization, fmt::format(
            "CREATE access on ALL KEYSPACES is denied to role {}", username));
    }
}
// Try to interpret a table name of the form
// "<INTERNAL_TABLE_PREFIX><keyspace>.<table>" as a reference to an
// internal Scylla table, and return its schema. Returns a null
// schema_ptr when the name does not follow that form or the keyspace is
// not internal; throws resource_not_found (or a validation error for a
// malformed table name) when the addressed table does not exist.
schema_ptr try_get_internal_table(const data_dictionary::database& db, std::string_view table_name) {
    if (!table_name.starts_with(executor::INTERNAL_TABLE_PREFIX)) {
        return schema_ptr{};
    }
    table_name.remove_prefix(executor::INTERNAL_TABLE_PREFIX.size());
    const size_t dot = table_name.find('.');
    if (dot == std::string_view::npos) {
        return schema_ptr{};
    }
    const std::string_view ks_name = table_name.substr(0, dot);
    table_name.remove_prefix(dot + 1);
    // Only internal keyspaces can be accessed to avoid leakage
    auto ks = db.try_find_keyspace(ks_name);
    if (!ks || !ks->is_internal()) {
        return schema_ptr{};
    }
    try {
        return db.find_schema(ks_name, table_name);
    } catch (data_dictionary::no_such_column_family&) {
        // DynamoDB returns validation error even when table does not exist
        // and the table name is invalid.
        validate_table_name(table_name);
        throw api_error::resource_not_found(
            fmt::format("Requested resource not found: Internal table: {}.{} not found", ks_name, table_name));
    }
}
// Resolve the schema of the table named by a member of a BatchGetItem /
// BatchWriteItem request, where the table name is the JSON member's key.
// Throws resource_not_found (or a validation error for a malformed name)
// when the table does not exist.
schema_ptr get_table_from_batch_request(const service::storage_proxy& proxy, const rjson::value::ConstMemberIterator& batch_request) {
    // JSON keys are always strings, so this conversion cannot fail.
    const sstring table_name = rjson::to_sstring(batch_request->name);
    try {
        return proxy.data_dictionary().find_schema(sstring(executor::KEYSPACE_NAME_PREFIX) + table_name, table_name);
    } catch (data_dictionary::no_such_column_family&) {
        // DynamoDB returns validation error even when table does not exist
        // and the table name is invalid.
        validate_table_name(table_name);
        throw api_error::resource_not_found(format("Requested resource not found: Table: {} not found", table_name));
    }
}
// Return the per-table Alternator statistics object for the given
// schema, lazily creating it and caching it on the table's stats on
// first use so subsequent calls are cheap.
lw_shared_ptr<stats> get_stats_from_schema(service::storage_proxy& sp, const schema& schema) {
    try {
        replica::table& table = sp.local_db().find_column_family(schema.id());
        if (!table.get_stats().alternator_stats) {
            // First access for this table: create and cache its metrics.
            table.get_stats().alternator_stats = seastar::make_shared<table_stats>(schema.ks_name(), schema.cf_name());
        }
        return table.get_stats().alternator_stats->_stats;
    } catch (std::runtime_error&) {
        // If we're here it means that a table we are currently working on was deleted before the
        // operation completed, returning a temporary object is fine, if the table get deleted so will its metrics
        return make_lw_shared<stats>();
    }
}
// Convert one CQL result row into a DynamoDB-style JSON item, adding
// attributes into `item` (which is expected to start as an empty
// object). Regular columns are emitted under their CQL name and type;
// the special ATTRS_COLUMN_NAME map column is unpacked so each of its
// entries becomes a top-level attribute. When attrs_to_get is engaged,
// only the listed attributes are kept - and for attributes where only a
// nested part was requested, hierarchy_filter() prunes the value. When
// include_all_embedded_attributes is true, all ":attrs" entries are kept
// regardless of attrs_to_get. If item_length_in_bytes is non-null it is
// incremented by an approximation of the item's DynamoDB length -
// counted over all attributes, including ones filtered out of the
// output.
void describe_single_item(const cql3::selection::selection& selection,
        const std::vector<managed_bytes_opt>& result_row,
        const std::optional<attrs_to_get>& attrs_to_get,
        rjson::value& item,
        uint64_t* item_length_in_bytes,
        bool include_all_embedded_attributes)
{
    // The selection's column list is parallel to result_row, so the two
    // are iterated in lockstep.
    const auto& columns = selection.get_columns();
    auto column_it = columns.begin();
    for (const managed_bytes_opt& cell : result_row) {
        if (!cell) {
            ++column_it;
            continue;
        }
        std::string column_name = (*column_it)->name_as_text();
        if (column_name != executor::ATTRS_COLUMN_NAME) {
            // A real (key) column: report it under its CQL name.
            if (item_length_in_bytes) {
                (*item_length_in_bytes) += column_name.length() + cell->size();
            }
            if (!attrs_to_get || attrs_to_get->contains(column_name)) {
                // item is expected to start empty, and column_name are unique
                // so add() makes sense
                rjson::add_with_string_name(item, column_name, rjson::empty_object());
                rjson::value& field = item[column_name.c_str()];
                cell->with_linearized([&] (bytes_view linearized_cell) {
                    rjson::add_with_string_name(field, type_to_string((*column_it)->type), json_key_column_value(linearized_cell, **column_it));
                });
            }
        } else {
            // The ":attrs" column: a map of attribute name to serialized
            // value (see attrs_type()); unpack each entry.
            auto deserialized = attrs_type()->deserialize(*cell);
            auto keys_and_values = value_cast<map_type_impl::native_type>(deserialized);
            for (auto entry : keys_and_values) {
                std::string attr_name = value_cast<sstring>(entry.first);
                if (item_length_in_bytes) {
                    (*item_length_in_bytes) += attr_name.length();
                }
                if (include_all_embedded_attributes || !attrs_to_get || attrs_to_get->contains(attr_name)) {
                    bytes value = value_cast<bytes>(entry.second);
                    if (item_length_in_bytes && value.length()) {
                        // ScyllaDB uses one extra byte compared to DynamoDB for the bytes length
                        (*item_length_in_bytes) += value.length() - 1;
                    }
                    rjson::value v = deserialize_item(value);
                    if (attrs_to_get) {
                        auto it = attrs_to_get->find(attr_name);
                        if (it != attrs_to_get->end()) {
                            // attrs_to_get may have asked for only part of
                            // this attribute. hierarchy_filter() modifies v,
                            // and returns false when nothing is to be kept.
                            if (!hierarchy_filter(v, it->second)) {
                                continue;
                            }
                        }
                    }
                    // item is expected to start empty, and attribute
                    // names are unique so add() makes sense
                    rjson::add_with_string_name(item, attr_name, std::move(v));
                } else if (item_length_in_bytes) {
                    // Filtered out, but its length still counts.
                    (*item_length_in_bytes) += value_cast<bytes>(entry.second).length() - 1;
                }
            }
        }
        ++column_it;
    }
}
// Converts a query result expected to hold at most one row into a JSON item.
// Returns an empty optional when the result has no rows (DynamoDB responses
// must then omit the Item member entirely, not return an empty one).
// Throws std::logic_error if more than one row is present - callers with
// multi-row results must use describe_multi_item() instead.
std::optional<rjson::value> describe_single_item(schema_ptr schema,
        const query::partition_slice& slice,
        const cql3::selection::selection& selection,
        const query::result& query_result,
        const std::optional<attrs_to_get>& attrs_to_get,
        uint64_t* item_length_in_bytes) {
    cql3::selection::result_set_builder builder(selection, gc_clock::now());
    query::result_view::consume(query_result, slice, cql3::selection::result_set_builder::visitor(builder, *schema, selection));
    auto rows = builder.build();
    if (rows->empty()) {
        if (item_length_in_bytes) {
            // An empty result is still counted as a minimal (1 byte) length.
            (*item_length_in_bytes) += 1;
        }
        return std::nullopt;
    }
    if (rows->size() > 1) {
        throw std::logic_error("describe_single_item() asked to describe multiple items");
    }
    rjson::value item = rjson::empty_object();
    describe_single_item(selection, *rows->rows().begin(), attrs_to_get, item, item_length_in_bytes);
    return item;
}
// Helpers for is_big() below: each subtracts an approximation of the given
// JSON value's serialized size from size_left; callers detect "too big" by
// size_left dropping below zero.
static void check_big_array(const rjson::value& val, int& size_left);
static void check_big_object(const rjson::value& val, int& size_left);
// For simplicity, we use a recursive implementation. This is fine because
// Alternator limits the depth of JSONs it reads from inputs, and doesn't
// add more than a couple of levels in its own output construction.
// Quickly and approximately checks whether a JSON value serializes to more
// than big_size bytes. The size accounting is deliberately coarse - the goal
// is early detection of oversized output, not an exact byte count.
bool is_big(const rjson::value& val, int big_size) {
    if (val.IsObject()) {
        int budget = big_size;
        check_big_object(val, budget);
        return budget < 0;
    }
    if (val.IsArray()) {
        int budget = big_size;
        check_big_array(val, budget);
        return budget < 0;
    }
    if (val.IsString()) {
        return ssize_t(val.GetStringLength()) > big_size;
    }
    // Numbers, booleans and null are never "big".
    return false;
}
// Subtracts an approximation of this array's serialized size from size_left,
// recursing into nested containers and bailing out once the budget is spent.
static void check_big_array(const rjson::value& val, int& size_left) {
    // Charge a flat 10 bytes per element (numbers, booleans, punctuation,
    // or the start of a nested container). Accuracy is not required here.
    size_left -= 10 * val.Size();
    for (const auto& element : val.GetArray()) {
        if (size_left < 0) {
            // Already over budget - no need to look further.
            return;
        }
        // Leaves other than strings are already covered by the flat charge
        // above; only strings and nested containers need extra accounting.
        if (element.IsObject()) {
            check_big_object(element, size_left);
        } else if (element.IsArray()) {
            check_big_array(element, size_left);
        } else if (element.IsString()) {
            size_left -= element.GetStringLength();
        }
    }
}
// Subtracts an approximation of this object's serialized size from size_left,
// counting member names plus string values, and recursing into containers.
static void check_big_object(const rjson::value& val, int& size_left) {
    // Flat 10-byte charge per member for punctuation and scalar values.
    size_left -= 10 * val.MemberCount();
    for (const auto& member : val.GetObject()) {
        if (size_left < 0) {
            // Budget exhausted - stop scanning.
            return;
        }
        size_left -= member.name.GetStringLength();
        const auto& v = member.value;
        if (v.IsObject()) {
            check_big_object(v, size_left);
        } else if (v.IsArray()) {
            check_big_array(v, size_left);
        } else if (v.IsString()) {
            size_left -= v.GetStringLength();
        }
    }
}
// Validates a table name per Alternator's rules (3..max_table_name_length
// characters, drawn from [a-zA-Z0-9_.-]). `source` names the request field
// being validated and is used in the error message. Throws
// api_error::validation on failure.
void validate_table_name(std::string_view name, const char* source) {
    const size_t len = name.length();
    if (len < 3 || len > max_table_name_length) {
        throw api_error::validation(
            format("{} must be at least 3 characters long and at most {} characters long", source, max_table_name_length));
    }
    if (!valid_table_name_chars(name)) {
        throw api_error::validation(
            format("{} must satisfy regular expression pattern: [a-zA-Z0-9_.-]+", source));
    }
}
// Validate that a CDC log table could be created for a base table with the
// given name, throwing a user-visible api_error::validation otherwise.
// CDC appends a fixed-length suffix ("_scylla_cdc_log") to the base table's
// name; if the combined name exceeds max_auxiliary_table_name_length, the
// log table cannot be created.
void validate_cdc_log_name_length(std::string_view table_name) {
    // Compute the log-table name once (the previous code called
    // cdc::log_name() twice for the same input).
    const auto log_name = cdc::log_name(table_name);
    if (log_name.length() > max_auxiliary_table_name_length) {
        // To give a helpful error, report the longest base-table name that
        // would fit, assuming cdc::log_name() adds a fixed-length suffix.
        int suffix_len = log_name.length() - table_name.length();
        throw api_error::validation(fmt::format("Streams or vector search cannot be enabled on a table whose name is longer than {} characters: {}",
                max_auxiliary_table_name_length - suffix_len, table_name));
    }
}
body_writer make_streamed(rjson::value&& value) {
return [value = std::move(value)](output_stream<char>&& _out) mutable -> future<> {
auto out = std::move(_out);
std::exception_ptr ex;
try {
co_await rjson::print(value, out);
} catch (...) {
ex = std::current_exception();
}
co_await out.close();
co_await rjson::destroy_gently(std::move(value));
if (ex) {
co_await coroutine::return_exception_ptr(std::move(ex));
}
};
}
} // namespace alternator

View File

@@ -1,247 +0,0 @@
/*
* Copyright 2019-present ScyllaDB
*/
/*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
*/
// This header file, and the implementation file executor_util.cc, contain
// various utility functions that are reused in many different operations
// (API requests) across Alternator's code - in files such as executor.cc,
// executor_read.cc, streams.cc, ttl.cc, and more. These utility functions
// include things like extracting and validating pieces from a JSON request,
// checking permissions, constructing auxiliary table names, and more.
#pragma once
#include <map>
#include <optional>
#include <string>
#include <string_view>
#include <unordered_map>
#include <unordered_set>
#include <seastar/core/future.hh>
#include <seastar/util/noncopyable_function.hh>
#include "utils/rjson.hh"
#include "schema/schema_fwd.hh"
#include "types/types.hh"
#include "auth/permission.hh"
#include "alternator/stats.hh"
#include "alternator/attribute_path.hh"
#include "utils/managed_bytes.hh"
namespace query { class partition_slice; class result; }
namespace cql3::selection { class selection; }
namespace data_dictionary { class database; }
namespace service { class storage_proxy; class client_state; }
namespace alternator {
/// The body_writer is used for streaming responses - where the response body
/// is written in chunks to the output_stream. This allows for efficient
/// handling of large responses without needing to allocate a large buffer in
/// memory. It is one of the variants of executor::request_return_type.
using body_writer = noncopyable_function<future<>(output_stream<char>&&)>;
/// Get the value of an integer attribute, or an empty optional if it is
/// missing. If the attribute exists, but is not an integer, a descriptive
/// api_error is thrown.
std::optional<int> get_int_attribute(const rjson::value& value, std::string_view attribute_name);
/// Get the value of a string attribute, or a default value if it is missing.
/// If the attribute exists, but is not a string, a descriptive api_error is
/// thrown.
std::string get_string_attribute(const rjson::value& value, std::string_view attribute_name, const char* default_return);
/// Get the value of a boolean attribute, or a default value if it is missing.
/// If the attribute exists, but is not a bool, a descriptive api_error is
/// thrown.
bool get_bool_attribute(const rjson::value& value, std::string_view attribute_name, bool default_return);
/// Extract table name from a request.
/// Most requests expect the table's name to be listed in a "TableName" field.
/// get_table_name() returns the name or api_error in case the table name is
/// missing or not a string.
std::string get_table_name(const rjson::value& request);
/// find_table_name() is like get_table_name() except that it returns an
/// optional table name - it returns an empty optional when the TableName
/// is missing from the request, instead of throwing as get_table_name()
/// does. However, find_table_name() still throws if a TableName exists but
/// is not a string.
std::optional<std::string> find_table_name(const rjson::value& request);
/// Extract table schema from a request.
/// Many requests expect the table's name to be listed in a "TableName" field
/// and need to look it up as an existing table. The get_table() function
/// does this, with the appropriate validation and api_error in case the table
/// name is missing, invalid or the table doesn't exist. If everything is
/// successful, it returns the table's schema.
schema_ptr get_table(service::storage_proxy& proxy, const rjson::value& request);
/// This find_table() variant is like get_table() excepts that it returns a
/// nullptr instead of throwing if the request does not mention a TableName.
/// In other cases of errors (i.e., a table is mentioned but doesn't exist)
/// this function throws too.
schema_ptr find_table(service::storage_proxy& proxy, const rjson::value& request);
/// This find_table() variant is like the previous one except that it takes
/// the table name directly instead of a request object. It is used in cases
/// where we already have the table name extracted from the request.
schema_ptr find_table(service::storage_proxy& proxy, std::string_view table_name);
// We would have liked to support table names up to 255 bytes, like DynamoDB.
// But Scylla creates a directory whose name is the table's name plus 33
// bytes (dash and UUID), and since directory names are limited to 255 bytes,
// we need to limit table names to 222 bytes, instead of 255. See issue #4480.
// We actually have two limits here,
// * max_table_name_length is the limit that Alternator will impose on names
// of new Alternator tables.
// * max_auxiliary_table_name_length is the potentially higher absolute limit
// that Scylla imposes on the names of auxiliary tables that Alternator
// wants to create internally - i.e. materialized views or CDC log tables.
// The second limit might mean that it is not possible to add a GSI to an
// existing table, because the name of the new auxiliary table may go over
// the limit. The second limit is also one of the reasons why the first limit
// is set lower than 222 - to have room to enable streams which add the extra
// suffix "_scylla_cdc_log" to the table name.
inline constexpr int max_table_name_length = 192;
inline constexpr int max_auxiliary_table_name_length = 222;
/// validate_table_name() validates the TableName parameter in a request - it
/// should be called in CreateTable, and in other requests only when noticing
/// that the named table doesn't exist.
/// The DynamoDB developer guide, https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.NamingRules
/// specifies that table "names must be between 3 and 255 characters long and
/// can contain only the following characters: a-z, A-Z, 0-9, _ (underscore),
/// - (dash), . (dot)". However, Alternator only allows max_table_name_length
/// characters (see above) - not 255.
/// validate_table_name() throws the appropriate api_error if this validation
/// fails.
void validate_table_name(std::string_view name, const char* source = "TableName");
/// Validate that a CDC log table could be created for the base table with a
/// given table_name, and if not, throw a user-visible api_error::validation.
/// It is not possible to create a CDC log table if the table name is so long
/// that adding the 15-character suffix "_scylla_cdc_log" (cdc_log_suffix)
/// makes it go over max_auxiliary_table_name_length.
/// Note that if max_table_name_length is set to less than 207 (which is
/// max_auxiliary_table_name_length-15), then this function will never
/// fail. However, it's still important to call it in UpdateTable, in case
/// we have pre-existing tables with names longer than this to avoid #24598.
void validate_cdc_log_name_length(std::string_view table_name);
/// Checks if a keyspace, given by its name, is an Alternator keyspace.
/// This just checks if the name begins in executor::KEYSPACE_NAME_PREFIX,
/// a prefix that all keyspaces created by Alternator's CreateTable use.
bool is_alternator_keyspace(std::string_view ks_name);
/// Wraps db::get_tags_of_table() and throws api_error::validation if the
/// table is missing the tags extension.
const std::map<sstring, sstring>& get_tags_of_table_or_throw(schema_ptr schema);
/// Returns a type object representing the type of the ":attrs" column used
/// by Alternator to store all non-key attribute. This type is a map from
/// string (attribute name) to bytes (serialized attribute value).
map_type attrs_type();
// In DynamoDB index names are local to a table, while in Scylla, materialized
// view names are global (in a keyspace). So we need to compose a unique name
// for the view taking into account both the table's name and the index name.
// We concatenate the table and index name separated by a delim character
// (a character not allowed by DynamoDB in ordinary table names, default: ":").
// The downside of this approach is that it limits the sum of the lengths,
// instead of each component individually as DynamoDB does.
// The view_name() function assumes the table_name has already been validated
// but validates the legality of index_name and the combination of both.
std::string view_name(std::string_view table_name, std::string_view index_name,
const std::string& delim = ":", bool validate_len = true);
std::string gsi_name(std::string_view table_name, std::string_view index_name,
bool validate_len = true);
std::string lsi_name(std::string_view table_name, std::string_view index_name,
bool validate_len = true);
/// After calling pk_from_json() and ck_from_json() to extract the pk and ck
/// components of a key, and if that succeeded, call check_key() to further
/// check that the key doesn't have any spurious components.
void check_key(const rjson::value& key, const schema_ptr& schema);
/// Fail with api_error::validation if the expression if has unused attribute
/// names or values. This is how DynamoDB behaves, so we do too.
void verify_all_are_used(const rjson::value* field,
const std::unordered_set<std::string>& used,
const char* field_name,
const char* operation);
/// Check CQL's Role-Based Access Control (RBAC) permission (MODIFY,
/// SELECT, DROP, etc.) on the given table. When permission is denied an
/// appropriate user-readable api_error::access_denied is thrown.
future<> verify_permission(bool enforce_authorization, bool warn_authorization, const service::client_state&, const schema_ptr&, auth::permission, stats& stats);
/// Similar to verify_permission() above, but just for CREATE operations.
/// Those do not operate on any specific table, so require permissions on
/// ALL KEYSPACES instead of any specific table.
future<> verify_create_permission(bool enforce_authorization, bool warn_authorization, const service::client_state&, stats& stats);
// Sets a KeySchema JSON array inside the given parent object describing the
// key attributes of the given schema as HASH or RANGE keys. Additionally,
// adds mappings from key attribute names to their DynamoDB type string into
// attribute_types.
void describe_key_schema(rjson::value& parent, const schema&, std::unordered_map<std::string, std::string>* attribute_types = nullptr, const std::map<sstring, sstring>* tags = nullptr);
/// is_big() checks approximately if the given JSON value is "bigger" than
/// the given big_size number of bytes. The goal is to *quickly* detect
/// oversized JSON that, for example, is too large to be serialized to a
/// contiguous string - we don't need an accurate size for that. Moreover,
/// as soon as we detect that the JSON is indeed "big", we can return true
/// and don't need to continue calculating its exact size.
bool is_big(const rjson::value& val, int big_size = 100'000);
/// try_get_internal_table() handles the special case that the given table_name
/// begins with INTERNAL_TABLE_PREFIX (".scylla.alternator."). In that case,
/// this function assumes that the rest of the name refers to an internal
/// Scylla table (e.g., system table) and returns the schema of that table -
/// or an exception if it doesn't exist. Otherwise, if table_name does not
/// start with INTERNAL_TABLE_PREFIX, this function returns an empty schema_ptr
/// and the caller should look for a normal Alternator table with that name.
schema_ptr try_get_internal_table(const data_dictionary::database& db, std::string_view table_name);
/// get_table_from_batch_request() is used by batch write/read operations to
/// look up the schema for a table named in a batch request, by the JSON member
/// name (which is the table name in a BatchWriteItem or BatchGetItem request).
schema_ptr get_table_from_batch_request(const service::storage_proxy& proxy, const rjson::value::ConstMemberIterator& batch_request);
/// Returns (or lazily creates) the per-table stats object for the given schema.
/// If the table has been deleted, returns a temporary stats object.
lw_shared_ptr<stats> get_stats_from_schema(service::storage_proxy& sp, const schema& schema);
/// Writes one item's attributes into `item` from the given selection result
/// row. If include_all_embedded_attributes is true, all attributes from the
/// ATTRS_COLUMN map column are included regardless of attrs_to_get.
void describe_single_item(const cql3::selection::selection&,
const std::vector<managed_bytes_opt>&,
const std::optional<attrs_to_get>&,
rjson::value&,
uint64_t* item_length_in_bytes = nullptr,
bool include_all_embedded_attributes = false);
/// Converts a single result row to a JSON item, or returns an empty optional
/// if the result is empty.
std::optional<rjson::value> describe_single_item(schema_ptr,
const query::partition_slice&,
const cql3::selection::selection&,
const query::result&,
const std::optional<attrs_to_get>&,
uint64_t* item_length_in_bytes = nullptr);
/// Make a body_writer (function that can write output incrementally to the
/// HTTP stream) from the given JSON object.
/// Note: only useful for (very) large objects as there are overhead issues
/// with this as well, but for massive lists of return objects this can
/// help avoid large allocations/many re-allocs.
body_writer make_streamed(rjson::value&&);
} // namespace alternator

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#include "expressions.hh" #include "expressions.hh"
@@ -744,7 +744,7 @@ void validate_attr_name_length(std::string_view supplementary_context, size_t at
constexpr const size_t DYNAMODB_NONKEY_ATTR_NAME_SIZE_MAX = 65535; constexpr const size_t DYNAMODB_NONKEY_ATTR_NAME_SIZE_MAX = 65535;
const size_t max_length = is_key ? DYNAMODB_KEY_ATTR_NAME_SIZE_MAX : DYNAMODB_NONKEY_ATTR_NAME_SIZE_MAX; const size_t max_length = is_key ? DYNAMODB_KEY_ATTR_NAME_SIZE_MAX : DYNAMODB_NONKEY_ATTR_NAME_SIZE_MAX;
if (attr_name_length > max_length || attr_name_length == 0) { if (attr_name_length > max_length) {
std::string error_msg; std::string error_msg;
if (!error_msg_prefix.empty()) { if (!error_msg_prefix.empty()) {
error_msg += error_msg_prefix; error_msg += error_msg_prefix;
@@ -754,11 +754,7 @@ void validate_attr_name_length(std::string_view supplementary_context, size_t at
error_msg += supplementary_context; error_msg += supplementary_context;
error_msg += " - "; error_msg += " - ";
} }
if (attr_name_length == 0) { error_msg += fmt::format("Attribute name is too large, must be less than {} bytes", std::to_string(max_length + 1));
error_msg += "Empty attribute name";
} else {
error_msg += fmt::format("Attribute name is too large, must be less than {} bytes", std::to_string(max_length + 1));
}
throw api_error::validation(error_msg); throw api_error::validation(error_msg);
} }
} }

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
/* /*

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#pragma once #pragma once

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#pragma once #pragma once
@@ -50,7 +50,7 @@ public:
_operators.emplace_back(i); _operators.emplace_back(i);
check_depth_limit(); check_depth_limit();
} }
void add_dot(std::string name) { void add_dot(std::string(name)) {
_operators.emplace_back(std::move(name)); _operators.emplace_back(std::move(name));
check_depth_limit(); check_depth_limit();
} }
@@ -85,7 +85,7 @@ struct constant {
} }
}; };
// "value" is a value used in the right hand side of an assignment // "value" is is a value used in the right hand side of an assignment
// expression, "SET a = ...". It can be a constant (a reference to a value // expression, "SET a = ...". It can be a constant (a reference to a value
// included in the request, e.g., ":val"), a path to an attribute from the // included in the request, e.g., ":val"), a path to an attribute from the
// existing item (e.g., "a.b[3].c"), or a function of other such values. // existing item (e.g., "a.b[3].c"), or a function of other such values.
@@ -205,7 +205,7 @@ public:
// The supported primitive conditions are: // The supported primitive conditions are:
// 1. Binary operators - v1 OP v2, where OP is =, <>, <, <=, >, or >= and // 1. Binary operators - v1 OP v2, where OP is =, <>, <, <=, >, or >= and
// v1 and v2 are values - from the item (an attribute path), the query // v1 and v2 are values - from the item (an attribute path), the query
// (a ":val" reference), or a function of the above (only the size() // (a ":val" reference), or a function of the the above (only the size()
// function is supported). // function is supported).
// 2. Ternary operator - v1 BETWEEN v2 and v3 (means v1 >= v2 AND v1 <= v3). // 2. Ternary operator - v1 BETWEEN v2 and v3 (means v1 >= v2 AND v1 <= v3).
// 3. N-ary operator - v1 IN ( v2, v3, ... ) // 3. N-ary operator - v1 IN ( v2, v3, ... )

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#pragma once #pragma once

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#include "alternator/http_compression.hh" #include "alternator/http_compression.hh"
@@ -217,18 +217,20 @@ static sstring flatten(chunked_content&& cc) {
return result; return result;
} }
future<std::unique_ptr<http::reply>> response_compressor::generate_reply(std::unique_ptr<http::reply> rep, sstring accept_encoding, std::optional<std::string_view> content_type, std::string&& response_body) { future<std::unique_ptr<http::reply>> response_compressor::generate_reply(std::unique_ptr<http::reply> rep, sstring accept_encoding, const char* content_type, std::string&& response_body) {
response_compressor::compression_type ct = find_compression(accept_encoding, response_body.size()); response_compressor::compression_type ct = find_compression(accept_encoding, response_body.size());
if (ct != response_compressor::compression_type::none) { if (ct != response_compressor::compression_type::none) {
rep->add_header("Content-Encoding", get_encoding_name(ct)); rep->add_header("Content-Encoding", get_encoding_name(ct));
return compress(ct, cfg, std::move(response_body)).then([rep = std::move(rep), content_type] (chunked_content compressed) mutable { rep->set_content_type(content_type);
rep->write_body(content_type, flatten(std::move(compressed))); return compress(ct, cfg, std::move(response_body)).then([rep = std::move(rep)] (chunked_content compressed) mutable {
rep->_content = flatten(std::move(compressed));
return make_ready_future<std::unique_ptr<http::reply>>(std::move(rep)); return make_ready_future<std::unique_ptr<http::reply>>(std::move(rep));
}); });
} else { } else {
// Note that despite the move, response_body (std::string) is copied // Note that despite the move, there is a copy here -
// into an sstring when passed to write_body(). // as str is std::string and rep->_content is sstring.
rep->write_body(content_type, std::move(response_body)); rep->_content = std::move(response_body);
rep->set_content_type(content_type);
} }
return make_ready_future<std::unique_ptr<http::reply>>(std::move(rep)); return make_ready_future<std::unique_ptr<http::reply>>(std::move(rep));
} }
@@ -262,7 +264,7 @@ private:
} }
}; };
body_writer compress(response_compressor::compression_type ct, const db::config& cfg, body_writer&& bw) { executor::body_writer compress(response_compressor::compression_type ct, const db::config& cfg, executor::body_writer&& bw) {
return [bw = std::move(bw), ct, level = cfg.alternator_response_gzip_compression_level()](output_stream<char>&& out) mutable -> future<> { return [bw = std::move(bw), ct, level = cfg.alternator_response_gzip_compression_level()](output_stream<char>&& out) mutable -> future<> {
output_stream_options opts; output_stream_options opts;
opts.trim_to_size = true; opts.trim_to_size = true;
@@ -285,7 +287,7 @@ body_writer compress(response_compressor::compression_type ct, const db::config&
}; };
} }
future<std::unique_ptr<http::reply>> response_compressor::generate_reply(std::unique_ptr<http::reply> rep, sstring accept_encoding, std::optional<std::string_view> content_type, body_writer&& body_writer) { future<std::unique_ptr<http::reply>> response_compressor::generate_reply(std::unique_ptr<http::reply> rep, sstring accept_encoding, const char* content_type, executor::body_writer&& body_writer) {
response_compressor::compression_type ct = find_compression(accept_encoding, std::numeric_limits<size_t>::max()); response_compressor::compression_type ct = find_compression(accept_encoding, std::numeric_limits<size_t>::max());
if (ct != response_compressor::compression_type::none) { if (ct != response_compressor::compression_type::none) {
rep->add_header("Content-Encoding", get_encoding_name(ct)); rep->add_header("Content-Encoding", get_encoding_name(ct));

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#pragma once #pragma once
@@ -83,9 +83,9 @@ private:
public: public:
future<std::unique_ptr<http::reply>> generate_reply(std::unique_ptr<http::reply> rep, future<std::unique_ptr<http::reply>> generate_reply(std::unique_ptr<http::reply> rep,
sstring accept_encoding, std::optional<std::string_view> content_type, std::string&& response_body); sstring accept_encoding, const char* content_type, std::string&& response_body);
future<std::unique_ptr<http::reply>> generate_reply(std::unique_ptr<http::reply> rep, future<std::unique_ptr<http::reply>> generate_reply(std::unique_ptr<http::reply> rep,
sstring accept_encoding, std::optional<std::string_view> content_type, body_writer&& body_writer); sstring accept_encoding, const char* content_type, executor::body_writer&& body_writer);
}; };
} }

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#include "expressions.hh" #include "expressions.hh"

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#pragma once #pragma once

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#include "utils/base64.hh" #include "utils/base64.hh"
@@ -14,12 +14,12 @@
#include "types/concrete_types.hh" #include "types/concrete_types.hh"
#include "types/json_utils.hh" #include "types/json_utils.hh"
#include "mutation/position_in_partition.hh" #include "mutation/position_in_partition.hh"
#include "alternator/executor_util.hh"
static logging::logger slogger("alternator-serialization"); static logging::logger slogger("alternator-serialization");
namespace alternator { namespace alternator {
bool is_alternator_keyspace(const sstring& ks_name);
type_info type_info_from_string(std::string_view type) { type_info type_info_from_string(std::string_view type) {
static thread_local const std::unordered_map<std::string_view, type_info> type_infos = { static thread_local const std::unordered_map<std::string_view, type_info> type_infos = {

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#pragma once #pragma once
@@ -55,7 +55,7 @@ partition_key pk_from_json(const rjson::value& item, schema_ptr schema);
clustering_key ck_from_json(const rjson::value& item, schema_ptr schema); clustering_key ck_from_json(const rjson::value& item, schema_ptr schema);
position_in_partition pos_from_json(const rjson::value& item, schema_ptr schema); position_in_partition pos_from_json(const rjson::value& item, schema_ptr schema);
// If v encodes a number (i.e., it is a {"N": [...]}), returns an object representing it. Otherwise, // If v encodes a number (i.e., it is a {"N": [...]}, returns an object representing it. Otherwise,
// raises ValidationException with diagnostic. // raises ValidationException with diagnostic.
big_decimal unwrap_number(const rjson::value& v, std::string_view diagnostic); big_decimal unwrap_number(const rjson::value& v, std::string_view diagnostic);

View File

@@ -3,12 +3,10 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#include "alternator/server.hh" #include "alternator/server.hh"
#include "audit/audit.hh"
#include "alternator/executor_util.hh"
#include "gms/application_state.hh" #include "gms/application_state.hh"
#include "utils/log.hh" #include "utils/log.hh"
#include <fmt/ranges.h> #include <fmt/ranges.h>
@@ -28,7 +26,6 @@
#include "auth.hh" #include "auth.hh"
#include <cctype> #include <cctype>
#include <string_view> #include <string_view>
#include <algorithm>
#include <utility> #include <utility>
#include "service/storage_proxy.hh" #include "service/storage_proxy.hh"
#include "gms/gossiper.hh" #include "gms/gossiper.hh"
@@ -113,18 +110,10 @@ class api_handler : public handler_base {
// "application/json". Some other AWS services use later versions instead // "application/json". Some other AWS services use later versions instead
// of "1.0", but DynamoDB currently uses "1.0". Note that this content // of "1.0", but DynamoDB currently uses "1.0". Note that this content
// type applies to all replies, both success and error. // type applies to all replies, both success and error.
static constexpr std::string_view REPLY_CONTENT_TYPE = "application/x-amz-json-1.0"; static constexpr const char* REPLY_CONTENT_TYPE = "application/x-amz-json-1.0";
public: public:
api_handler(const std::function<future<executor::request_return_type>(std::unique_ptr<request> req)>& _handle, api_handler(const std::function<future<executor::request_return_type>(std::unique_ptr<request> req)>& _handle,
const db::config& config) : const db::config& config) : _response_compressor(config), _f_handle(
_content_type(config.alternator_http_response_disable_content_type_header()
? std::nullopt
: std::optional<std::string_view>(REPLY_CONTENT_TYPE)),
_content_type_observer(config.alternator_http_response_disable_content_type_header.observe(
[this](const bool& ct) {
_content_type = ct ? std::nullopt : std::optional<std::string_view>(REPLY_CONTENT_TYPE);
})),
_response_compressor(config), _f_handle(
[this, _handle](std::unique_ptr<request> req, std::unique_ptr<reply> rep) { [this, _handle](std::unique_ptr<request> req, std::unique_ptr<reply> rep) {
sstring accept_encoding = _response_compressor.get_accepted_encoding(*req); sstring accept_encoding = _response_compressor.get_accepted_encoding(*req);
return seastar::futurize_invoke(_handle, std::move(req)).then_wrapped( return seastar::futurize_invoke(_handle, std::move(req)).then_wrapped(
@@ -151,11 +140,11 @@ public:
return std::visit(overloaded_functor { return std::visit(overloaded_functor {
[&] (std::string&& str) { [&] (std::string&& str) {
return _response_compressor.generate_reply(std::move(rep), std::move(accept_encoding), return _response_compressor.generate_reply(std::move(rep), std::move(accept_encoding),
_content_type, std::move(str)); REPLY_CONTENT_TYPE, std::move(str));
}, },
[&] (body_writer&& body_writer) { [&] (executor::body_writer&& body_writer) {
return _response_compressor.generate_reply(std::move(rep), std::move(accept_encoding), return _response_compressor.generate_reply(std::move(rep), std::move(accept_encoding),
_content_type, std::move(body_writer)); REPLY_CONTENT_TYPE, std::move(body_writer));
}, },
[&] (const api_error& err) { [&] (const api_error& err) {
generate_error_reply(*rep, err); generate_error_reply(*rep, err);
@@ -165,18 +154,18 @@ public:
}); });
}) { } }) { }
api_handler(const api_handler&) = delete; api_handler(const api_handler&) = default;
future<std::unique_ptr<reply>> handle(const sstring& path, future<std::unique_ptr<reply>> handle(const sstring& path,
std::unique_ptr<request> req, std::unique_ptr<reply> rep) override { std::unique_ptr<request> req, std::unique_ptr<reply> rep) override {
handle_CORS(*req, *rep, false); handle_CORS(*req, *rep, false);
return _f_handle(std::move(req), std::move(rep)); return _f_handle(std::move(req), std::move(rep)).then(
[](std::unique_ptr<reply> rep) {
rep->done();
return make_ready_future<std::unique_ptr<reply>>(std::move(rep));
});
} }
protected: protected:
std::optional<std::string_view> _content_type;
utils::observer<bool> _content_type_observer;
response_compressor _response_compressor;
future_handler_function _f_handle;
void generate_error_reply(reply& rep, const api_error& err) { void generate_error_reply(reply& rep, const api_error& err) {
rjson::value results = rjson::empty_object(); rjson::value results = rjson::empty_object();
if (!err._extra_fields.IsNull() && err._extra_fields.IsObject()) { if (!err._extra_fields.IsNull() && err._extra_fields.IsObject()) {
@@ -184,11 +173,14 @@ protected:
} }
rjson::add(results, "__type", rjson::from_string("com.amazonaws.dynamodb.v20120810#" + err._type)); rjson::add(results, "__type", rjson::from_string("com.amazonaws.dynamodb.v20120810#" + err._type));
rjson::add(results, "message", err._msg); rjson::add(results, "message", err._msg);
sstring content = rjson::print(std::move(results)); rep._content = rjson::print(std::move(results));
slogger.trace("api_handler error case: {}", content); rep._status = err._http_code;
rep.set_status(err._http_code); rep.set_content_type(REPLY_CONTENT_TYPE);
rep.write_body(_content_type, std::move(content)); slogger.trace("api_handler error case: {}", rep._content);
} }
response_compressor _response_compressor;
future_handler_function _f_handle;
}; };
class gated_handler : public handler_base { class gated_handler : public handler_base {
@@ -262,7 +254,8 @@ protected:
} }
} }
rep->set_status(reply::status_type::ok); rep->set_status(reply::status_type::ok);
rep->write_body("json", rjson::print(results)); rep->set_content_type("json");
rep->_content = rjson::print(results);
return make_ready_future<std::unique_ptr<reply>>(std::move(rep)); return make_ready_future<std::unique_ptr<reply>>(std::move(rep));
} }
}; };
@@ -418,8 +411,8 @@ future<std::string> server::verify_signature(const request& req, const chunked_c
} }
} }
auto cache_getter = [&proxy = _proxy] (std::string username) { auto cache_getter = [&proxy = _proxy, &as = _auth_service] (std::string username) {
return get_key_from_roles(proxy, std::move(username)); return get_key_from_roles(proxy, as, std::move(username));
}; };
return _key_cache.get_ptr(user, cache_getter).then_wrapped([this, &req, &content, return _key_cache.get_ptr(user, cache_getter).then_wrapped([this, &req, &content,
user = std::move(user), user = std::move(user),
@@ -706,17 +699,6 @@ future<executor::request_return_type> server::handle_api_request(std::unique_ptr
// for such a size. // for such a size.
co_return api_error::payload_too_large(fmt::format("Request content length limit of {} bytes exceeded", request_content_length_limit)); co_return api_error::payload_too_large(fmt::format("Request content length limit of {} bytes exceeded", request_content_length_limit));
} }
// Check the concurrency limit early, before acquiring memory and
// reading the request body, to avoid piling up memory from excess
// requests that will be rejected anyway. This mirrors the CQL
// transport which also checks concurrency before memory acquisition
// (transport/server.cc).
if (_pending_requests.get_count() >= _max_concurrent_requests) {
_executor._stats.requests_shed++;
co_return api_error::request_limit_exceeded(format("too many in-flight requests (configured via max_concurrent_requests_per_shard): {}", _pending_requests.get_count()));
}
_pending_requests.enter();
auto leave = defer([this] () noexcept { _pending_requests.leave(); });
// JSON parsing can allocate up to roughly 2x the size of the raw // JSON parsing can allocate up to roughly 2x the size of the raw
// document, + a couple of bytes for maintenance. // document, + a couple of bytes for maintenance.
// If the Content-Length of the request is not available, we assume // If the Content-Length of the request is not available, we assume
@@ -728,7 +710,7 @@ future<executor::request_return_type> server::handle_api_request(std::unique_ptr
++_executor._stats.requests_blocked_memory; ++_executor._stats.requests_blocked_memory;
} }
auto units = co_await std::move(units_fut); auto units = co_await std::move(units_fut);
throwing_assert(req->content_stream); SCYLLA_ASSERT(req->content_stream);
chunked_content content = co_await read_entire_stream(*req->content_stream, request_content_length_limit); chunked_content content = co_await read_entire_stream(*req->content_stream, request_content_length_limit);
// If the request had no Content-Length, we reserved too many units // If the request had no Content-Length, we reserved too many units
// so need to return some // so need to return some
@@ -778,12 +760,18 @@ future<executor::request_return_type> server::handle_api_request(std::unique_ptr
_executor._stats.unsupported_operations++; _executor._stats.unsupported_operations++;
co_return api_error::unknown_operation(fmt::format("Unsupported operation {}", op)); co_return api_error::unknown_operation(fmt::format("Unsupported operation {}", op));
} }
if (_pending_requests.get_count() >= _max_concurrent_requests) {
_executor._stats.requests_shed++;
co_return api_error::request_limit_exceeded(format("too many in-flight requests (configured via max_concurrent_requests_per_shard): {}", _pending_requests.get_count()));
}
_pending_requests.enter();
auto leave = defer([this] () noexcept { _pending_requests.leave(); });
executor::client_state client_state(service::client_state::external_tag(), executor::client_state client_state(service::client_state::external_tag(),
_auth_service, &_sl_controller, _timeout_config.current_values(), req->get_client_address()); _auth_service, &_sl_controller, _timeout_config.current_values(), req->get_client_address());
if (!username.empty()) { if (!username.empty()) {
client_state.set_login(auth::authenticated_user(username)); client_state.set_login(auth::authenticated_user(username));
} }
client_state.maybe_update_per_service_level_params(); co_await client_state.maybe_update_per_service_level_params();
tracing::trace_state_ptr trace_state = maybe_trace_query(client_state, username, op, content, _max_users_query_size_in_trace_output.get()); tracing::trace_state_ptr trace_state = maybe_trace_query(client_state, username, op, content, _max_users_query_size_in_trace_output.get());
tracing::trace(trace_state, "{}", op); tracing::trace(trace_state, "{}", op);
@@ -792,25 +780,12 @@ future<executor::request_return_type> server::handle_api_request(std::unique_ptr
auto f = [this, content = std::move(content), &callback = callback_it->second, auto f = [this, content = std::move(content), &callback = callback_it->second,
client_state = std::move(client_state), trace_state = std::move(trace_state), client_state = std::move(client_state), trace_state = std::move(trace_state),
units = std::move(units), req = std::move(req)] () mutable -> future<executor::request_return_type> { units = std::move(units), req = std::move(req)] () mutable -> future<executor::request_return_type> {
rjson::value json_request = co_await _json_parser.parse(std::move(content)); rjson::value json_request = co_await _json_parser.parse(std::move(content));
if (!json_request.IsObject()) { if (!json_request.IsObject()) {
co_return api_error::validation("Request content must be an object"); co_return api_error::validation("Request content must be an object");
} }
std::unique_ptr<audit::audit_info_alternator> audit_info; co_return co_await callback(_executor, client_state, trace_state,
std::exception_ptr ex = {}; make_service_permit(std::move(units)), std::move(json_request), std::move(req));
executor::request_return_type ret;
try {
ret = co_await callback(_executor, client_state, trace_state, make_service_permit(std::move(units)), std::move(json_request), std::move(req), audit_info);
} catch (...) {
ex = std::current_exception();
}
if (audit_info) {
co_await audit::inspect(*audit_info, client_state, ex != nullptr);
}
if (ex) {
co_return coroutine::exception(std::move(ex));
}
co_return ret;
}; };
co_return co_await _sl_controller.with_user_service_level(user, std::ref(f)); co_return co_await _sl_controller.with_user_service_level(user, std::ref(f));
} }
@@ -854,96 +829,81 @@ server::server(executor& exec, service::storage_proxy& proxy, gms::gossiper& gos
, _pending_requests("alternator::server::pending_requests") , _pending_requests("alternator::server::pending_requests")
, _timeout_config(_proxy.data_dictionary().get_config()) , _timeout_config(_proxy.data_dictionary().get_config())
, _callbacks{ , _callbacks{
{"CreateTable", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) { {"CreateTable", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
return e.create_table(client_state, std::move(trace_state), std::move(permit), std::move(json_request), audit_info); return e.create_table(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
}}, }},
{"DescribeTable", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) { {"DescribeTable", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
return e.describe_table(client_state, std::move(trace_state), std::move(permit), std::move(json_request), audit_info); return e.describe_table(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
}}, }},
{"DeleteTable", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) { {"DeleteTable", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
return e.delete_table(client_state, std::move(trace_state), std::move(permit), std::move(json_request), audit_info); return e.delete_table(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
}}, }},
{"UpdateTable", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) { {"UpdateTable", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
return e.update_table(client_state, std::move(trace_state), std::move(permit), std::move(json_request), audit_info); return e.update_table(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
}}, }},
{"PutItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) { {"PutItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
return e.put_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request), audit_info); return e.put_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
}}, }},
{"UpdateItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) { {"UpdateItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
return e.update_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request), audit_info); return e.update_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
}}, }},
{"GetItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) { {"GetItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
return e.get_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request), audit_info); return e.get_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
}}, }},
{"DeleteItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) { {"DeleteItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
return e.delete_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request), audit_info); return e.delete_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
}}, }},
{"ListTables", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) { {"ListTables", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
return e.list_tables(client_state, std::move(permit), std::move(json_request), audit_info); return e.list_tables(client_state, std::move(permit), std::move(json_request));
}}, }},
{"Scan", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) { {"Scan", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
return e.scan(client_state, std::move(trace_state), std::move(permit), std::move(json_request), audit_info); return e.scan(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
}}, }},
{"DescribeEndpoints", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) { {"DescribeEndpoints", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
return e.describe_endpoints(client_state, std::move(permit), std::move(json_request), req->get_header("Host"), audit_info); return e.describe_endpoints(client_state, std::move(permit), std::move(json_request), req->get_header("Host"));
}}, }},
{"BatchWriteItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) { {"BatchWriteItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
return e.batch_write_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request), audit_info); return e.batch_write_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
}}, }},
{"BatchGetItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) { {"BatchGetItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
return e.batch_get_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request), audit_info); return e.batch_get_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
}}, }},
{"Query", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) { {"Query", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
return e.query(client_state, std::move(trace_state), std::move(permit), std::move(json_request), audit_info); return e.query(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
}}, }},
{"TagResource", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) { {"TagResource", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
return e.tag_resource(client_state, std::move(permit), std::move(json_request), audit_info); return e.tag_resource(client_state, std::move(permit), std::move(json_request));
}}, }},
{"UntagResource", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) { {"UntagResource", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
return e.untag_resource(client_state, std::move(permit), std::move(json_request), audit_info); return e.untag_resource(client_state, std::move(permit), std::move(json_request));
}}, }},
{"ListTagsOfResource", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) { {"ListTagsOfResource", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
return e.list_tags_of_resource(client_state, std::move(permit), std::move(json_request), audit_info); return e.list_tags_of_resource(client_state, std::move(permit), std::move(json_request));
}}, }},
{"UpdateTimeToLive", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) { {"UpdateTimeToLive", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
return e.update_time_to_live(client_state, std::move(permit), std::move(json_request), audit_info); return e.update_time_to_live(client_state, std::move(permit), std::move(json_request));
}}, }},
{"DescribeTimeToLive", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) { {"DescribeTimeToLive", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
return e.describe_time_to_live(client_state, std::move(permit), std::move(json_request), audit_info); return e.describe_time_to_live(client_state, std::move(permit), std::move(json_request));
}}, }},
{"ListStreams", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) { {"ListStreams", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
return e.list_streams(client_state, std::move(permit), std::move(json_request), audit_info); return e.list_streams(client_state, std::move(permit), std::move(json_request));
}}, }},
{"DescribeStream", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) { {"DescribeStream", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
return e.describe_stream(client_state, std::move(permit), std::move(json_request), audit_info); return e.describe_stream(client_state, std::move(permit), std::move(json_request));
}}, }},
{"GetShardIterator", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) { {"GetShardIterator", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
return e.get_shard_iterator(client_state, std::move(permit), std::move(json_request), audit_info); return e.get_shard_iterator(client_state, std::move(permit), std::move(json_request));
}}, }},
{"GetRecords", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) { {"GetRecords", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
return e.get_records(client_state, std::move(trace_state), std::move(permit), std::move(json_request), audit_info); return e.get_records(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
}}, }},
{"DescribeContinuousBackups", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) { {"DescribeContinuousBackups", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
return e.describe_continuous_backups(client_state, std::move(permit), std::move(json_request), audit_info); return e.describe_continuous_backups(client_state, std::move(permit), std::move(json_request));
}}, }},
} { } {
} }
// Sanitize an HTTP header value: strip control characters (RFC 7230 §3.2.6)
// and leading/trailing whitespace. Returns nullopt if the result is empty.
static std::optional<sstring> sanitize_header_value(const sstring& v, std::string_view option_name) {
std::string sanitized(v.begin(), v.end());
sanitized.erase(std::remove_if(sanitized.begin(), sanitized.end(),
[](unsigned char c) { return std::iscntrl(c); }), sanitized.end());
if (sanitized.size() != v.size()) {
slogger.warn("Configuration option '{}' contained control characters, they were stripped", option_name);
}
std::string_view trimmed = sanitized;
while (!trimmed.empty() && std::isspace((unsigned char)trimmed.front())) trimmed.remove_prefix(1);
while (!trimmed.empty() && std::isspace((unsigned char)trimmed.back())) trimmed.remove_suffix(1);
return trimmed.empty() ? std::nullopt : std::optional<sstring>(trimmed);
}
future<> server::init(net::inet_address addr, std::optional<uint16_t> port, std::optional<uint16_t> https_port, future<> server::init(net::inet_address addr, std::optional<uint16_t> port, std::optional<uint16_t> https_port,
std::optional<uint16_t> port_proxy_protocol, std::optional<uint16_t> https_port_proxy_protocol, std::optional<uint16_t> port_proxy_protocol, std::optional<uint16_t> https_port_proxy_protocol,
std::optional<tls::credentials_builder> creds, std::optional<tls::credentials_builder> creds,
@@ -961,24 +921,6 @@ future<> server::init(net::inet_address addr, std::optional<uint16_t> port, std:
return seastar::async([this, addr, port, https_port, port_proxy_protocol, https_port_proxy_protocol, creds] { return seastar::async([this, addr, port, https_port, port_proxy_protocol, https_port_proxy_protocol, creds] {
_executor.start().get(); _executor.start().get();
// Apply current config values and register observers for live updates
// before listen() so that no responses are ever sent with stale defaults.
// Both options drive Seastar's built-in header generation directly.
const db::config& cfg = _proxy.data_dictionary().get_config();
auto apply_server_header = [this] (const sstring& v) {
auto opt = sanitize_header_value(v, "alternator_http_response_server_header");
_http_server.set_server_header(opt);
_https_server.set_server_header(opt);
};
auto apply_date_header = [this] (const bool& disable) {
_http_server.set_generate_date_header(!disable);
_https_server.set_generate_date_header(!disable);
};
apply_server_header(cfg.alternator_http_response_server_header());
apply_date_header(cfg.alternator_http_response_disable_date_header());
_server_header_observer = cfg.alternator_http_response_server_header.observe(std::move(apply_server_header));
_date_header_observer = cfg.alternator_http_response_disable_date_header.observe(std::move(apply_date_header));
if (port || port_proxy_protocol) { if (port || port_proxy_protocol) {
set_routes(_http_server._routes); set_routes(_http_server._routes);
_http_server.set_content_streaming(true); _http_server.set_content_streaming(true);

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#pragma once #pragma once
@@ -34,7 +34,7 @@ class server : public peering_sharded_service<server> {
// DynamoDB also has the same limit set to 16 MB. // DynamoDB also has the same limit set to 16 MB.
static constexpr size_t request_content_length_limit = 16*MB; static constexpr size_t request_content_length_limit = 16*MB;
using alternator_callback = std::function<future<executor::request_return_type>(executor&, executor::client_state&, using alternator_callback = std::function<future<executor::request_return_type>(executor&, executor::client_state&,
tracing::trace_state_ptr, service_permit, rjson::value, std::unique_ptr<http::request>, std::unique_ptr<audit::audit_info_alternator>&)>; tracing::trace_state_ptr, service_permit, rjson::value, std::unique_ptr<http::request>)>;
using alternator_callbacks_map = std::unordered_map<std::string_view, alternator_callback>; using alternator_callbacks_map = std::unordered_map<std::string_view, alternator_callback>;
httpd::http_server _http_server; httpd::http_server _http_server;
@@ -97,10 +97,6 @@ class server : public peering_sharded_service<server> {
}; };
utils::scoped_item_list<ongoing_request> _ongoing_requests; utils::scoped_item_list<ongoing_request> _ongoing_requests;
// Observers for live-update config options that drive Seastar HTTP server state.
std::optional<utils::observer<sstring>> _server_header_observer;
std::optional<utils::observer<bool>> _date_header_observer;
public: public:
server(executor& executor, service::storage_proxy& proxy, gms::gossiper& gossiper, auth::service& service, qos::service_level_controller& sl_controller); server(executor& executor, service::storage_proxy& proxy, gms::gossiper& gossiper, auth::service& service, qos::service_level_controller& sl_controller);

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#include "stats.hh" #include "stats.hh"
@@ -14,6 +14,20 @@
namespace alternator { namespace alternator {
const char* ALTERNATOR_METRICS = "alternator"; const char* ALTERNATOR_METRICS = "alternator";
static seastar::metrics::histogram estimated_histogram_to_metrics(const utils::estimated_histogram& histogram) {
seastar::metrics::histogram res;
res.buckets.resize(histogram.bucket_offsets.size());
uint64_t cumulative_count = 0;
res.sample_count = histogram._count;
res.sample_sum = histogram._sample_sum;
for (size_t i = 0; i < res.buckets.size(); i++) {
auto& v = res.buckets[i];
v.upper_bound = histogram.bucket_offsets[i];
cumulative_count += histogram.buckets[i];
v.count = cumulative_count;
}
return res;
}
static seastar::metrics::label column_family_label("cf"); static seastar::metrics::label column_family_label("cf");
static seastar::metrics::label keyspace_label("ks"); static seastar::metrics::label keyspace_label("ks");
@@ -137,21 +151,21 @@ static void register_metrics_with_optional_table(seastar::metrics::metric_groups
seastar::metrics::make_counter("batch_item_count", seastar::metrics::description("The total number of items processed across all batches"), labels, seastar::metrics::make_counter("batch_item_count", seastar::metrics::description("The total number of items processed across all batches"), labels,
stats.api_operations.batch_get_item_batch_total)(op("BatchGetItem")).aggregate(aggregate_labels).set_skip_when_empty(), stats.api_operations.batch_get_item_batch_total)(op("BatchGetItem")).aggregate(aggregate_labels).set_skip_when_empty(),
seastar::metrics::make_histogram("batch_item_count_histogram", seastar::metrics::description("Histogram of the number of items in a batch request"), labels, seastar::metrics::make_histogram("batch_item_count_histogram", seastar::metrics::description("Histogram of the number of items in a batch request"), labels,
[&stats]{ return to_metrics_histogram(stats.api_operations.batch_get_item_histogram);})(op("BatchGetItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(), [&stats]{ return estimated_histogram_to_metrics(stats.api_operations.batch_get_item_histogram);})(op("BatchGetItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(),
seastar::metrics::make_histogram("batch_item_count_histogram", seastar::metrics::description("Histogram of the number of items in a batch request"), labels, seastar::metrics::make_histogram("batch_item_count_histogram", seastar::metrics::description("Histogram of the number of items in a batch request"), labels,
[&stats]{ return to_metrics_histogram(stats.api_operations.batch_write_item_histogram);})(op("BatchWriteItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(), [&stats]{ return estimated_histogram_to_metrics(stats.api_operations.batch_write_item_histogram);})(op("BatchWriteItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(),
seastar::metrics::make_histogram("operation_size_kb", seastar::metrics::description("Histogram of item sizes involved in a request"), labels, seastar::metrics::make_histogram("operation_size_kb", seastar::metrics::description("Histogram of item sizes involved in a request"), labels,
[&stats]{ return to_metrics_histogram(stats.operation_sizes.get_item_op_size_kb);})(op("GetItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(), [&stats]{ return estimated_histogram_to_metrics(stats.operation_sizes.get_item_op_size_kb);})(op("GetItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(),
seastar::metrics::make_histogram("operation_size_kb", seastar::metrics::description("Histogram of item sizes involved in a request"), labels, seastar::metrics::make_histogram("operation_size_kb", seastar::metrics::description("Histogram of item sizes involved in a request"), labels,
[&stats]{ return to_metrics_histogram(stats.operation_sizes.put_item_op_size_kb);})(op("PutItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(), [&stats]{ return estimated_histogram_to_metrics(stats.operation_sizes.put_item_op_size_kb);})(op("PutItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(),
seastar::metrics::make_histogram("operation_size_kb", seastar::metrics::description("Histogram of item sizes involved in a request"), labels, seastar::metrics::make_histogram("operation_size_kb", seastar::metrics::description("Histogram of item sizes involved in a request"), labels,
[&stats]{ return to_metrics_histogram(stats.operation_sizes.delete_item_op_size_kb);})(op("DeleteItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(), [&stats]{ return estimated_histogram_to_metrics(stats.operation_sizes.delete_item_op_size_kb);})(op("DeleteItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(),
seastar::metrics::make_histogram("operation_size_kb", seastar::metrics::description("Histogram of item sizes involved in a request"), labels, seastar::metrics::make_histogram("operation_size_kb", seastar::metrics::description("Histogram of item sizes involved in a request"), labels,
[&stats]{ return to_metrics_histogram(stats.operation_sizes.update_item_op_size_kb);})(op("UpdateItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(), [&stats]{ return estimated_histogram_to_metrics(stats.operation_sizes.update_item_op_size_kb);})(op("UpdateItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(),
seastar::metrics::make_histogram("operation_size_kb", seastar::metrics::description("Histogram of item sizes involved in a request"), labels, seastar::metrics::make_histogram("operation_size_kb", seastar::metrics::description("Histogram of item sizes involved in a request"), labels,
[&stats]{ return to_metrics_histogram(stats.operation_sizes.batch_get_item_op_size_kb);})(op("BatchGetItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(), [&stats]{ return estimated_histogram_to_metrics(stats.operation_sizes.batch_get_item_op_size_kb);})(op("BatchGetItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(),
seastar::metrics::make_histogram("operation_size_kb", seastar::metrics::description("Histogram of item sizes involved in a request"), labels, seastar::metrics::make_histogram("operation_size_kb", seastar::metrics::description("Histogram of item sizes involved in a request"), labels,
[&stats]{ return to_metrics_histogram(stats.operation_sizes.batch_write_item_op_size_kb);})(op("BatchWriteItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(), [&stats]{ return estimated_histogram_to_metrics(stats.operation_sizes.batch_write_item_op_size_kb);})(op("BatchWriteItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(),
}); });
seastar::metrics::label expression_label("expression"); seastar::metrics::label expression_label("expression");

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#pragma once #pragma once
@@ -16,8 +16,6 @@
#include "cql3/stats.hh" #include "cql3/stats.hh"
namespace alternator { namespace alternator {
using batch_histogram = utils::estimated_histogram_with_max<128>;
using op_size_histogram = utils::estimated_histogram_with_max<512>;
// Object holding per-shard statistics related to Alternator. // Object holding per-shard statistics related to Alternator.
// While this object is alive, these metrics are also registered to be // While this object is alive, these metrics are also registered to be
@@ -78,34 +76,34 @@ public:
utils::timed_rate_moving_average_summary_and_histogram batch_get_item_latency; utils::timed_rate_moving_average_summary_and_histogram batch_get_item_latency;
utils::timed_rate_moving_average_summary_and_histogram get_records_latency; utils::timed_rate_moving_average_summary_and_histogram get_records_latency;
batch_histogram batch_get_item_histogram; utils::estimated_histogram batch_get_item_histogram{22}; // a histogram that covers the range 1 - 100
batch_histogram batch_write_item_histogram; utils::estimated_histogram batch_write_item_histogram{22}; // a histogram that covers the range 1 - 100
} api_operations; } api_operations;
// Operation size metrics // Operation size metrics
struct { struct {
// Item size statistics collected per table and aggregated per node. // Item size statistics collected per table and aggregated per node.
// Each histogram covers the range 0 - 512. Resolves #25143. // Each histogram covers the range 0 - 446. Resolves #25143.
// A size is the retrieved item's size. // A size is the retrieved item's size.
op_size_histogram get_item_op_size_kb; utils::estimated_histogram get_item_op_size_kb{30};
// A size is the maximum of the new item's size and the old item's size. // A size is the maximum of the new item's size and the old item's size.
op_size_histogram put_item_op_size_kb; utils::estimated_histogram put_item_op_size_kb{30};
// A size is the deleted item's size. If the deleted item's size is // A size is the deleted item's size. If the deleted item's size is
// unknown (i.e. read-before-write wasn't necessary and it wasn't // unknown (i.e. read-before-write wasn't necessary and it wasn't
// forced by a configuration option), it won't be recorded on the // forced by a configuration option), it won't be recorded on the
// histogram. // histogram.
op_size_histogram delete_item_op_size_kb; utils::estimated_histogram delete_item_op_size_kb{30};
// A size is the maximum of existing item's size and the estimated size // A size is the maximum of existing item's size and the estimated size
// of the update. This will be changed to the maximum of the existing item's // of the update. This will be changed to the maximum of the existing item's
// size and the new item's size in a subsequent PR. // size and the new item's size in a subsequent PR.
op_size_histogram update_item_op_size_kb; utils::estimated_histogram update_item_op_size_kb{30};
// A size is the sum of the sizes of all items per table. This means // A size is the sum of the sizes of all items per table. This means
// that a single BatchGetItem / BatchWriteItem updates the histogram // that a single BatchGetItem / BatchWriteItem updates the histogram
// for each table that it has items in. // for each table that it has items in.
// The sizes are the retrieved items' sizes grouped per table. // The sizes are the retrieved items' sizes grouped per table.
op_size_histogram batch_get_item_op_size_kb; utils::estimated_histogram batch_get_item_op_size_kb{30};
// The sizes are the the written items' sizes grouped per table. // The sizes are the the written items' sizes grouped per table.
op_size_histogram batch_write_item_op_size_kb; utils::estimated_histogram batch_write_item_op_size_kb{30};
} operation_sizes; } operation_sizes;
// Count of authentication and authorization failures, counted if either // Count of authentication and authorization failures, counted if either
// alternator_enforce_authorization or alternator_warn_authorization are // alternator_enforce_authorization or alternator_warn_authorization are
@@ -142,7 +140,7 @@ public:
cql3::cql_stats cql_stats; cql3::cql_stats cql_stats;
// Enumeration of expression types only for stats // Enumeration of expression types only for stats
// if needed it can be extended e.g. per operation // if needed it can be extended e.g. per operation
enum expression_types { enum expression_types {
UPDATE_EXPRESSION, UPDATE_EXPRESSION,
CONDITION_EXPRESSION, CONDITION_EXPRESSION,
@@ -166,7 +164,7 @@ struct table_stats {
void register_metrics(seastar::metrics::metric_groups& metrics, const stats& stats); void register_metrics(seastar::metrics::metric_groups& metrics, const stats& stats);
inline uint64_t bytes_to_kb_ceil(uint64_t bytes) { inline uint64_t bytes_to_kb_ceil(uint64_t bytes) {
return (bytes) / 1024; return (bytes + 1023) / 1024;
} }
} }

File diff suppressed because it is too large Load Diff

View File

@@ -1,62 +0,0 @@
/*
* Copyright 2026-present ScyllaDB
*/
/*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
*/
#pragma once
#include "utils/chunked_vector.hh"
#include "cdc/generation.hh"
#include <generator>
namespace cdc {
class stream_id;
}
namespace alternator {
class stream_id_range {
// helper class for manipulating (possibly wrapped around) range of stream_ids
// it holds one or two ranges [lo1, end1) and [lo2, end2)
// if the range doesn't wrap around, then lo2 == end2 == items.end()
// if the range wraps around, then
// `lo1 == items.begin() and end2 == items.end()` must be true
// the object doesn't own `items`, but it does manipulate it - it will
// reorder elements (so both ranges were next to each other) and sort them by unsigned comparison
// usage - create an object with needed ranges. before iteration call `prepare_for_iterating` method -
// it will reorder elements of `items` array to what is needed and then call begin / end pair.
// note - `items` array will be modified - elements will be reordered, but no elements will be added or removed.
// `items` array must stay intact as long as iteration is in progress.
utils::chunked_vector<cdc::stream_id>::iterator _lo1 = {}, _end1 = {}, _lo2 = {}, _end2 = {};
const cdc::stream_id* _skip_to = nullptr;
bool _prepared = false;
public:
stream_id_range(
utils::chunked_vector<cdc::stream_id> &items,
utils::chunked_vector<cdc::stream_id>::iterator lo1,
utils::chunked_vector<cdc::stream_id>::iterator end1);
stream_id_range(
utils::chunked_vector<cdc::stream_id> &items,
utils::chunked_vector<cdc::stream_id>::iterator lo1,
utils::chunked_vector<cdc::stream_id>::iterator end1,
utils::chunked_vector<cdc::stream_id>::iterator lo2,
utils::chunked_vector<cdc::stream_id>::iterator end2);
void set_starting_position(const cdc::stream_id &update_to);
// Must be called after construction and after set_starting_position()
// (if used), but before begin()/end() iteration.
void prepare_for_iterating();
utils::chunked_vector<cdc::stream_id>::iterator begin() const { return _lo1; }
utils::chunked_vector<cdc::stream_id>::iterator end() const { return _end1; }
};
stream_id_range find_children_range_from_parent_token(
const utils::chunked_vector<cdc::stream_id>& parent_streams,
utils::chunked_vector<cdc::stream_id>& current_streams,
cdc::stream_id parent,
bool uses_tablets
);
}

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#include <chrono> #include <chrono>
@@ -44,10 +44,8 @@
#include "cql3/query_options.hh" #include "cql3/query_options.hh"
#include "cql3/column_identifier.hh" #include "cql3/column_identifier.hh"
#include "alternator/executor.hh" #include "alternator/executor.hh"
#include "alternator/executor_util.hh"
#include "alternator/controller.hh" #include "alternator/controller.hh"
#include "alternator/serialization.hh" #include "alternator/serialization.hh"
#include "alternator/ttl_tag.hh"
#include "dht/sharder.hh" #include "dht/sharder.hh"
#include "db/config.hh" #include "db/config.hh"
#include "db/tags/utils.hh" #include "db/tags/utils.hh"
@@ -59,17 +57,22 @@ static logging::logger tlogger("alternator_ttl");
namespace alternator { namespace alternator {
future<executor::request_return_type> executor::update_time_to_live(client_state& client_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info) { // We write the expiration-time attribute enabled on a table in a
// tag TTL_TAG_KEY.
// Currently, the *value* of this tag is simply the name of the attribute,
// and the expiration scanner interprets it as an Alternator attribute name -
// It can refer to a real column or if that doesn't exist, to a member of
// the ":attrs" map column. Although this is designed for Alternator, it may
// be good enough for CQL as well (there, the ":attrs" column won't exist).
extern const sstring TTL_TAG_KEY;
future<executor::request_return_type> executor::update_time_to_live(client_state& client_state, service_permit permit, rjson::value request) {
_stats.api_operations.update_time_to_live++; _stats.api_operations.update_time_to_live++;
if (!_proxy.features().alternator_ttl) { if (!_proxy.features().alternator_ttl) {
co_return api_error::unknown_operation("UpdateTimeToLive not yet supported. Upgrade all nodes to a version that supports it."); co_return api_error::unknown_operation("UpdateTimeToLive not yet supported. Experimental support is available if the 'alternator-ttl' experimental feature is enabled on all nodes.");
} }
schema_ptr schema = get_table(_proxy, request); schema_ptr schema = get_table(_proxy, request);
maybe_audit(audit_info, audit::statement_category::DDL,
schema->ks_name(), schema->cf_name(), "UpdateTimeToLive", request);
rjson::value* spec = rjson::find(request, "TimeToLiveSpecification"); rjson::value* spec = rjson::find(request, "TimeToLiveSpecification");
if (!spec || !spec->IsObject()) { if (!spec || !spec->IsObject()) {
co_return api_error::validation("UpdateTimeToLive missing mandatory TimeToLiveSpecification"); co_return api_error::validation("UpdateTimeToLive missing mandatory TimeToLiveSpecification");
@@ -119,13 +122,9 @@ future<executor::request_return_type> executor::update_time_to_live(client_state
co_return rjson::print(std::move(response)); co_return rjson::print(std::move(response));
} }
future<executor::request_return_type> executor::describe_time_to_live(client_state& client_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info) { future<executor::request_return_type> executor::describe_time_to_live(client_state& client_state, service_permit permit, rjson::value request) {
_stats.api_operations.describe_time_to_live++; _stats.api_operations.describe_time_to_live++;
schema_ptr schema = get_table(_proxy, request); schema_ptr schema = get_table(_proxy, request);
maybe_audit(audit_info, audit::statement_category::QUERY,
schema->ks_name(), schema->cf_name(), "DescribeTimeToLive", request);
std::map<sstring, sstring> tags_map = get_tags_of_table_or_throw(schema); std::map<sstring, sstring> tags_map = get_tags_of_table_or_throw(schema);
rjson::value desc = rjson::empty_object(); rjson::value desc = rjson::empty_object();
auto i = tags_map.find(TTL_TAG_KEY); auto i = tags_map.find(TTL_TAG_KEY);
@@ -142,7 +141,7 @@ future<executor::request_return_type> executor::describe_time_to_live(client_sta
// expiration_service is a sharded service responsible for cleaning up expired // expiration_service is a sharded service responsible for cleaning up expired
// items in all tables with per-item expiration enabled. Currently, this means // items in all tables with per-item expiration enabled. Currently, this means
// Alternator tables with TTL configured via an UpdateTimeToLive request. // Alternator tables with TTL configured via a UpdateTimeToLive request.
// //
// Here is a brief overview of how the expiration service works: // Here is a brief overview of how the expiration service works:
// //
@@ -325,7 +324,9 @@ static future<std::vector<std::pair<dht::token_range, locator::host_id>>> get_se
const auto& tm = *erm->get_token_metadata_ptr(); const auto& tm = *erm->get_token_metadata_ptr();
const auto& sorted_tokens = tm.sorted_tokens(); const auto& sorted_tokens = tm.sorted_tokens();
std::vector<std::pair<dht::token_range, locator::host_id>> ret; std::vector<std::pair<dht::token_range, locator::host_id>> ret;
throwing_assert(!sorted_tokens.empty()); if (sorted_tokens.empty()) {
on_internal_error(tlogger, "Token metadata is empty");
}
auto prev_tok = sorted_tokens.back(); auto prev_tok = sorted_tokens.back();
for (const auto& tok : sorted_tokens) { for (const auto& tok : sorted_tokens) {
co_await coroutine::maybe_yield(); co_await coroutine::maybe_yield();
@@ -562,7 +563,7 @@ static future<> scan_table_ranges(
expiration_service::stats& expiration_stats) expiration_service::stats& expiration_stats)
{ {
const schema_ptr& s = scan_ctx.s; const schema_ptr& s = scan_ctx.s;
throwing_assert(partition_ranges.size() == 1); // otherwise issue #9167 will cause incorrect results. SCYLLA_ASSERT (partition_ranges.size() == 1); // otherwise issue #9167 will cause incorrect results.
auto p = service::pager::query_pagers::pager(proxy, s, scan_ctx.selection, *scan_ctx.query_state_ptr, auto p = service::pager::query_pagers::pager(proxy, s, scan_ctx.selection, *scan_ctx.query_state_ptr,
*scan_ctx.query_options, scan_ctx.command, std::move(partition_ranges), nullptr); *scan_ctx.query_options, scan_ctx.command, std::move(partition_ranges), nullptr);
while (!p->is_exhausted()) { while (!p->is_exhausted()) {
@@ -592,7 +593,7 @@ static future<> scan_table_ranges(
if (retries >= 10) { if (retries >= 10) {
// Don't get stuck forever asking the same page, maybe there's // Don't get stuck forever asking the same page, maybe there's
// a bug or a real problem in several replicas. Give up on // a bug or a real problem in several replicas. Give up on
// this scan and retry the scan from a random position later, // this scan an retry the scan from a random position later,
// in the next scan period. // in the next scan period.
throw runtime_exception("scanner thread failed after too many timeouts for the same page"); throw runtime_exception("scanner thread failed after too many timeouts for the same page");
} }
@@ -639,38 +640,13 @@ static future<> scan_table_ranges(
} }
} else { } else {
// For a real column to contain an expiration time, it // For a real column to contain an expiration time, it
// must be a numeric type. We currently support decimal // must be a numeric type.
// (used by Alternator TTL) as well as bigint, int and // FIXME: Currently we only support decimal_type (which is
// timestamp (used by CQL per-row TTL). // what Alternator uses), but other numeric types can be
switch (meta[*expiration_column]->type->get_kind()) { // supported as well to make this feature more useful in CQL.
case abstract_type::kind::decimal: // Note that kind::decimal is also checked above.
// Used by Alternator TTL for key columns not stored big_decimal n = value_cast<big_decimal>(v);
// in the map. The value is in seconds, fractional expired = is_expired(n, now);
// part is ignored.
expired = is_expired(value_cast<big_decimal>(v), now);
break;
case abstract_type::kind::long_kind:
// Used by CQL per-row TTL. The value is in seconds.
expired = is_expired(gc_clock::time_point(std::chrono::seconds(value_cast<int64_t>(v))), now);
break;
case abstract_type::kind::int32:
// Used by CQL per-row TTL. The value is in seconds.
// Using int type is not recommended because it will
// overflow in 2038, but we support it to allow users
// to use existing int columns for expiration.
expired = is_expired(gc_clock::time_point(std::chrono::seconds(value_cast<int32_t>(v))), now);
break;
case abstract_type::kind::timestamp:
// Used by CQL per-row TTL. The value is in milliseconds
// but we truncate it to gc_clock's precision (whole seconds).
expired = is_expired(gc_clock::time_point(std::chrono::duration_cast<gc_clock::duration>(value_cast<db_clock::time_point>(v).time_since_epoch())), now);
break;
default:
// Should never happen - we verified the column's type
// before starting the scan.
[[unlikely]]
on_internal_error(tlogger, format("expiration scanner value of unsupported type {} in column {}", meta[*expiration_column]->type->cql3_type_name(), scan_ctx.column_name) );
}
} }
if (expired) { if (expired) {
expiration_stats.items_deleted++; expiration_stats.items_deleted++;
@@ -732,12 +708,16 @@ static future<bool> scan_table(
co_return false; co_return false;
} }
// attribute_name may be one of the schema's columns (in Alternator, this // attribute_name may be one of the schema's columns (in Alternator, this
// means a key column, in CQL it's a regular column), or an element in // means it's a key column), or an element in Alternator's attrs map
// Alternator's attrs map encoded in Alternator's JSON encoding (which we // encoded in Alternator's JSON encoding.
// decode). If attribute_name is a real column, in Alternator it will have // FIXME: To make this less Alternators-specific, we should encode in the
// the type decimal, counting seconds since the UNIX epoch, while in CQL // single key's value three things:
// it will one of the types bigint or int (counting seconds) or timestamp // 1. The name of a column
// (counting milliseconds). // 2. Optionally if column is a map, a member in the map
// 3. The deserializer for the value: CQL or Alternator (JSON).
// The deserializer can be guessed: If the given column or map item is
// numeric, it can be used directly. If it is a "bytes" type, it needs to
// be deserialized using Alternator's deserializer.
bytes column_name = to_bytes(*attribute_name); bytes column_name = to_bytes(*attribute_name);
const column_definition *cd = s->get_column_definition(column_name); const column_definition *cd = s->get_column_definition(column_name);
std::optional<std::string> member; std::optional<std::string> member;
@@ -756,14 +736,11 @@ static future<bool> scan_table(
data_type column_type = cd->type; data_type column_type = cd->type;
// Verify that the column has the right type: If "member" exists // Verify that the column has the right type: If "member" exists
// the column must be a map, and if it doesn't, the column must // the column must be a map, and if it doesn't, the column must
// be decimal_type (Alternator), bigint, int or timestamp (CQL). // (currently) be a decimal_type. If the column has the wrong type
// If the column has the wrong type nothing can get expired in // nothing can get expired in this table, and it's pointless to
// this table, and it's pointless to scan it. // scan it.
if ((member && column_type->get_kind() != abstract_type::kind::map) || if ((member && column_type->get_kind() != abstract_type::kind::map) ||
(!member && column_type->get_kind() != abstract_type::kind::decimal && (!member && column_type->get_kind() != abstract_type::kind::decimal)) {
column_type->get_kind() != abstract_type::kind::long_kind &&
column_type->get_kind() != abstract_type::kind::int32 &&
column_type->get_kind() != abstract_type::kind::timestamp)) {
tlogger.info("table {} TTL column has unsupported type, not scanning", s->cf_name()); tlogger.info("table {} TTL column has unsupported type, not scanning", s->cf_name());
co_return false; co_return false;
} }
@@ -790,7 +767,7 @@ static future<bool> scan_table(
// by tasking another node to take over scanning of the dead node's primary // by tasking another node to take over scanning of the dead node's primary
// ranges. What we do here is that this node will also check expiration // ranges. What we do here is that this node will also check expiration
// on its *secondary* ranges - but only those whose primary owner is down. // on its *secondary* ranges - but only those whose primary owner is down.
auto tablet_secondary_replica = tablet_map.get_secondary_replica(*tablet, erm->get_topology()); // throws if no secondary replica auto tablet_secondary_replica = tablet_map.get_secondary_replica(*tablet); // throws if no secondary replica
if (tablet_secondary_replica.host == my_host_id && tablet_secondary_replica.shard == this_shard_id()) { if (tablet_secondary_replica.host == my_host_id && tablet_secondary_replica.shard == this_shard_id()) {
if (!gossiper.is_alive(tablet_primary_replica.host)) { if (!gossiper.is_alive(tablet_primary_replica.host)) {
co_await scan_tablet(*tablet, proxy, abort_source, page_sem, expiration_stats, scan_ctx, tablet_map); co_await scan_tablet(*tablet, proxy, abort_source, page_sem, expiration_stats, scan_ctx, tablet_map);
@@ -901,10 +878,12 @@ future<> expiration_service::run() {
future<> expiration_service::start() { future<> expiration_service::start() {
// Called by main() on each shard to start the expiration-service // Called by main() on each shard to start the expiration-service
// thread. Just runs run() in the background and allows stop(). // thread. Just runs run() in the background and allows stop().
if (!shutting_down()) { if (_db.features().alternator_ttl) {
_end = run().handle_exception([] (std::exception_ptr ep) { if (!shutting_down()) {
tlogger.error("expiration_service failed: {}", ep); _end = run().handle_exception([] (std::exception_ptr ep) {
}); tlogger.error("expiration_service failed: {}", ep);
});
}
} }
return make_ready_future<>(); return make_ready_future<>();
} }

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#pragma once #pragma once
@@ -30,7 +30,7 @@ namespace alternator {
// expiration_service is a sharded service responsible for cleaning up expired // expiration_service is a sharded service responsible for cleaning up expired
// items in all tables with per-item expiration enabled. Currently, this means // items in all tables with per-item expiration enabled. Currently, this means
// Alternator tables with TTL configured via an UpdateTimeToLive request. // Alternator tables with TTL configured via a UpdateTimeToLeave request.
class expiration_service final : public seastar::peering_sharded_service<expiration_service> { class expiration_service final : public seastar::peering_sharded_service<expiration_service> {
public: public:
// Object holding per-shard statistics related to the expiration service. // Object holding per-shard statistics related to the expiration service.
@@ -52,7 +52,7 @@ private:
data_dictionary::database _db; data_dictionary::database _db;
service::storage_proxy& _proxy; service::storage_proxy& _proxy;
gms::gossiper& _gossiper; gms::gossiper& _gossiper;
// _end is set by start(), and resolves when the background service // _end is set by start(), and resolves when the the background service
// started by it ends. To ask the background service to end, _abort_source // started by it ends. To ask the background service to end, _abort_source
// should be triggered. stop() below uses both _abort_source and _end. // should be triggered. stop() below uses both _abort_source and _end.
std::optional<future<>> _end; std::optional<future<>> _end;

View File

@@ -1,26 +0,0 @@
/*
* Copyright 2026-present ScyllaDB
*/
/*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
*/
#pragma once
#include "seastarx.hh"
#include <seastar/core/sstring.hh>
namespace alternator {
// We use the table tag TTL_TAG_KEY ("system:ttl_attribute") to remember
// which attribute was chosen as the expiration-time attribute for
// Alternator's TTL and CQL's per-row TTL features.
// Currently, the *value* of this tag is simply the name of the attribute:
// It can refer to a real column or if that doesn't exist, to a member of
// the ":attrs" map column (which Alternator uses).
extern const sstring TTL_TAG_KEY;
} // namespace alternator
// let users use TTL_TAG_KEY without the "alternator::" prefix,
// to make it easier to move it to a different namespace later.
using alternator::TTL_TAG_KEY;

View File

@@ -12,7 +12,7 @@
"operations":[ "operations":[
{ {
"method":"POST", "method":"POST",
"summary":"Resets authorized prepared statements cache", "summary":"Reset cache",
"type":"void", "type":"void",
"nickname":"authorization_cache_reset", "nickname":"authorization_cache_reset",
"produces":[ "produces":[

View File

@@ -243,7 +243,7 @@
"GOSSIP_DIGEST_SYN", "GOSSIP_DIGEST_SYN",
"GOSSIP_DIGEST_ACK2", "GOSSIP_DIGEST_ACK2",
"GOSSIP_SHUTDOWN", "GOSSIP_SHUTDOWN",
"UNUSED__DEFINITIONS_UPDATE", "DEFINITIONS_UPDATE",
"TRUNCATE", "TRUNCATE",
"UNUSED__REPLICATION_FINISHED", "UNUSED__REPLICATION_FINISHED",
"MIGRATION_REQUEST", "MIGRATION_REQUEST",

View File

@@ -743,7 +743,7 @@
"parameters":[ "parameters":[
{ {
"name":"tag", "name":"tag",
"description":"The snapshot tag to delete. If omitted, all snapshots are removed.", "description":"the tag given to the snapshot",
"required":false, "required":false,
"allowMultiple":false, "allowMultiple":false,
"type":"string", "type":"string",
@@ -751,7 +751,7 @@
}, },
{ {
"name":"kn", "name":"kn",
"description":"Comma-separated list of keyspace names to delete snapshots from. If omitted, snapshots are deleted from all keyspaces.", "description":"Comma-separated keyspaces name that their snapshot will be deleted",
"required":false, "required":false,
"allowMultiple":false, "allowMultiple":false,
"type":"string", "type":"string",
@@ -759,7 +759,7 @@
}, },
{ {
"name":"cf", "name":"cf",
"description":"A table name used to filter which table's snapshots are deleted. If omitted or empty, snapshots for all tables are eligible. When provided together with 'kn', the table is looked up in each listed keyspace independently. For secondary indexes, the logical index name (e.g. 'myindex') can be used and is resolved automatically.", "description":"an optional table name that its snapshot will be deleted",
"required":false, "required":false,
"allowMultiple":false, "allowMultiple":false,
"type":"string", "type":"string",
@@ -1295,45 +1295,6 @@
} }
] ]
}, },
{
"path":"/storage_service/logstor_compaction",
"operations":[
{
"method":"POST",
"summary":"Trigger compaction of the key-value storage",
"type":"void",
"nickname":"logstor_compaction",
"produces":[
"application/json"
],
"parameters":[
{
"name":"major",
"description":"When true, perform a major compaction",
"required":false,
"allowMultiple":false,
"type":"boolean",
"paramType":"query"
}
]
}
]
},
{
"path":"/storage_service/logstor_flush",
"operations":[
{
"method":"POST",
"summary":"Trigger flush of logstor storage",
"type":"void",
"nickname":"logstor_flush",
"produces":[
"application/json"
],
"parameters":[]
}
]
},
{ {
"path":"/storage_service/active_repair/", "path":"/storage_service/active_repair/",
"operations":[ "operations":[
@@ -3090,7 +3051,7 @@
}, },
{ {
"name":"incremental_mode", "name":"incremental_mode",
"description":"Set the incremental repair mode. Can be 'disabled', 'incremental', or 'full'. 'incremental': The incremental repair logic is enabled. Unrepaired sstables will be included for repair. Repaired sstables will be skipped. The incremental repair states will be updated after repair. 'full': The incremental repair logic is enabled. Both repaired and unrepaired sstables will be included for repair. The incremental repair states will be updated after repair. 'disabled': The incremental repair logic is disabled completely. The incremental repair states, e.g., repaired_at in sstables and sstables_repaired_at in the system.tablets table, will not be updated after repair. When the option is not provided, it defaults to incremental mode.", "description":"Set the incremental repair mode. Can be 'disabled', 'incremental', or 'full'. 'incremental': The incremental repair logic is enabled. Unrepaired sstables will be included for repair. Repaired sstables will be skipped. The incremental repair states will be updated after repair. 'full': The incremental repair logic is enabled. Both repaired and unrepaired sstables will be included for repair. The incremental repair states will be updated after repair. 'disabled': The incremental repair logic is disabled completely. The incremental repair states, e.g., repaired_at in sstables and sstables_repaired_at in the system.tablets table, will not be updated after repair. When the option is not provided, it defaults to 'disabled' mode.",
"required":false, "required":false,
"allowMultiple":false, "allowMultiple":false,
"type":"string", "type":"string",
@@ -3124,125 +3085,6 @@
} }
] ]
}, },
{
"path":"/storage_service/tablets/snapshots",
"operations":[
{
"method":"POST",
"summary":"Takes the snapshot for the given keyspaces/tables. A snapshot name must be specified.",
"type":"void",
"nickname":"take_cluster_snapshot",
"produces":[
"application/json"
],
"parameters":[
{
"name":"tag",
"description":"the tag given to the snapshot",
"required":true,
"allowMultiple":false,
"type":"string",
"paramType":"query"
},
{
"name":"keyspace",
"description":"Keyspace(s) to snapshot. Multiple keyspaces can be provided using a comma-separated list. If omitted, snapshot all keyspaces.",
"required":false,
"allowMultiple":false,
"type":"string",
"paramType":"query"
},
{
"name":"table",
"description":"Table(s) to snapshot. Multiple tables (in a single keyspace) can be provided using a comma-separated list. If omitted, snapshot all tables in the given keyspace(s).",
"required":false,
"allowMultiple":false,
"type":"string",
"paramType":"query"
}
]
}
]
},
{
"path":"/storage_service/vnode_tablet_migrations/keyspaces/{keyspace}",
"operations":[{
"method":"POST",
"summary":"Start vnodes-to-tablets migration for all tables in a keyspace",
"type":"void",
"nickname":"create_vnode_tablet_migration",
"produces":["application/json"],
"parameters":[
{
"name":"keyspace",
"description":"Keyspace name",
"required":true,
"allowMultiple":false,
"type":"string",
"paramType":"path"
}
]
},
{
"method":"GET",
"summary":"Get a keyspace's vnodes-to-tablets migration status",
"type":"vnode_tablet_migration_status",
"nickname":"get_vnode_tablet_migration",
"produces":["application/json"],
"parameters":[
{
"name":"keyspace",
"description":"Keyspace name",
"required":true,
"allowMultiple":false,
"type":"string",
"paramType":"path"
}
]
}]
},
{
"path":"/storage_service/vnode_tablet_migrations/node/storage_mode",
"operations":[{
"method":"PUT",
"summary":"Set the intended storage mode for this node during vnodes-to-tablets migration",
"type":"void",
"nickname":"set_vnode_tablet_migration_node_storage_mode",
"produces":["application/json"],
"parameters":[
{
"name":"intended_mode",
"description":"Intended storage mode (tablets or vnodes)",
"required":true,
"allowMultiple":false,
"type":"string",
"paramType":"query"
}
]
}]
},
{
"path":"/storage_service/vnode_tablet_migrations/keyspaces/{keyspace}/finalization",
"operations":[{
"method":"POST",
"summary":"Finalize vnodes-to-tablets migration for all tables in a keyspace",
"type":"void",
"nickname":"finalize_vnode_tablet_migration",
"produces":["application/json"],
"parameters":[
{
"name":"keyspace",
"description":"Keyspace name",
"required":true,
"allowMultiple":false,
"type":"string",
"paramType":"path"
}
]
}]
},
{ {
"path":"/storage_service/quiesce_topology", "path":"/storage_service/quiesce_topology",
"operations":[ "operations":[
@@ -3345,38 +3187,6 @@
} }
] ]
}, },
{
"path":"/storage_service/logstor_info",
"operations":[
{
"method":"GET",
"summary":"Logstor segment information for one table",
"type":"table_logstor_info",
"nickname":"logstor_info",
"produces":[
"application/json"
],
"parameters":[
{
"name":"keyspace",
"description":"The keyspace",
"required":true,
"allowMultiple":false,
"type":"string",
"paramType":"query"
},
{
"name":"table",
"description":"table name",
"required":true,
"allowMultiple":false,
"type":"string",
"paramType":"query"
}
]
}
]
},
{ {
"path":"/storage_service/retrain_dict", "path":"/storage_service/retrain_dict",
"operations":[ "operations":[
@@ -3785,47 +3595,6 @@
} }
} }
}, },
"logstor_hist_bucket":{
"id":"logstor_hist_bucket",
"properties":{
"bucket":{
"type":"long"
},
"count":{
"type":"long"
},
"min_data_size":{
"type":"long"
},
"max_data_size":{
"type":"long"
}
}
},
"table_logstor_info":{
"id":"table_logstor_info",
"description":"Per-table logstor segment distribution",
"properties":{
"keyspace":{
"type":"string"
},
"table":{
"type":"string"
},
"compaction_groups":{
"type":"long"
},
"segments":{
"type":"long"
},
"data_size_histogram":{
"type":"array",
"items":{
"$ref":"logstor_hist_bucket"
}
}
}
},
"tablet_repair_result":{ "tablet_repair_result":{
"id":"tablet_repair_result", "id":"tablet_repair_result",
"description":"Tablet repair result", "description":"Tablet repair result",
@@ -3860,45 +3629,6 @@
"description":"The resulting compression ratio (estimated on a random sample of files)" "description":"The resulting compression ratio (estimated on a random sample of files)"
} }
} }
},
"vnode_tablet_migration_node_status":{
"id":"vnode_tablet_migration_node_status",
"description":"Node storage mode info during vnodes-to-tablets migration",
"properties":{
"host_id":{
"type":"string",
"description":"The host ID"
},
"current_mode":{
"type":"string",
"description":"The current storage mode: `vnodes` or `tablets`"
},
"intended_mode":{
"type":"string",
"description":"The intended storage mode: `vnodes` or `tablets`"
}
}
},
"vnode_tablet_migration_status":{
"id":"vnode_tablet_migration_status",
"description":"Vnodes-to-tablets migration status for a keyspace",
"properties":{
"keyspace":{
"type":"string",
"description":"The keyspace name"
},
"status":{
"type":"string",
"description":"The migration status: `vnodes` (not started), `migrating_to_tablets` (in progress), or `tablets` (complete)"
},
"nodes":{
"type":"array",
"items":{
"$ref":"vnode_tablet_migration_node_status"
},
"description":"Per-node storage mode information. Empty if the keyspace is not being migrated."
}
}
} }
} }
} }

View File

@@ -209,21 +209,6 @@
"parameters":[] "parameters":[]
} }
] ]
},
{
"path":"/system/chosen_sstable_version",
"operations":[
{
"method":"GET",
"summary":"Get sstable version currently chosen for use in new sstables",
"type":"string",
"nickname":"get_chosen_sstable_version",
"produces":[
"application/json"
],
"parameters":[]
}
]
} }
] ]
} }

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#include "api.hh" #include "api.hh"
@@ -122,9 +122,9 @@ future<> unset_thrift_controller(http_context& ctx) {
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_thrift_controller(ctx, r); }); return ctx.http_server.set_routes([&ctx] (routes& r) { unset_thrift_controller(ctx, r); });
} }
future<> set_server_storage_service(http_context& ctx, sharded<service::storage_service>& ss, sharded<db::snapshot_ctl>& ssc, service::raft_group0_client& group0_client) { future<> set_server_storage_service(http_context& ctx, sharded<service::storage_service>& ss, service::raft_group0_client& group0_client) {
return ctx.http_server.set_routes([&ctx, &ss, &ssc, &group0_client] (routes& r) { return ctx.http_server.set_routes([&ctx, &ss, &group0_client] (routes& r) {
set_storage_service(ctx, r, ss, ssc, group0_client); set_storage_service(ctx, r, ss, group0_client);
}); });
} }

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#pragma once #pragma once
@@ -23,6 +23,31 @@
namespace api { namespace api {
template<class T>
std::vector<T> map_to_key_value(const std::map<sstring, sstring>& map) {
std::vector<T> res;
res.reserve(map.size());
for (const auto& [key, value] : map) {
res.push_back(T());
res.back().key = key;
res.back().value = value;
}
return res;
}
template<class T, class MAP>
std::vector<T>& map_to_key_value(const MAP& map, std::vector<T>& res) {
res.reserve(res.size() + std::size(map));
for (const auto& [key, value] : map) {
T val;
val.key = fmt::to_string(key);
val.value = fmt::to_string(value);
res.push_back(val);
}
return res;
}
template <typename T, typename S = T> template <typename T, typename S = T>
T map_sum(T&& dest, const S& src) { T map_sum(T&& dest, const S& src) {
for (const auto& i : src) { for (const auto& i : src) {

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#pragma once #pragma once
@@ -98,7 +98,7 @@ future<> set_server_config(http_context& ctx, db::config& cfg);
future<> unset_server_config(http_context& ctx); future<> unset_server_config(http_context& ctx);
future<> set_server_snitch(http_context& ctx, sharded<locator::snitch_ptr>& snitch); future<> set_server_snitch(http_context& ctx, sharded<locator::snitch_ptr>& snitch);
future<> unset_server_snitch(http_context& ctx); future<> unset_server_snitch(http_context& ctx);
future<> set_server_storage_service(http_context& ctx, sharded<service::storage_service>& ss, sharded<db::snapshot_ctl>&, service::raft_group0_client&); future<> set_server_storage_service(http_context& ctx, sharded<service::storage_service>& ss, service::raft_group0_client&);
future<> unset_server_storage_service(http_context& ctx); future<> unset_server_storage_service(http_context& ctx);
future<> set_server_client_routes(http_context& ctx, sharded<service::client_routes_service>& cr); future<> set_server_client_routes(http_context& ctx, sharded<service::client_routes_service>& cr);
future<> unset_server_client_routes(http_context& ctx); future<> unset_server_client_routes(http_context& ctx);

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#include "api/api-doc/authorization_cache.json.hh" #include "api/api-doc/authorization_cache.json.hh"

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#pragma once #pragma once

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#include "cache_service.hh" #include "cache_service.hh"

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#pragma once #pragma once

View File

@@ -4,7 +4,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#include <seastar/http/short_streams.hh> #include <seastar/http/short_streams.hh>

View File

@@ -4,7 +4,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#pragma once #pragma once

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#include "collectd.hh" #include "collectd.hh"

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#pragma once #pragma once

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#include <fmt/ranges.h> #include <fmt/ranges.h>
@@ -18,9 +18,7 @@
#include "utils/assert.hh" #include "utils/assert.hh"
#include "utils/estimated_histogram.hh" #include "utils/estimated_histogram.hh"
#include <algorithm> #include <algorithm>
#include <sstream>
#include "db/data_listeners.hh" #include "db/data_listeners.hh"
#include "utils/hash.hh"
#include "storage_service.hh" #include "storage_service.hh"
#include "compaction/compaction_manager.hh" #include "compaction/compaction_manager.hh"
#include "unimplemented.hh" #include "unimplemented.hh"
@@ -344,56 +342,6 @@ uint64_t accumulate_on_active_memtables(replica::table& t, noncopyable_function<
return ret; return ret;
} }
static
future<json::json_return_type>
rest_toppartitions_generic(sharded<replica::database>& db, std::unique_ptr<http::request> req) {
bool filters_provided = false;
std::unordered_set<std::tuple<sstring, sstring>, utils::tuple_hash> table_filters {};
if (auto filters = req->get_query_param("table_filters"); !filters.empty()) {
filters_provided = true;
std::stringstream ss { filters };
std::string filter;
while (!filters.empty() && ss.good()) {
std::getline(ss, filter, ',');
table_filters.emplace(parse_fully_qualified_cf_name(filter));
}
}
std::unordered_set<sstring> keyspace_filters {};
if (auto filters = req->get_query_param("keyspace_filters"); !filters.empty()) {
filters_provided = true;
std::stringstream ss { filters };
std::string filter;
while (!filters.empty() && ss.good()) {
std::getline(ss, filter, ',');
keyspace_filters.emplace(std::move(filter));
}
}
// when the query is empty return immediately
if (filters_provided && table_filters.empty() && keyspace_filters.empty()) {
apilog.debug("toppartitions query: processing results");
cf::toppartitions_query_results results;
results.read_cardinality = 0;
results.write_cardinality = 0;
return make_ready_future<json::json_return_type>(results);
}
api::req_param<std::chrono::milliseconds, unsigned> duration{*req, "duration", 1000ms};
api::req_param<unsigned> capacity(*req, "capacity", 256);
api::req_param<unsigned> list_size(*req, "list_size", 10);
apilog.info("toppartitions query: #table_filters={} #keyspace_filters={} duration={} list_size={} capacity={}",
!table_filters.empty() ? std::to_string(table_filters.size()) : "all", !keyspace_filters.empty() ? std::to_string(keyspace_filters.size()) : "all", duration.value, list_size.value, capacity.value);
return seastar::do_with(db::toppartitions_query(db, std::move(table_filters), std::move(keyspace_filters), duration.value, list_size, capacity), [] (db::toppartitions_query& q) {
return run_toppartitions_query(q);
});
}
void set_column_family(http_context& ctx, routes& r, sharded<replica::database>& db) { void set_column_family(http_context& ctx, routes& r, sharded<replica::database>& db) {
cf::get_column_family_name.set(r, [&db] (const_req req){ cf::get_column_family_name.set(r, [&db] (const_req req){
std::vector<sstring> res; std::vector<sstring> res;
@@ -1099,10 +1047,6 @@ void set_column_family(http_context& ctx, routes& r, sharded<replica::database>&
}); });
}); });
ss::toppartitions_generic.set(r, [&db] (std::unique_ptr<http::request> req) {
return rest_toppartitions_generic(db, std::move(req));
});
cf::force_major_compaction.set(r, [&ctx, &db](std::unique_ptr<http::request> req) -> future<json::json_return_type> { cf::force_major_compaction.set(r, [&ctx, &db](std::unique_ptr<http::request> req) -> future<json::json_return_type> {
if (!req->get_query_param("split_output").empty()) { if (!req->get_query_param("split_output").empty()) {
fail(unimplemented::cause::API); fail(unimplemented::cause::API);
@@ -1269,7 +1213,6 @@ void unset_column_family(http_context& ctx, routes& r) {
cf::get_sstable_count_per_level.unset(r); cf::get_sstable_count_per_level.unset(r);
cf::get_sstables_for_key.unset(r); cf::get_sstables_for_key.unset(r);
cf::toppartitions.unset(r); cf::toppartitions.unset(r);
ss::toppartitions_generic.unset(r);
cf::force_major_compaction.unset(r); cf::force_major_compaction.unset(r);
ss::get_load.unset(r); ss::get_load.unset(r);
ss::get_metrics_load.unset(r); ss::get_metrics_load.unset(r);

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#pragma once #pragma once

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#include "commitlog.hh" #include "commitlog.hh"

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#pragma once #pragma once

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#include <seastar/core/coroutine.hh> #include <seastar/core/coroutine.hh>

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#pragma once #pragma once

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#include "api/api.hh" #include "api/api.hh"
@@ -82,16 +82,15 @@ void set_config(std::shared_ptr < api_registry_builder20 > rb, http_context& ctx
}); });
}); });
cs::find_config_id.set(r, [&cfg] (std::unique_ptr<http::request> req) -> future<json::json_return_type> { cs::find_config_id.set(r, [&cfg] (const_req r) {
auto id = req->get_path_param("id"); auto id = r.get_path_param("id");
auto value = co_await cfg.value_as_json_string_for_name(id); for (auto&& cfg_ref : cfg.values()) {
if (!value) { auto&& cfg = cfg_ref.get();
throw bad_param_exception(sstring("No such config entry: ") + id); if (id == cfg.name()) {
return cfg.value_as_json();
}
} }
//value is already a json string throw bad_param_exception(sstring("No such config entry: ") + id);
json::json_return_type ret{json::json_void()};
ret._res = std::move(*value);
co_return ret;
}); });
sp::get_rpc_timeout.set(r, [&cfg](const_req req) { sp::get_rpc_timeout.set(r, [&cfg](const_req req) {

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#pragma once #pragma once

View File

@@ -3,7 +3,7 @@
*/ */
/* /*
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1 * SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
*/ */
#include "build_mode.hh" #include "build_mode.hh"

Some files were not shown because too many files have changed in this diff Show More