mirror of
https://github.com/scylladb/scylladb.git
synced 2026-04-19 16:15:07 +00:00
Compare commits
1 Commits
next
...
ykaul/skip
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ba21da2e02 |
4
.github/CODEOWNERS
vendored
4
.github/CODEOWNERS
vendored
@@ -92,10 +92,6 @@ test/boost/querier_cache_test.cc @denesb
|
||||
# PYTEST-BASED CQL TESTS
|
||||
test/cqlpy/* @nyh
|
||||
|
||||
# TEST FRAMEWORK
|
||||
test/pylib/* @xtrey
|
||||
test.py @xtrey
|
||||
|
||||
# RAFT
|
||||
raft/* @kbr-scylla @gleb-cloudius @kostja
|
||||
test/raft/* @kbr-scylla @gleb-cloudius @kostja
|
||||
|
||||
2
.github/scripts/check-license.py
vendored
2
.github/scripts/check-license.py
vendored
@@ -4,7 +4,7 @@
|
||||
# Copyright (C) 2024-present ScyllaDB
|
||||
#
|
||||
#
|
||||
# SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
# SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
#
|
||||
|
||||
import argparse
|
||||
|
||||
@@ -10,9 +10,6 @@ on:
|
||||
types: [labeled, unlabeled]
|
||||
branches: [master, next, enterprise]
|
||||
|
||||
env:
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
|
||||
|
||||
jobs:
|
||||
check-commit:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -33,7 +30,7 @@ jobs:
|
||||
echo "DEFAULT_BRANCH=master" >> $GITHUB_ENV
|
||||
fi
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
repository: ${{ github.repository }}
|
||||
ref: ${{ env.DEFAULT_BRANCH }}
|
||||
|
||||
@@ -5,9 +5,6 @@ on:
|
||||
types: [opened, reopened, edited]
|
||||
branches: [branch-*]
|
||||
|
||||
env:
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
|
||||
|
||||
jobs:
|
||||
check-fixes-prefix:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -16,7 +13,7 @@ jobs:
|
||||
issues: write
|
||||
steps:
|
||||
- name: Check PR body for "Fixes" prefix patterns
|
||||
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
const body = context.payload.pull_request.body;
|
||||
|
||||
5
.github/workflows/build-scylla.yaml
vendored
5
.github/workflows/build-scylla.yaml
vendored
@@ -12,9 +12,6 @@ on:
|
||||
description: 'the md5sum for scylla executable'
|
||||
value: ${{ jobs.build.outputs.md5sum }}
|
||||
|
||||
env:
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
|
||||
|
||||
jobs:
|
||||
read-toolchain:
|
||||
uses: ./.github/workflows/read-toolchain.yaml
|
||||
@@ -27,7 +24,7 @@ jobs:
|
||||
outputs:
|
||||
md5sum: ${{ steps.checksum.outputs.md5sum }}
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
- name: Generate the building system
|
||||
|
||||
@@ -7,11 +7,6 @@ on:
|
||||
- synchronize
|
||||
- reopened
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
statuses: write
|
||||
|
||||
jobs:
|
||||
validate_pr_author_email:
|
||||
uses: scylladb/github-automation/.github/workflows/validate_pr_author_email.yml@main
|
||||
|
||||
7
.github/workflows/check-license-header.yaml
vendored
7
.github/workflows/check-license-header.yaml
vendored
@@ -7,9 +7,8 @@ on:
|
||||
|
||||
env:
|
||||
HEADER_CHECK_LINES: 10
|
||||
LICENSE: "LicenseRef-ScyllaDB-Source-Available-1.1"
|
||||
LICENSE: "LicenseRef-ScyllaDB-Source-Available-1.0"
|
||||
CHECKED_EXTENSIONS: ".cc .hh .py"
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
|
||||
|
||||
jobs:
|
||||
check-license-headers:
|
||||
@@ -20,7 +19,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -41,7 +40,7 @@ jobs:
|
||||
|
||||
- name: Comment on PR if check fails
|
||||
if: failure()
|
||||
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
const license = '${{ env.LICENSE }}';
|
||||
|
||||
3
.github/workflows/clang-nightly.yaml
vendored
3
.github/workflows/clang-nightly.yaml
vendored
@@ -9,7 +9,6 @@ env:
|
||||
# use the development branch explicitly
|
||||
CLANG_VERSION: 21
|
||||
BUILD_DIR: build
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
|
||||
|
||||
permissions: {}
|
||||
|
||||
@@ -33,7 +32,7 @@ jobs:
|
||||
steps:
|
||||
- run: |
|
||||
sudo dnf -y install git
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: true
|
||||
- name: Install build dependencies
|
||||
|
||||
3
.github/workflows/clang-tidy.yaml
vendored
3
.github/workflows/clang-tidy.yaml
vendored
@@ -18,7 +18,6 @@ env:
|
||||
BUILD_TYPE: RelWithDebInfo
|
||||
BUILD_DIR: build
|
||||
CLANG_TIDY_CHECKS: '-*,bugprone-use-after-move'
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
|
||||
|
||||
permissions: {}
|
||||
|
||||
@@ -43,7 +42,7 @@ jobs:
|
||||
IMAGE: ${{ needs.read-toolchain.image }}
|
||||
run: |
|
||||
echo ${{ needs.read-toolchain.image }}
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: true
|
||||
- run: |
|
||||
|
||||
@@ -7,16 +7,13 @@ on:
|
||||
permissions:
|
||||
issues: write
|
||||
|
||||
env:
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
|
||||
|
||||
jobs:
|
||||
comment-and-close:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Comment and close if author email is scylladb.com
|
||||
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
script: |
|
||||
|
||||
6
.github/workflows/codespell.yaml
vendored
6
.github/workflows/codespell.yaml
vendored
@@ -4,15 +4,13 @@ on:
|
||||
branches:
|
||||
- master
|
||||
permissions: {}
|
||||
env:
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
|
||||
jobs:
|
||||
codespell:
|
||||
name: Check for spelling errors
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
- uses: codespell-project/actions-codespell@8f01853be192eb0f849a5c7d721450e7a467c579 # v2.2
|
||||
- uses: actions/checkout@v4
|
||||
- uses: codespell-project/actions-codespell@master
|
||||
with:
|
||||
only_warn: 1
|
||||
ignore_words_list: "ans,datas,fo,ser,ue,crate,nd,reenable,strat,stap,te,raison,iif,tread"
|
||||
|
||||
38
.github/workflows/compare-build-systems.yaml
vendored
38
.github/workflows/compare-build-systems.yaml
vendored
@@ -1,38 +0,0 @@
|
||||
name: Compare Build Systems
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
paths:
|
||||
- 'configure.py'
|
||||
- '**/CMakeLists.txt'
|
||||
- 'cmake/**'
|
||||
- 'scripts/compare_build_systems.py'
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
# cancel the in-progress run upon a repush
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
read-toolchain:
|
||||
uses: ./.github/workflows/read-toolchain.yaml
|
||||
compare:
|
||||
name: Compare configure.py vs CMake
|
||||
needs:
|
||||
- read-toolchain
|
||||
runs-on: ubuntu-latest
|
||||
container: ${{ needs.read-toolchain.outputs.image }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: true
|
||||
- name: Compare build systems
|
||||
run: |
|
||||
git config --global --add safe.directory $GITHUB_WORKSPACE
|
||||
python3 scripts/compare_build_systems.py --ci
|
||||
5
.github/workflows/conflict_reminder.yaml
vendored
5
.github/workflows/conflict_reminder.yaml
vendored
@@ -12,16 +12,13 @@ on:
|
||||
schedule:
|
||||
- cron: '0 10 * * 1' # Runs every Monday at 10:00am
|
||||
|
||||
env:
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
|
||||
|
||||
jobs:
|
||||
notify_conflict_prs:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Notify PR Authors of Conflicts
|
||||
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
console.log("Starting conflict reminder script...");
|
||||
|
||||
@@ -13,9 +13,6 @@ on:
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
env:
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -24,12 +21,12 @@ jobs:
|
||||
security-events: write
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Differential ShellCheck
|
||||
uses: redhat-plumbers-in-action/differential-shellcheck@d965e66ec0b3b2f821f75c8eff9b12442d9a7d1e # v5.5.6
|
||||
uses: redhat-plumbers-in-action/differential-shellcheck@v5
|
||||
with:
|
||||
severity: warning
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
7
.github/workflows/docs-pages.yaml
vendored
7
.github/workflows/docs-pages.yaml
vendored
@@ -5,7 +5,6 @@ name: "Docs / Publish"
|
||||
env:
|
||||
FLAG: ${{ github.repository == 'scylladb/scylla-enterprise' && 'enterprise' || 'opensource' }}
|
||||
DEFAULT_BRANCH: ${{ github.repository == 'scylladb/scylla-enterprise' && 'enterprise' || 'master' }}
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
|
||||
|
||||
on:
|
||||
push:
|
||||
@@ -26,17 +25,17 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ env.DEFAULT_BRANCH }}
|
||||
persist-credentials: false
|
||||
fetch-depth: 0
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.12"
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@cec208311dfd045dd5311c1add060b2062131d57 # v8.0.0
|
||||
uses: astral-sh/setup-uv@v6
|
||||
- name: Set up env
|
||||
run: make -C docs FLAG="${{ env.FLAG }}" setupenv
|
||||
- name: Build docs
|
||||
|
||||
7
.github/workflows/docs-pr.yaml
vendored
7
.github/workflows/docs-pr.yaml
vendored
@@ -7,7 +7,6 @@ permissions:
|
||||
|
||||
env:
|
||||
FLAG: ${{ github.repository == 'scylladb/scylla-enterprise' && 'enterprise' || 'opensource' }}
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
@@ -23,16 +22,16 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
fetch-depth: 0
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.12"
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@cec208311dfd045dd5311c1add060b2062131d57 # v8.0.0
|
||||
uses: astral-sh/setup-uv@v6
|
||||
- name: Set up env
|
||||
run: make -C docs FLAG="${{ env.FLAG }}" setupenv
|
||||
- name: Build docs
|
||||
|
||||
7
.github/workflows/docs-validate-metrics.yml
vendored
7
.github/workflows/docs-validate-metrics.yml
vendored
@@ -3,9 +3,6 @@ name: Docs / Validate metrics
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
env:
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
@@ -24,12 +21,12 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: true
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0
|
||||
uses: actions/setup-python@v6
|
||||
with:
|
||||
python-version: '3.10'
|
||||
|
||||
|
||||
5
.github/workflows/iwyu.yaml
vendored
5
.github/workflows/iwyu.yaml
vendored
@@ -13,7 +13,6 @@ env:
|
||||
# supposed to be processed by idl-compiler.py, so we don't check them using the cleaner
|
||||
CLEANER_DIRS: test/unit exceptions alternator api auth cdc compaction db dht gms index lang message mutation mutation_writer node_ops raft redis replica service
|
||||
SEASTAR_BAD_INCLUDE_OUTPUT_PATH: build/seastar-bad-include.log
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
@@ -33,7 +32,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
container: ${{ needs.read-toolchain.outputs.image }}
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: true
|
||||
- name: Generate compilation database
|
||||
@@ -90,7 +89,7 @@ jobs:
|
||||
| tee "$SEASTAR_BAD_INCLUDE_OUTPUT_PATH"
|
||||
- run: |
|
||||
echo "::remove-matcher owner=seastar-bad-include::"
|
||||
- uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f # v7.0.0
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: Logs
|
||||
path: |
|
||||
|
||||
@@ -7,7 +7,6 @@ on:
|
||||
|
||||
env:
|
||||
DEFAULT_BRANCH: 'master'
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
|
||||
|
||||
jobs:
|
||||
mark-ready:
|
||||
@@ -18,7 +17,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
repository: ${{ github.repository }}
|
||||
ref: ${{ env.DEFAULT_BRANCH }}
|
||||
|
||||
@@ -5,8 +5,6 @@ on:
|
||||
branches:
|
||||
- master
|
||||
- next
|
||||
env:
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
|
||||
jobs:
|
||||
label:
|
||||
if: github.event.pull_request.draft == false
|
||||
@@ -17,7 +15,7 @@ jobs:
|
||||
steps:
|
||||
- name: Wait for label to be added
|
||||
run: sleep 1m
|
||||
- uses: mheap/github-action-required-labels@0ac283b4e65c1fb28ce6079dea5546ceca98ccbe # v5.5.2
|
||||
- uses: mheap/github-action-required-labels@v5
|
||||
with:
|
||||
mode: minimum
|
||||
count: 1
|
||||
|
||||
5
.github/workflows/read-toolchain.yaml
vendored
5
.github/workflows/read-toolchain.yaml
vendored
@@ -7,9 +7,6 @@ on:
|
||||
description: "the toolchain docker image"
|
||||
value: ${{ jobs.read-toolchain.outputs.image }}
|
||||
|
||||
env:
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
|
||||
|
||||
jobs:
|
||||
read-toolchain:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -18,7 +15,7 @@ jobs:
|
||||
outputs:
|
||||
image: ${{ steps.read.outputs.image }}
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
sparse-checkout: tools/toolchain/image
|
||||
sparse-checkout-cone-mode: false
|
||||
|
||||
5
.github/workflows/seastar.yaml
vendored
5
.github/workflows/seastar.yaml
vendored
@@ -13,7 +13,6 @@ concurrency:
|
||||
|
||||
env:
|
||||
BUILD_DIR: build
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
|
||||
|
||||
jobs:
|
||||
read-toolchain:
|
||||
@@ -30,12 +29,12 @@ jobs:
|
||||
- RelWithDebInfo
|
||||
- Dev
|
||||
steps:
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: true
|
||||
- run: |
|
||||
rm -rf seastar
|
||||
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
repository: scylladb/seastar
|
||||
submodules: true
|
||||
|
||||
5
.github/workflows/sync-labels.yaml
vendored
5
.github/workflows/sync-labels.yaml
vendored
@@ -7,9 +7,6 @@ on:
|
||||
issues:
|
||||
types: [labeled, unlabeled]
|
||||
|
||||
env:
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
|
||||
|
||||
jobs:
|
||||
label-sync:
|
||||
if: ${{ github.repository == 'scylladb/scylladb' }}
|
||||
@@ -24,7 +21,7 @@ jobs:
|
||||
GITHUB_CONTEXT: ${{ toJson(github) }}
|
||||
run: echo "$GITHUB_CONTEXT"
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
sparse-checkout: |
|
||||
.github/scripts/sync_labels.py
|
||||
|
||||
7
.github/workflows/trigger_ci.yaml
vendored
7
.github/workflows/trigger_ci.yaml
vendored
@@ -5,10 +5,7 @@ on:
|
||||
types: [opened, reopened, synchronize]
|
||||
issue_comment:
|
||||
types: [created]
|
||||
|
||||
env:
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
|
||||
|
||||
|
||||
jobs:
|
||||
trigger-ci:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -18,7 +15,7 @@ jobs:
|
||||
GITHUB_CONTEXT: ${{ toJson(github) }}
|
||||
run: echo "$GITHUB_CONTEXT"
|
||||
- name: Checkout PR code
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0 # Needed to access full history
|
||||
ref: ${{ github.event.pull_request.head.ref }}
|
||||
|
||||
5
.github/workflows/urgent_issue_reminder.yml
vendored
5
.github/workflows/urgent_issue_reminder.yml
vendored
@@ -4,16 +4,13 @@ on:
|
||||
schedule:
|
||||
- cron: '10 8 * * *' # Runs daily at 8 AM
|
||||
|
||||
env:
|
||||
FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: true
|
||||
|
||||
jobs:
|
||||
reminder:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Send reminders
|
||||
uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
const labelFilters = ['P0', 'P1', 'Field-Tier1','status/release blocker', 'status/regression'];
|
||||
|
||||
@@ -2,12 +2,6 @@ cmake_minimum_required(VERSION 3.27)
|
||||
|
||||
project(scylla)
|
||||
|
||||
# Disable CMake's automatic -fcolor-diagnostics injection (CMake 3.24+ adds
|
||||
# it for Clang+Ninja). configure.py does not add any color diagnostics flags,
|
||||
# so we clear the internal CMake variable to prevent injection.
|
||||
set(CMAKE_CXX_COMPILE_OPTIONS_COLOR_DIAGNOSTICS "")
|
||||
set(CMAKE_C_COMPILE_OPTIONS_COLOR_DIAGNOSTICS "")
|
||||
|
||||
list(APPEND CMAKE_MODULE_PATH
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/cmake
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/seastar/cmake)
|
||||
@@ -57,16 +51,6 @@ set(CMAKE_CXX_EXTENSIONS ON CACHE INTERNAL "")
|
||||
set(CMAKE_CXX_SCAN_FOR_MODULES OFF CACHE INTERNAL "")
|
||||
set(CMAKE_VISIBILITY_INLINES_HIDDEN ON)
|
||||
|
||||
# Global defines matching configure.py
|
||||
# Since gcc 13, libgcc doesn't need the exception workaround
|
||||
add_compile_definitions(SEASTAR_NO_EXCEPTION_HACK)
|
||||
# Hacks needed to expose internal APIs for xxhash dependencies
|
||||
add_compile_definitions(XXH_PRIVATE_API)
|
||||
# SEASTAR_TESTING_MAIN is added later (after add_subdirectory(seastar) and
|
||||
# add_subdirectory(abseil)) to avoid leaking into the seastar subdirectory.
|
||||
# If SEASTAR_TESTING_MAIN is defined globally before seastar, it causes a
|
||||
# duplicate 'main' symbol in seastar_testing.
|
||||
|
||||
if(is_multi_config)
|
||||
find_package(Seastar)
|
||||
# this is atypical compared to standard ExternalProject usage:
|
||||
@@ -112,33 +96,12 @@ else()
|
||||
set(Seastar_EXCLUDE_APPS_FROM_ALL ON CACHE BOOL "" FORCE)
|
||||
set(Seastar_EXCLUDE_TESTS_FROM_ALL ON CACHE BOOL "" FORCE)
|
||||
set(Seastar_IO_URING ON CACHE BOOL "" FORCE)
|
||||
set(Seastar_SCHEDULING_GROUPS_COUNT 24 CACHE STRING "" FORCE)
|
||||
set(Seastar_SCHEDULING_GROUPS_COUNT 21 CACHE STRING "" FORCE)
|
||||
set(Seastar_UNUSED_RESULT_ERROR ON CACHE BOOL "" FORCE)
|
||||
# Match configure.py's build_seastar_shared_libs: Debug and Dev
|
||||
# build Seastar as a shared library, others build it static.
|
||||
if(CMAKE_BUILD_TYPE STREQUAL "Debug" OR CMAKE_BUILD_TYPE STREQUAL "Dev")
|
||||
set(BUILD_SHARED_LIBS ON CACHE BOOL "" FORCE)
|
||||
else()
|
||||
set(BUILD_SHARED_LIBS OFF CACHE BOOL "" FORCE)
|
||||
endif()
|
||||
add_subdirectory(seastar)
|
||||
|
||||
# Coverage mode sets cmake_build_type='Debug' for Seastar
|
||||
# (configure.py:515), so Seastar's pkg-config output includes sanitizer
|
||||
# link flags in seastar_libs_coverage (configure.py:2514,2649).
|
||||
# Seastar's own CMake only activates sanitizer targets for Debug/Sanitize
|
||||
# configs, so we inject link options on the seastar target for Coverage.
|
||||
# Using PUBLIC ensures they propagate to all targets linking Seastar
|
||||
# (but not standalone tools like patchelf), matching configure.py's
|
||||
# behavior. Compile-time flags and defines are handled globally in
|
||||
# cmake/mode.Coverage.cmake.
|
||||
if(CMAKE_BUILD_TYPE STREQUAL "Coverage")
|
||||
target_link_options(seastar
|
||||
PUBLIC
|
||||
-fsanitize=address
|
||||
-fsanitize=undefined
|
||||
-fsanitize=vptr)
|
||||
endif()
|
||||
target_compile_definitions (seastar
|
||||
PRIVATE
|
||||
SEASTAR_NO_EXCEPTION_HACK)
|
||||
endif()
|
||||
|
||||
set(ABSL_PROPAGATE_CXX_STD ON CACHE BOOL "" FORCE)
|
||||
@@ -148,10 +111,8 @@ if(Scylla_ENABLE_LTO)
|
||||
endif()
|
||||
|
||||
find_package(Sanitizers QUIET)
|
||||
# Match configure.py:2192 — abseil gets sanitizer flags with -fno-sanitize=vptr
|
||||
# to exclude vptr checks which are incompatible with abseil's usage.
|
||||
list(APPEND absl_cxx_flags
|
||||
$<$<CONFIG:Debug,Sanitize>:$<TARGET_PROPERTY:Sanitizers::address,INTERFACE_COMPILE_OPTIONS>;$<TARGET_PROPERTY:Sanitizers::undefined_behavior,INTERFACE_COMPILE_OPTIONS>;-fno-sanitize=vptr>)
|
||||
$<$<CONFIG:Debug,Sanitize>:$<TARGET_PROPERTY:Sanitizers::address,INTERFACE_COMPILE_OPTIONS>;$<TARGET_PROPERTY:Sanitizers::undefined_behavior,INTERFACE_COMPILE_OPTIONS>>)
|
||||
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
|
||||
list(APPEND ABSL_GCC_FLAGS ${absl_cxx_flags})
|
||||
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
|
||||
@@ -176,38 +137,9 @@ add_library(absl::headers ALIAS absl-headers)
|
||||
# unfortunately.
|
||||
set_target_properties(absl_strerror PROPERTIES EXCLUDE_FROM_ALL TRUE)
|
||||
|
||||
# Now that seastar and abseil subdirectories are fully processed, add
|
||||
# SEASTAR_TESTING_MAIN globally. This matches configure.py's global define
|
||||
# without leaking into seastar (which would cause duplicate main symbols).
|
||||
add_compile_definitions(SEASTAR_TESTING_MAIN)
|
||||
|
||||
# System libraries dependencies
|
||||
find_package(Boost REQUIRED
|
||||
COMPONENTS filesystem program_options system thread regex unit_test_framework)
|
||||
# When using shared Boost libraries, define BOOST_ALL_DYN_LINK (matching configure.py)
|
||||
if(NOT Boost_USE_STATIC_LIBS)
|
||||
add_compile_definitions(BOOST_ALL_DYN_LINK)
|
||||
endif()
|
||||
|
||||
# CMake's Boost package config adds per-component defines like
|
||||
# BOOST_UNIT_TEST_FRAMEWORK_DYN_LINK, BOOST_REGEX_DYN_LINK, etc. on the
|
||||
# imported targets. configure.py only uses BOOST_ALL_DYN_LINK (which covers
|
||||
# all components), so strip the per-component defines to align the two build
|
||||
# systems.
|
||||
foreach(_boost_target
|
||||
Boost::unit_test_framework
|
||||
Boost::regex
|
||||
Boost::filesystem
|
||||
Boost::program_options
|
||||
Boost::system
|
||||
Boost::thread)
|
||||
if(TARGET ${_boost_target})
|
||||
# Completely remove all INTERFACE_COMPILE_DEFINITIONS from the Boost target.
|
||||
# This prevents per-component *_DYN_LINK and *_NO_LIB defines from
|
||||
# propagating. BOOST_ALL_DYN_LINK (set globally) covers all components.
|
||||
set_property(TARGET ${_boost_target} PROPERTY INTERFACE_COMPILE_DEFINITIONS)
|
||||
endif()
|
||||
endforeach()
|
||||
target_link_libraries(Boost::regex
|
||||
INTERFACE
|
||||
ICU::i18n
|
||||
@@ -264,10 +196,6 @@ if (Scylla_USE_PRECOMPILED_HEADER)
|
||||
message(STATUS "Using precompiled header for Scylla - remember to add `sloppiness = pch_defines,time_macros` to ccache.conf, if you're using ccache.")
|
||||
target_precompile_headers(scylla-precompiled-header PRIVATE "stdafx.hh")
|
||||
target_compile_definitions(scylla-precompiled-header PRIVATE SCYLLA_USE_PRECOMPILED_HEADER)
|
||||
# Match configure.py: -fpch-validate-input-files-content tells the compiler
|
||||
# to check content of stdafx.hh if timestamps don't match (important for
|
||||
# ccache/git workflows where timestamps may not be preserved).
|
||||
add_compile_options(-fpch-validate-input-files-content)
|
||||
endif()
|
||||
else()
|
||||
set(Scylla_USE_PRECOMPILED_HEADER_USE OFF)
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
## **SCYLLADB SOFTWARE LICENSE AGREEMENT**
|
||||
|
||||
| Version: | 1.1 |
|
||||
| Version: | 1.0 |
|
||||
| :---- | :---- |
|
||||
| Last updated: | April 12, 2026 |
|
||||
| Last updated: | December 18, 2024 |
|
||||
|
||||
**Your Acceptance**
|
||||
|
||||
@@ -12,48 +12,20 @@ The terms "**You**" or "**Licensee**" refer to any individual accessing or using
|
||||
|
||||
**Grant of License**
|
||||
|
||||
* **Definitions:**
|
||||
1. **Software:** Software means the ScyllaDB software provided by Licensor, including the source code, object code, and any accompanying documentation or tools, or any part thereof, as made available under this Agreement.
|
||||
2. **Commercial Customer**: means any legal entity (including its Affiliates) that has entered into a transaction with Licensor, or an authorized reseller/distributor, for the provision of any ScyllaDB products or services. This includes, without limitation: (a) Scope of Service: Any paid subscription, enterprise license, "BYOA" or Database-as-a-Service (DBaaS) offering, technical support, professional services, consulting, or training. (b) Scale and Volume: Any deployment regardless of size, capacity, or performance metrics (c) Payment Method: Any compensation model, including but not limited to, fixed-fee, consumption-based (On-Demand), committed spend, third-party marketplace credits (e.g., AWS, GCP, Azure), or promotional credits and discounts.
|
||||
* **Grant of License:** Subject to the terms and conditions of this Agreement, including the Eligibility and Exclusive Use Restrictions clause, Licensor grants You a limited, non-exclusive, revocable, non-sublicensable, non-transferable, royalty free license to Use the Software, in each case solely for the purposes of:
|
||||
* **Software Definitions:** Software means the ScyllaDB software provided by Licensor, including the source code, object code, and any accompanying documentation or tools, or any part thereof, as made available under this Agreement.
|
||||
* **Grant of License:** Subject to the terms and conditions of this Agreement, Licensor grants You a limited, non-exclusive, revocable, non-sublicensable, non-transferable, royalty free license to Use the Software, in each case solely for the purposes of:
|
||||
1) Copying, distributing, evaluating (including performing benchmarking or comparative tests or evaluations , subject to the limitations below) and improving the Software and ScyllaDB; and
|
||||
2) create a modified version of the Software (each, a "**Licensed Work**"); provided however, that each such Licensed Work keeps all or substantially all of the functions and features of the Software, and/or using all or substantially all of the source code of the Software. You hereby agree that all the Licensed Work are, upon creation, considered Licensed Work of the Licensor, shall be the sole property of the Licensor and its assignees, and the Licensor and its assignees shall be the sole owner of all rights of any kind or nature, in connection with such Licensed Work. You hereby irrevocably and unconditionally assign to the Licensor all the Licensed Work and any part thereof. This License applies separately for each version of the Licensed Work, which shall be considered "Software" for the purpose of this Agreement.
|
||||
|
||||
* **Eligibility and Exclusive Use Restrictions**
|
||||
|
||||
i. Restricted to "Never Customers" Only. The license granted under this Agreement is strictly limited to Never Customers. For purposes of this Agreement, a "Never Customer" is an entity (including its Affiliates) that does not have, and has not had within the previous twelve (12) months, a paid commercial subscription, professional services agreement, or any other commercial relationship with Licensor. Satisfaction of the Never Customer criteria is a strict condition precedent to the effectiveness of this License.
|
||||
|
||||
ii. Total Prohibition for Existing Commercial Customers. If You (or any of Your Affiliates) are an existing Commercial Customer of Licensor within the last twelve (12) months, no license is deemed to have been offered or extended to You, and any download or installation of the Software by You is unauthorized. This prohibition applies to all deployments, including but not limited to:
|
||||
|
||||
(a) existing commercial workloads;
|
||||
|
||||
(b) any new use cases, new applications, or new workloads
|
||||
|
||||
iii. **No "Hybrid" Usage**. Licensee is expressly prohibited from combining free tier usage under this Agreement with paid commercial units.
|
||||
|
||||
If You are a Commercial Customer, all use of the Software across Your entire organization (and any of your Affiliates) must be governed by a valid, paid commercial agreement. Use of the Software under this license by a Commercial Customer (which is not a "Never Customer") shall:
|
||||
|
||||
(a) Void this license *ab initio*;
|
||||
|
||||
(b) Be deemed a material breach of both this Agreement and any existing commercial terms; and
|
||||
|
||||
(c) Entitle Licensor to invoice Licensee for such unauthorized usage at Licensor's standard list prices, retroactive to the date of first use.
|
||||
|
||||
Notwithstanding anything to the contrary in the Eligibility or License Limitations sections above a Commercial Customer may use the Software exclusively for non-production purposes, including Continuous Integration (CI), automated testing, and quality assurance environments, provided that such use at all times remains compliant with the Usage Limit.
|
||||
|
||||
iv. **Verification**. Licensor reserves the right to audit Licensee's environment and corporate identity to ensure compliance with these eligibility criteria.
|
||||
|
||||
For the purposes of this Agreement an "**Affiliate**" means any entity that directly or indirectly controls, is controlled by, or is under common control with a party, where "control" means ownership of more than 50% of the voting stock or decision-making authority
|
||||
|
||||
|
||||
**License Limitations, Restrictions and Obligations:** The license grant above is subject to the following limitations, restrictions, and obligations. If Licensee’s Use of the Software does not comply with the above license grant or the terms of this section (including exceeding the Usage Limit set forth below), Licensee must: (i) refrain from any Use of the Software; and (ii) unless Licensee is a Never Customer, purchase a [commercial paid license](https://www.scylladb.com/scylladb-proprietary-software-license-agreement/) from the Licensor.
|
||||
**License Limitations, Restrictions and Obligations:** The license grant above is subject to the following limitations, restrictions, and obligations. If Licensee’s Use of the Software does not comply with the above license grant or the terms of this section (including exceeding the Usage Limit set forth below), Licensee must: (i) refrain from any Use of the Software; and (ii) purchase a [commercial paid license](https://www.scylladb.com/scylladb-proprietary-software-license-agreement/) from the Licensor.
|
||||
|
||||
* **Updates:** You shall be solely responsible for providing all equipment, systems, assets, access, and ancillary goods and services needed to access and Use the Software. Licensor may modify or update the Software at any time, without notification, in its sole and absolute discretion. After the effective date of each such update, Licensor shall bear no obligation to run, provide or support legacy versions of the Software.
|
||||
* **"Usage Limit":** Licensee's total overall available storage across all deployments and clusters of the Software and the Licensed Work under this License shall not exceed 10TB and/or an upper limit of 50 VCPUs (hyper threads).
|
||||
* **IP Markings:** Licensee must retain all copyright, trademark, and other proprietary notices contained in the Software. You will not modify, delete, alter, remove, or obscure any intellectual property, including without limitations licensing, copyright, trademark, or any other notices of Licensor in the Software.
|
||||
* **License Reproduction:** You must conspicuously display this Agreement on each copy of the Software. If You receive the Software from a third party, this Agreement still applies to Your Use of the Software. You will be responsible for any breach of this Agreement by any such third-party.
|
||||
* Distribution of any Licensed Works is permitted, provided that: (i) You must include in any Licensed Work prominent notices stating that You have modified the Software, (ii) You include a copy of this Agreement with the Licensed Work, and (iii) You clearly identify all modifications made in the Licensed Work and provides attribution to the Licensor as the original author(s) of the Software.
|
||||
* **Commercial Use Restrictions:** Licensee may not offer the Software as a software-as-a-service (SaaS) or commercial database-as-as-service (dBaaS) offering. Licensee may not use the Software to compete with Licensor's existing or future products or services. If your Use of the Software does not comply with the requirements currently in effect as described in this License, you must purchase a commercial license from the Licensor, its Affiliated entities, or you must refrain from using the Software and all Licensed Work. Furthermore, if You make any written claim of patent infringement relating to the Software, Your patent license for the Software granted under this Agreement terminates immediately.
|
||||
* **Commercial Use Restrictions:** Licensee may not offer the Software as a software-as-a-service (SaaS) or commercial database-as-as-service (dBaaS) offering. Licensee may not use the Software to compete with Licensor's existing or future products or services. If your Use of the Software does not comply with the requirements currently in effect as described in this License, you must purchase a commercial license from the Licensor, its affiliated entities, or you must refrain from using the Software and all Licensed Work. Furthermore, if You make any written claim of patent infringement relating to the Software, Your patent license for the Software granted under this Agreement terminates immediately.
|
||||
* Notwithstanding anything to the contrary, under the License granted hereunder, You shall not and shall not permit others to: (i) transfer the Software or any portions thereof to any other party except as expressly permitted herein; (ii) attempt to circumvent or overcome any technological protection measures incorporated into the Software; (iii) incorporate the Software into the structure, machinery or controls of any aircraft, other aerial device, military vehicle, hovercraft, waterborne craft or any medical equipment of any kind; or (iv) use the Software or any part thereof in any unlawful, harmful or illegal manner, or in a manner which infringes third parties’ rights in any way, including intellectual property rights.
|
||||
|
||||
**Monitoring; Audit**
|
||||
@@ -69,14 +41,14 @@ For the purposes of this Agreement an "**Affiliate**" means any entity that dire
|
||||
|
||||
**Indemnity; Disclaimer; Limitation of Liability**
|
||||
|
||||
* **Indemnity:** Licensee hereby agrees to indemnify, defend and hold harmless Licensor and its Affiliates from any losses or damages incurred due to a third party claim arising out of: (i) Licensee’s breach of this Agreement; (ii) Licensee’s negligence, willful misconduct or violation of law, or (iii) Licensee’s products or services.
|
||||
* **Indemnity:** Licensee hereby agrees to indemnify, defend and hold harmless Licensor and its affiliates from any losses or damages incurred due to a third party claim arising out of: (i) Licensee’s breach of this Agreement; (ii) Licensee’s negligence, willful misconduct or violation of law, or (iii) Licensee’s products or services.
|
||||
* DISCLAIMER OF WARRANTIES: LICENSEE AGREES THAT LICENSOR HAS MADE NO EXPRESS WARRANTIES REGARDING THE SOFTWARE AND THAT THE SOFTWARE IS BEING PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND. LICENSOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THE SOFTWARE, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION, ANY IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR PURPOSE; TITLE; MERCHANTABILITY; OR NON-INFRINGEMENT OF THIRD PARTY RIGHTS. LICENSOR DOES NOT WARRANT THAT THE SOFTWARE WILL OPERATE UNINTERRUPTED OR ERROR FREE, OR THAT ALL ERRORS WILL BE CORRECTED. LICENSOR DOES NOT GUARANTEE ANY PARTICULAR RESULTS FROM THE USE OF THE SOFTWARE, AND DOES NOT WARRANT THAT THE SOFTWARE IS FIT FOR ANY PARTICULAR PURPOSE.
|
||||
* LIMITATION OF LIABILITY: TO THE FULLEST EXTENT PERMISSIBLE UNDER APPLICABLE LAW, IN NO EVENT WILL LICENSOR AND/OR ITS AFFILIATES, EMPLOYEES, OFFICERS AND DIRECTORS BE LIABLE TO LICENSEE FOR (I) ANY LOSS OF USE OR DATA; INTERRUPTION OF BUSINESS; OR ANY INDIRECT; SPECIAL; INCIDENTAL; OR CONSEQUENTIAL DAMAGES OF ANY KIND (INCLUDING LOST PROFITS); AND (II) ANY DIRECT DAMAGES EXCEEDING THE TOTAL AMOUNT OF ONE THOUSAND US DOLLARS ($1,000). THE FOREGOING PROVISIONS LIMITING THE LIABILITY OF LICENSOR SHALL APPLY REGARDLESS OF THE FORM OR CAUSE OF ACTION, WHETHER IN STRICT LIABILITY, CONTRACT OR TORT.
|
||||
|
||||
**Proprietary Rights; No Other Rights**
|
||||
|
||||
* **Ownership:** Licensor retains sole and exclusive ownership of all rights, interests and title in the Software and any scripts, processes, techniques, methodologies, inventions, know-how, concepts, formatting, arrangements, visual attributes, ideas, database rights, copyrights, patents, trade secrets, and other intellectual property related thereto, and all derivatives, enhancements, modifications and improvements thereof. Except for the limited license rights granted herein, Licensee has no rights in or to the Software and/ or Licensor’s trademarks, logo, or branding and You acknowledge that such Software, trademarks, logo, or branding is the sole property of Licensor.
|
||||
* **Feedback:** Licensee is not required to provide any suggestions, enhancement requests, recommendations or other feedback regarding the Software ("Feedback"). If, notwithstanding this policy, Licensee submits Feedback, Licensee understands and acknowledges that such Feedback is not submitted in confidence and Licensor assumes no obligation, expressed or implied, by considering it. All right in any trademark or logo of Licensor or its Affiliates and You shall make no claim of right to the Software or any part thereof to be supplied by Licensor hereunder and acknowledges that as between Licensor and You, such Software is the sole proprietary, title and interest in and to Licensor.such Feedback shall be assigned to, and shall become the sole and exclusive property of, Licensor upon its creation.
|
||||
* **Feedback:** Licensee is not required to provide any suggestions, enhancement requests, recommendations or other feedback regarding the Software ("Feedback"). If, notwithstanding this policy, Licensee submits Feedback, Licensee understands and acknowledges that such Feedback is not submitted in confidence and Licensor assumes no obligation, expressed or implied, by considering it. All right in any trademark or logo of Licensor or its affiliates and You shall make no claim of right to the Software or any part thereof to be supplied by Licensor hereunder and acknowledges that as between Licensor and You, such Software is the sole proprietary, title and interest in and to Licensor.such Feedback shall be assigned to, and shall become the sole and exclusive property of, Licensor upon its creation.
|
||||
* Except for the rights expressly granted to You under this Agreement, You are not granted any other licenses or rights in the Software or otherwise. This Agreement constitutes the entire agreement between You and the Licensor with respect to the subject matter hereof and supersedes all prior or contemporaneous communications, representations, or agreements, whether oral or written.
|
||||
* **Third-Party Software:** Customer acknowledges that the Software may contain open and closed source components (“OSS Components”) that are governed separately by certain licenses, in each case as further provided by Company upon request. Any applicable OSS Component license is solely between Licensee and the applicable licensor of the OSS Component and Licensee shall comply with the applicable OSS Component license.
|
||||
* If any provision of this Agreement is held to be invalid or unenforceable, such provision shall be struck and the remaining provisions shall remain in full force and effect.
|
||||
@@ -84,7 +56,7 @@ For the purposes of this Agreement an "**Affiliate**" means any entity that dire
|
||||
**Miscellaneous**
|
||||
|
||||
* **Miscellaneous:** This Agreement may be modified at any time by Licensor, and constitutes the entire agreement between the parties with respect to the subject matter hereof. Licensee may not assign or subcontract its rights or obligations under this Agreement. This Agreement does not, and shall not be construed to create any relationship, partnership, joint venture, employer-employee, agency, or franchisor-franchisee relationship between the parties.
|
||||
* **Modifications**: Licensor reserves the right to modify this Agreement at any time. Changes will be effective upon posting to the Website or within the Software repository. Continued use of the Software after such changes constitutes acceptance.
|
||||
* **Governing Law & Jurisdiction:** This Agreement shall be governed and construed in accordance with the laws of Israel, without giving effect to their respective conflicts of laws provisions, and the competent courts situated in Tel Aviv, Israel, shall have sole and exclusive jurisdiction over the parties and any conflict and/or dispute arising out of, or in connection to, this Agreement
|
||||
|
||||
\[*End of ScyllaDB Software License Agreement*\]
|
||||
|
||||
|
||||
2
abseil
2
abseil
Submodule abseil updated: 255c84dadd...d7aaad83b4
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include "absl-flat_hash_map.hh"
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -9,8 +9,6 @@ target_sources(alternator
|
||||
controller.cc
|
||||
server.cc
|
||||
executor.cc
|
||||
executor_read.cc
|
||||
executor_util.cc
|
||||
stats.cc
|
||||
serialization.cc
|
||||
expressions.cc
|
||||
|
||||
@@ -1,253 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
#include <variant>
|
||||
|
||||
#include "utils/rjson.hh"
|
||||
#include "utils/overloaded_functor.hh"
|
||||
#include "alternator/error.hh"
|
||||
#include "alternator/expressions_types.hh"
|
||||
|
||||
namespace alternator {
|
||||
|
||||
// An attribute_path_map object is used to hold data for various attributes
|
||||
// paths (parsed::path) in a hierarchy of attribute paths. Each attribute path
|
||||
// has a root attribute, and then modified by member and index operators -
|
||||
// for example in "a.b[2].c" we have "a" as the root, then ".b" member, then
|
||||
// "[2]" index, and finally ".c" member.
|
||||
// Data can be added to an attribute_path_map using the add() function, but
|
||||
// requires that attributes with data not be *overlapping* or *conflicting*:
|
||||
//
|
||||
// 1. Two attribute paths which are identical or an ancestor of one another
|
||||
// are considered *overlapping* and not allowed. If a.b.c has data,
|
||||
// we can't add more data in a.b.c or any of its descendants like a.b.c.d.
|
||||
//
|
||||
// 2. Two attribute paths which need the same parent to have both a member and
|
||||
// an index are considered *conflicting* and not allowed. E.g., if a.b has
|
||||
// data, you can't add a[1]. The meaning of adding both would be that the
|
||||
// attribute a is both a map and an array, which isn't sensible.
|
||||
//
|
||||
// These two requirements are common to the two places where Alternator uses
|
||||
// this abstraction to describe how a hierarchical item is to be transformed:
|
||||
//
|
||||
// 1. In ProjectExpression: for filtering from a full top-level attribute
|
||||
// only the parts for which user asked in ProjectionExpression.
|
||||
//
|
||||
// 2. In UpdateExpression: for taking the previous value of a top-level
|
||||
// attribute, and modifying it based on the instructions in the user
|
||||
// wrote in UpdateExpression.
|
||||
|
||||
template<typename T>
|
||||
class attribute_path_map_node {
|
||||
public:
|
||||
using data_t = T;
|
||||
// We need the extra unique_ptr<> here because libstdc++ unordered_map
|
||||
// doesn't work with incomplete types :-(
|
||||
using members_t = std::unordered_map<std::string, std::unique_ptr<attribute_path_map_node<T>>>;
|
||||
// The indexes list is sorted because DynamoDB requires handling writes
|
||||
// beyond the end of a list in index order.
|
||||
using indexes_t = std::map<unsigned, std::unique_ptr<attribute_path_map_node<T>>>;
|
||||
// The prohibition on "overlap" and "conflict" explained above means
|
||||
// That only one of data, members or indexes is non-empty.
|
||||
std::optional<std::variant<data_t, members_t, indexes_t>> _content;
|
||||
|
||||
bool is_empty() const { return !_content; }
|
||||
bool has_value() const { return _content && std::holds_alternative<data_t>(*_content); }
|
||||
bool has_members() const { return _content && std::holds_alternative<members_t>(*_content); }
|
||||
bool has_indexes() const { return _content && std::holds_alternative<indexes_t>(*_content); }
|
||||
// get_members() assumes that has_members() is true
|
||||
members_t& get_members() { return std::get<members_t>(*_content); }
|
||||
const members_t& get_members() const { return std::get<members_t>(*_content); }
|
||||
indexes_t& get_indexes() { return std::get<indexes_t>(*_content); }
|
||||
const indexes_t& get_indexes() const { return std::get<indexes_t>(*_content); }
|
||||
T& get_value() { return std::get<T>(*_content); }
|
||||
const T& get_value() const { return std::get<T>(*_content); }
|
||||
};
|
||||
|
||||
template<typename T>
|
||||
using attribute_path_map = std::unordered_map<std::string, attribute_path_map_node<T>>;
|
||||
|
||||
using attrs_to_get_node = attribute_path_map_node<std::monostate>;
|
||||
// attrs_to_get lists which top-level attribute are needed, and possibly also
|
||||
// which part of the top-level attribute is really needed (when nested
|
||||
// attribute paths appeared in the query).
|
||||
// Most code actually uses optional<attrs_to_get>. There, a disengaged
|
||||
// optional means we should get all attributes, not specific ones.
|
||||
using attrs_to_get = attribute_path_map<std::monostate>;
|
||||
|
||||
// takes a given JSON value and drops its parts which weren't asked to be
|
||||
// kept. It modifies the given JSON value, or returns false to signify that
|
||||
// the entire object should be dropped.
|
||||
// Note that The JSON value is assumed to be encoded using the DynamoDB
|
||||
// conventions - i.e., it is really a map whose key has a type string,
|
||||
// and the value is the real object.
|
||||
template<typename T>
|
||||
bool hierarchy_filter(rjson::value& val, const attribute_path_map_node<T>& h) {
|
||||
if (!val.IsObject() || val.MemberCount() != 1) {
|
||||
// This shouldn't happen. We shouldn't have stored malformed objects.
|
||||
// But today Alternator does not validate the structure of nested
|
||||
// documents before storing them, so this can happen on read.
|
||||
throw api_error::internal(format("Malformed value object read: {}", val));
|
||||
}
|
||||
const char* type = val.MemberBegin()->name.GetString();
|
||||
rjson::value& v = val.MemberBegin()->value;
|
||||
if (h.has_members()) {
|
||||
const auto& members = h.get_members();
|
||||
if (type[0] != 'M' || !v.IsObject()) {
|
||||
// If v is not an object (dictionary, map), none of the members
|
||||
// can match.
|
||||
return false;
|
||||
}
|
||||
rjson::value newv = rjson::empty_object();
|
||||
for (auto it = v.MemberBegin(); it != v.MemberEnd(); ++it) {
|
||||
std::string attr = rjson::to_string(it->name);
|
||||
auto x = members.find(attr);
|
||||
if (x != members.end()) {
|
||||
if (x->second) {
|
||||
// Only a part of this attribute is to be filtered, do it.
|
||||
if (hierarchy_filter(it->value, *x->second)) {
|
||||
// because newv started empty and attr are unique
|
||||
// (keys of v), we can use add() here
|
||||
rjson::add_with_string_name(newv, attr, std::move(it->value));
|
||||
}
|
||||
} else {
|
||||
// The entire attribute is to be kept
|
||||
rjson::add_with_string_name(newv, attr, std::move(it->value));
|
||||
}
|
||||
}
|
||||
}
|
||||
if (newv.MemberCount() == 0) {
|
||||
return false;
|
||||
}
|
||||
v = newv;
|
||||
} else if (h.has_indexes()) {
|
||||
const auto& indexes = h.get_indexes();
|
||||
if (type[0] != 'L' || !v.IsArray()) {
|
||||
return false;
|
||||
}
|
||||
rjson::value newv = rjson::empty_array();
|
||||
const auto& a = v.GetArray();
|
||||
for (unsigned i = 0; i < v.Size(); i++) {
|
||||
auto x = indexes.find(i);
|
||||
if (x != indexes.end()) {
|
||||
if (x->second) {
|
||||
if (hierarchy_filter(a[i], *x->second)) {
|
||||
rjson::push_back(newv, std::move(a[i]));
|
||||
}
|
||||
} else {
|
||||
// The entire attribute is to be kept
|
||||
rjson::push_back(newv, std::move(a[i]));
|
||||
}
|
||||
}
|
||||
}
|
||||
if (newv.Size() == 0) {
|
||||
return false;
|
||||
}
|
||||
v = newv;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// Add a path to an attribute_path_map. Throws a validation error if the path
|
||||
// "overlaps" with one already in the filter (one is a sub-path of the other)
|
||||
// or "conflicts" with it (both a member and index is requested).
|
||||
template<typename T>
|
||||
void attribute_path_map_add(const char* source, attribute_path_map<T>& map, const parsed::path& p, T value = {}) {
|
||||
using node = attribute_path_map_node<T>;
|
||||
// The first step is to look for the top-level attribute (p.root()):
|
||||
auto it = map.find(p.root());
|
||||
if (it == map.end()) {
|
||||
if (p.has_operators()) {
|
||||
it = map.emplace(p.root(), node {std::nullopt}).first;
|
||||
} else {
|
||||
(void) map.emplace(p.root(), node {std::move(value)}).first;
|
||||
// Value inserted for top-level node. We're done.
|
||||
return;
|
||||
}
|
||||
} else if(!p.has_operators()) {
|
||||
// If p is top-level and we already have it or a part of it
|
||||
// in map, it's a forbidden overlapping path.
|
||||
throw api_error::validation(fmt::format(
|
||||
"Invalid {}: two document paths overlap at {}", source, p.root()));
|
||||
} else if (it->second.has_value()) {
|
||||
// If we're here, it != map.end() && p.has_operators && it->second.has_value().
|
||||
// This means the top-level attribute already has a value, and we're
|
||||
// trying to add a non-top-level value. It's an overlap.
|
||||
throw api_error::validation(fmt::format("Invalid {}: two document paths overlap at {}", source, p.root()));
|
||||
}
|
||||
node* h = &it->second;
|
||||
// The second step is to walk h from the top-level node to the inner node
|
||||
// where we're supposed to insert the value:
|
||||
for (const auto& op : p.operators()) {
|
||||
std::visit(overloaded_functor {
|
||||
[&] (const std::string& member) {
|
||||
if (h->is_empty()) {
|
||||
*h = node {typename node::members_t()};
|
||||
} else if (h->has_indexes()) {
|
||||
throw api_error::validation(format("Invalid {}: two document paths conflict at {}", source, p));
|
||||
} else if (h->has_value()) {
|
||||
throw api_error::validation(format("Invalid {}: two document paths overlap at {}", source, p));
|
||||
}
|
||||
typename node::members_t& members = h->get_members();
|
||||
auto it = members.find(member);
|
||||
if (it == members.end()) {
|
||||
it = members.insert({member, std::make_unique<node>()}).first;
|
||||
}
|
||||
h = it->second.get();
|
||||
},
|
||||
[&] (unsigned index) {
|
||||
if (h->is_empty()) {
|
||||
*h = node {typename node::indexes_t()};
|
||||
} else if (h->has_members()) {
|
||||
throw api_error::validation(format("Invalid {}: two document paths conflict at {}", source, p));
|
||||
} else if (h->has_value()) {
|
||||
throw api_error::validation(format("Invalid {}: two document paths overlap at {}", source, p));
|
||||
}
|
||||
typename node::indexes_t& indexes = h->get_indexes();
|
||||
auto it = indexes.find(index);
|
||||
if (it == indexes.end()) {
|
||||
it = indexes.insert({index, std::make_unique<node>()}).first;
|
||||
}
|
||||
h = it->second.get();
|
||||
}
|
||||
}, op);
|
||||
}
|
||||
// Finally, insert the value in the node h.
|
||||
if (h->is_empty()) {
|
||||
*h = node {std::move(value)};
|
||||
} else {
|
||||
throw api_error::validation(format("Invalid {}: two document paths overlap at {}", source, p));
|
||||
}
|
||||
}
|
||||
|
||||
// A very simplified version of the above function for the special case of
|
||||
// adding only top-level attribute. It's not only simpler, we also use a
|
||||
// different error message, referring to a "duplicate attribute" instead of
|
||||
// "overlapping paths". DynamoDB also has this distinction (errors in
|
||||
// AttributesToGet refer to duplicates, not overlaps, but errors in
|
||||
// ProjectionExpression refer to overlap - even if it's an exact duplicate).
|
||||
template<typename T>
|
||||
void attribute_path_map_add(const char* source, attribute_path_map<T>& map, const std::string& attr, T value = {}) {
|
||||
using node = attribute_path_map_node<T>;
|
||||
auto it = map.find(attr);
|
||||
if (it == map.end()) {
|
||||
map.emplace(attr, node {std::move(value)});
|
||||
} else {
|
||||
throw api_error::validation(fmt::format(
|
||||
"Invalid {}: Duplicate attribute: {}", source, attr));
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace alternator
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include "alternator/error.hh"
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include <string_view>
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
/*
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include "consumed_capacity.hh"
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include <seastar/core/with_scheduling_group.hh>
|
||||
@@ -18,7 +18,6 @@
|
||||
#include "service/memory_limiter.hh"
|
||||
#include "auth/service.hh"
|
||||
#include "service/qos/service_level_controller.hh"
|
||||
#include "vector_search/vector_store_client.hh"
|
||||
|
||||
using namespace seastar;
|
||||
|
||||
@@ -32,12 +31,10 @@ controller::controller(
|
||||
sharded<service::storage_service>& ss,
|
||||
sharded<service::migration_manager>& mm,
|
||||
sharded<db::system_distributed_keyspace>& sys_dist_ks,
|
||||
sharded<db::system_keyspace>& sys_ks,
|
||||
sharded<cdc::generation_service>& cdc_gen_svc,
|
||||
sharded<service::memory_limiter>& memory_limiter,
|
||||
sharded<auth::service>& auth_service,
|
||||
sharded<qos::service_level_controller>& sl_controller,
|
||||
sharded<vector_search::vector_store_client>& vsc,
|
||||
const db::config& config,
|
||||
seastar::scheduling_group sg)
|
||||
: protocol_server(sg)
|
||||
@@ -46,12 +43,10 @@ controller::controller(
|
||||
, _ss(ss)
|
||||
, _mm(mm)
|
||||
, _sys_dist_ks(sys_dist_ks)
|
||||
, _sys_ks(sys_ks)
|
||||
, _cdc_gen_svc(cdc_gen_svc)
|
||||
, _memory_limiter(memory_limiter)
|
||||
, _auth_service(auth_service)
|
||||
, _sl_controller(sl_controller)
|
||||
, _vsc(vsc)
|
||||
, _config(config)
|
||||
{
|
||||
}
|
||||
@@ -96,8 +91,8 @@ future<> controller::start_server() {
|
||||
auto get_timeout_in_ms = [] (const db::config& cfg) -> utils::updateable_value<uint32_t> {
|
||||
return cfg.alternator_timeout_in_ms;
|
||||
};
|
||||
_executor.start(std::ref(_gossiper), std::ref(_proxy), std::ref(_ss), std::ref(_mm), std::ref(_sys_dist_ks), std::ref(_sys_ks),
|
||||
sharded_parameter(get_cdc_metadata, std::ref(_cdc_gen_svc)), std::ref(_vsc), _ssg.value(),
|
||||
_executor.start(std::ref(_gossiper), std::ref(_proxy), std::ref(_ss), std::ref(_mm), std::ref(_sys_dist_ks),
|
||||
sharded_parameter(get_cdc_metadata, std::ref(_cdc_gen_svc)), _ssg.value(),
|
||||
sharded_parameter(get_timeout_in_ms, std::ref(_config))).get();
|
||||
_server.start(std::ref(_executor), std::ref(_proxy), std::ref(_gossiper), std::ref(_auth_service), std::ref(_sl_controller)).get();
|
||||
// Note: from this point on, if start_server() throws for any reason,
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
@@ -22,7 +22,6 @@ class memory_limiter;
|
||||
|
||||
namespace db {
|
||||
class system_distributed_keyspace;
|
||||
class system_keyspace;
|
||||
class config;
|
||||
}
|
||||
|
||||
@@ -44,10 +43,6 @@ namespace qos {
|
||||
class service_level_controller;
|
||||
}
|
||||
|
||||
namespace vector_search {
|
||||
class vector_store_client;
|
||||
}
|
||||
|
||||
namespace alternator {
|
||||
|
||||
// This is the official DynamoDB API version.
|
||||
@@ -66,12 +61,10 @@ class controller : public protocol_server {
|
||||
sharded<service::storage_service>& _ss;
|
||||
sharded<service::migration_manager>& _mm;
|
||||
sharded<db::system_distributed_keyspace>& _sys_dist_ks;
|
||||
sharded<db::system_keyspace>& _sys_ks;
|
||||
sharded<cdc::generation_service>& _cdc_gen_svc;
|
||||
sharded<service::memory_limiter>& _memory_limiter;
|
||||
sharded<auth::service>& _auth_service;
|
||||
sharded<qos::service_level_controller>& _sl_controller;
|
||||
sharded<vector_search::vector_store_client>& _vsc;
|
||||
const db::config& _config;
|
||||
|
||||
std::vector<socket_address> _listen_addresses;
|
||||
@@ -86,12 +79,10 @@ public:
|
||||
sharded<service::storage_service>& ss,
|
||||
sharded<service::migration_manager>& mm,
|
||||
sharded<db::system_distributed_keyspace>& sys_dist_ks,
|
||||
sharded<db::system_keyspace>& sys_ks,
|
||||
sharded<cdc::generation_service>& cdc_gen_svc,
|
||||
sharded<service::memory_limiter>& memory_limiter,
|
||||
sharded<auth::service>& auth_service,
|
||||
sharded<qos::service_level_controller>& sl_controller,
|
||||
sharded<vector_search::vector_store_client>& vsc,
|
||||
const db::config& config,
|
||||
seastar::scheduling_group sg);
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -3,15 +3,13 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <seastar/core/future.hh>
|
||||
#include "audit/audit.hh"
|
||||
#include "seastarx.hh"
|
||||
#include <seastar/core/future.hh>
|
||||
#include <seastar/core/sharded.hh>
|
||||
#include <seastar/util/noncopyable_function.hh>
|
||||
|
||||
@@ -22,23 +20,15 @@
|
||||
#include "db/config.hh"
|
||||
|
||||
#include "alternator/error.hh"
|
||||
#include "alternator/attribute_path.hh"
|
||||
#include "alternator/stats.hh"
|
||||
#include "alternator/executor_util.hh"
|
||||
|
||||
#include "stats.hh"
|
||||
#include "utils/rjson.hh"
|
||||
#include "utils/updateable_value.hh"
|
||||
#include "utils/simple_value_with_expiry.hh"
|
||||
|
||||
#include "tracing/trace_state.hh"
|
||||
|
||||
|
||||
namespace db {
|
||||
class system_distributed_keyspace;
|
||||
class system_keyspace;
|
||||
}
|
||||
|
||||
namespace audit {
|
||||
class audit_info_alternator;
|
||||
}
|
||||
|
||||
namespace query {
|
||||
@@ -56,10 +46,6 @@ namespace service {
|
||||
class storage_service;
|
||||
}
|
||||
|
||||
namespace vector_search {
|
||||
class vector_store_client;
|
||||
}
|
||||
|
||||
namespace cdc {
|
||||
class metadata;
|
||||
}
|
||||
@@ -72,13 +58,82 @@ class gossiper;
|
||||
|
||||
class schema_builder;
|
||||
|
||||
|
||||
namespace alternator {
|
||||
|
||||
enum class table_status;
|
||||
class rmw_operation;
|
||||
class put_or_delete_item;
|
||||
|
||||
schema_ptr get_table(service::storage_proxy& proxy, const rjson::value& request);
|
||||
bool is_alternator_keyspace(const sstring& ks_name);
|
||||
// Wraps the db::get_tags_of_table and throws if the table is missing the tags extension.
|
||||
const std::map<sstring, sstring>& get_tags_of_table_or_throw(schema_ptr schema);
|
||||
|
||||
// An attribute_path_map object is used to hold data for various attributes
|
||||
// paths (parsed::path) in a hierarchy of attribute paths. Each attribute path
|
||||
// has a root attribute, and then modified by member and index operators -
|
||||
// for example in "a.b[2].c" we have "a" as the root, then ".b" member, then
|
||||
// "[2]" index, and finally ".c" member.
|
||||
// Data can be added to an attribute_path_map using the add() function, but
|
||||
// requires that attributes with data not be *overlapping* or *conflicting*:
|
||||
//
|
||||
// 1. Two attribute paths which are identical or an ancestor of one another
|
||||
// are considered *overlapping* and not allowed. If a.b.c has data,
|
||||
// we can't add more data in a.b.c or any of its descendants like a.b.c.d.
|
||||
//
|
||||
// 2. Two attribute paths which need the same parent to have both a member and
|
||||
// an index are considered *conflicting* and not allowed. E.g., if a.b has
|
||||
// data, you can't add a[1]. The meaning of adding both would be that the
|
||||
// attribute a is both a map and an array, which isn't sensible.
|
||||
//
|
||||
// These two requirements are common to the two places where Alternator uses
|
||||
// this abstraction to describe how a hierarchical item is to be transformed:
|
||||
//
|
||||
// 1. In ProjectExpression: for filtering from a full top-level attribute
|
||||
// only the parts for which user asked in ProjectionExpression.
|
||||
//
|
||||
// 2. In UpdateExpression: for taking the previous value of a top-level
|
||||
// attribute, and modifying it based on the instructions in the user
|
||||
// wrote in UpdateExpression.
|
||||
|
||||
template<typename T>
|
||||
class attribute_path_map_node {
|
||||
public:
|
||||
using data_t = T;
|
||||
// We need the extra unique_ptr<> here because libstdc++ unordered_map
|
||||
// doesn't work with incomplete types :-(
|
||||
using members_t = std::unordered_map<std::string, std::unique_ptr<attribute_path_map_node<T>>>;
|
||||
// The indexes list is sorted because DynamoDB requires handling writes
|
||||
// beyond the end of a list in index order.
|
||||
using indexes_t = std::map<unsigned, std::unique_ptr<attribute_path_map_node<T>>>;
|
||||
// The prohibition on "overlap" and "conflict" explained above means
|
||||
// That only one of data, members or indexes is non-empty.
|
||||
std::optional<std::variant<data_t, members_t, indexes_t>> _content;
|
||||
|
||||
bool is_empty() const { return !_content; }
|
||||
bool has_value() const { return _content && std::holds_alternative<data_t>(*_content); }
|
||||
bool has_members() const { return _content && std::holds_alternative<members_t>(*_content); }
|
||||
bool has_indexes() const { return _content && std::holds_alternative<indexes_t>(*_content); }
|
||||
// get_members() assumes that has_members() is true
|
||||
members_t& get_members() { return std::get<members_t>(*_content); }
|
||||
const members_t& get_members() const { return std::get<members_t>(*_content); }
|
||||
indexes_t& get_indexes() { return std::get<indexes_t>(*_content); }
|
||||
const indexes_t& get_indexes() const { return std::get<indexes_t>(*_content); }
|
||||
T& get_value() { return std::get<T>(*_content); }
|
||||
const T& get_value() const { return std::get<T>(*_content); }
|
||||
};
|
||||
|
||||
template<typename T>
|
||||
using attribute_path_map = std::unordered_map<std::string, attribute_path_map_node<T>>;
|
||||
|
||||
using attrs_to_get_node = attribute_path_map_node<std::monostate>;
|
||||
// attrs_to_get lists which top-level attribute are needed, and possibly also
|
||||
// which part of the top-level attribute is really needed (when nested
|
||||
// attribute paths appeared in the query).
|
||||
// Most code actually uses optional<attrs_to_get>. There, a disengaged
|
||||
// optional means we should get all attributes, not specific ones.
|
||||
using attrs_to_get = attribute_path_map<std::monostate>;
|
||||
|
||||
namespace parsed {
|
||||
class expression_cache;
|
||||
}
|
||||
@@ -89,12 +144,9 @@ class executor : public peering_sharded_service<executor> {
|
||||
service::storage_proxy& _proxy;
|
||||
service::migration_manager& _mm;
|
||||
db::system_distributed_keyspace& _sdks;
|
||||
db::system_keyspace& _system_keyspace;
|
||||
cdc::metadata& _cdc_metadata;
|
||||
vector_search::vector_store_client& _vsc;
|
||||
utils::updateable_value<bool> _enforce_authorization;
|
||||
utils::updateable_value<bool> _warn_authorization;
|
||||
seastar::sharded<audit::audit>& _audit;
|
||||
// An smp_service_group to be used for limiting the concurrency when
|
||||
// forwarding Alternator request between shards - if necessary for LWT.
|
||||
smp_service_group _ssg;
|
||||
@@ -119,6 +171,7 @@ public:
|
||||
// is written in chunks to the output_stream. This allows for efficient
|
||||
// handling of large responses without needing to allocate a large buffer
|
||||
// in memory.
|
||||
using body_writer = noncopyable_function<future<>(output_stream<char>&&)>;
|
||||
using request_return_type = std::variant<std::string, body_writer, api_error>;
|
||||
stats _stats;
|
||||
// The metric_groups object holds this stat object's metrics registered
|
||||
@@ -133,60 +186,53 @@ public:
|
||||
service::storage_service& ss,
|
||||
service::migration_manager& mm,
|
||||
db::system_distributed_keyspace& sdks,
|
||||
db::system_keyspace& system_keyspace,
|
||||
cdc::metadata& cdc_metadata,
|
||||
vector_search::vector_store_client& vsc,
|
||||
smp_service_group ssg,
|
||||
utils::updateable_value<uint32_t> default_timeout_in_ms);
|
||||
~executor();
|
||||
|
||||
future<request_return_type> create_table(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info);
|
||||
future<request_return_type> describe_table(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info);
|
||||
future<request_return_type> delete_table(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info);
|
||||
future<request_return_type> update_table(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info);
|
||||
future<request_return_type> put_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info);
|
||||
future<request_return_type> get_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info);
|
||||
future<request_return_type> delete_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info);
|
||||
future<request_return_type> update_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info);
|
||||
future<request_return_type> list_tables(client_state& client_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info);
|
||||
future<request_return_type> scan(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info);
|
||||
future<request_return_type> describe_endpoints(client_state& client_state, service_permit permit, rjson::value request, std::string host_header, std::unique_ptr<audit::audit_info_alternator>& audit_info);
|
||||
future<request_return_type> batch_write_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info);
|
||||
future<request_return_type> batch_get_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info);
|
||||
future<request_return_type> query(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info);
|
||||
future<request_return_type> tag_resource(client_state& client_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info);
|
||||
future<request_return_type> untag_resource(client_state& client_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info);
|
||||
future<request_return_type> list_tags_of_resource(client_state& client_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info);
|
||||
future<request_return_type> update_time_to_live(client_state& client_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info);
|
||||
future<request_return_type> describe_time_to_live(client_state& client_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info);
|
||||
future<request_return_type> list_streams(client_state& client_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info);
|
||||
future<request_return_type> describe_stream(client_state& client_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info);
|
||||
future<request_return_type> get_shard_iterator(client_state& client_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info);
|
||||
future<request_return_type> get_records(client_state& client_state, tracing::trace_state_ptr, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info);
|
||||
future<request_return_type> describe_continuous_backups(client_state& client_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info);
|
||||
future<request_return_type> create_table(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> describe_table(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> delete_table(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> update_table(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> put_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> get_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> delete_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> update_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> list_tables(client_state& client_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> scan(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> describe_endpoints(client_state& client_state, service_permit permit, rjson::value request, std::string host_header);
|
||||
future<request_return_type> batch_write_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> batch_get_item(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> query(client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> tag_resource(client_state& client_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> untag_resource(client_state& client_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> list_tags_of_resource(client_state& client_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> update_time_to_live(client_state& client_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> describe_time_to_live(client_state& client_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> list_streams(client_state& client_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> describe_stream(client_state& client_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> get_shard_iterator(client_state& client_state, service_permit permit, rjson::value request);
|
||||
future<request_return_type> get_records(client_state& client_state, tracing::trace_state_ptr, service_permit permit, rjson::value request);
|
||||
future<request_return_type> describe_continuous_backups(client_state& client_state, service_permit permit, rjson::value request);
|
||||
|
||||
future<> start();
|
||||
future<> stop();
|
||||
|
||||
static sstring table_name(const schema&);
|
||||
static db::timeout_clock::time_point default_timeout();
|
||||
private:
|
||||
static thread_local utils::updateable_value<uint32_t> s_default_timeout_in_ms;
|
||||
public:
|
||||
static schema_ptr find_table(service::storage_proxy&, std::string_view table_name);
|
||||
static schema_ptr find_table(service::storage_proxy&, const rjson::value& request);
|
||||
|
||||
private:
|
||||
friend class rmw_operation;
|
||||
|
||||
// Helper to set up auditing for an Alternator operation. Checks whether
|
||||
// the operation should be audited (via will_log()) and if so, allocates
|
||||
// and populates audit_info. No allocation occurs when auditing is disabled.
|
||||
void maybe_audit(std::unique_ptr<audit::audit_info_alternator>& audit_info,
|
||||
audit::statement_category category,
|
||||
std::string_view ks_name,
|
||||
std::string_view table_name,
|
||||
std::string_view operation_name,
|
||||
const rjson::value& request,
|
||||
std::optional<db::consistency_level> cl = std::nullopt);
|
||||
|
||||
static void describe_key_schema(rjson::value& parent, const schema&, std::unordered_map<std::string,std::string> * = nullptr, const std::map<sstring, sstring> *tags = nullptr);
|
||||
future<rjson::value> fill_table_description(schema_ptr schema, table_status tbl_status, service::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit);
|
||||
future<executor::request_return_type> create_table_on_shard0(service::client_state&& client_state, tracing::trace_state_ptr trace_state, rjson::value request, bool enforce_authorization,
|
||||
bool warn_authorization, const db::tablets_mode_t::mode tablets_mode, std::unique_ptr<audit::audit_info_alternator>& audit_info);
|
||||
future<executor::request_return_type> create_table_on_shard0(service::client_state&& client_state, tracing::trace_state_ptr trace_state, rjson::value request, bool enforce_authorization, bool warn_authorization, const db::tablets_mode_t::mode tablets_mode);
|
||||
|
||||
future<> do_batch_write(
|
||||
std::vector<std::pair<schema_ptr, put_or_delete_item>> mutation_builders,
|
||||
@@ -199,34 +245,60 @@ private:
|
||||
tracing::trace_state_ptr trace_state, service_permit permit);
|
||||
|
||||
public:
|
||||
static void describe_key_schema(rjson::value& parent, const schema& schema, std::unordered_map<std::string,std::string>&, const std::map<sstring, sstring> *tags = nullptr);
|
||||
|
||||
static std::optional<rjson::value> describe_single_item(schema_ptr,
|
||||
const query::partition_slice&,
|
||||
const cql3::selection::selection&,
|
||||
const query::result&,
|
||||
const std::optional<attrs_to_get>&,
|
||||
uint64_t* = nullptr);
|
||||
|
||||
// Converts a multi-row selection result to JSON compatible with DynamoDB.
|
||||
// For each row, this method calls item_callback, which takes the size of
|
||||
// the item as the parameter.
|
||||
static future<std::vector<rjson::value>> describe_multi_item(schema_ptr schema,
|
||||
const query::partition_slice&& slice,
|
||||
shared_ptr<cql3::selection::selection> selection,
|
||||
foreign_ptr<lw_shared_ptr<query::result>> query_result,
|
||||
shared_ptr<const std::optional<attrs_to_get>> attrs_to_get,
|
||||
noncopyable_function<void(uint64_t)> item_callback = {});
|
||||
|
||||
static void describe_single_item(const cql3::selection::selection&,
|
||||
const std::vector<managed_bytes_opt>&,
|
||||
const std::optional<attrs_to_get>&,
|
||||
rjson::value&,
|
||||
uint64_t* item_length_in_bytes = nullptr,
|
||||
bool = false);
|
||||
|
||||
static bool add_stream_options(const rjson::value& stream_spec, schema_builder&, service::storage_proxy& sp);
|
||||
static void supplement_table_info(rjson::value& descr, const schema& schema, service::storage_proxy& sp);
|
||||
static void supplement_table_stream_info(rjson::value& descr, const schema& schema, const service::storage_proxy& sp);
|
||||
};
|
||||
|
||||
// returns table creation time in seconds since epoch for `db_clock`
|
||||
double get_table_creation_time(const schema &schema);
|
||||
// is_big() checks approximately if the given JSON value is "bigger" than
|
||||
// the given big_size number of bytes. The goal is to *quickly* detect
|
||||
// oversized JSON that, for example, is too large to be serialized to a
|
||||
// contiguous string - we don't need an accurate size for that. Moreover,
|
||||
// as soon as we detect that the JSON is indeed "big", we can return true
|
||||
// and don't need to continue calculating its exact size.
|
||||
// For simplicity, we use a recursive implementation. This is fine because
|
||||
// Alternator limits the depth of JSONs it reads from inputs, and doesn't
|
||||
// add more than a couple of levels in its own output construction.
|
||||
bool is_big(const rjson::value& val, int big_size = 100'000);
|
||||
|
||||
// result of parsing ARN (Amazon Resource Name)
|
||||
// ARN format is `arn:<partition>:<service>:<region>:<account-id>:<resource-type>/<resource-id>/<postfix>`
|
||||
// we ignore partition, service and account-id
|
||||
// resource-type must be string "table"
|
||||
// resource-id will be returned as table_name
|
||||
// region will be returned as keyspace_name
|
||||
// postfix is a string after resource-id and will be returned as is (whole), including separator.
|
||||
struct arn_parts {
|
||||
std::string_view keyspace_name;
|
||||
std::string_view table_name;
|
||||
std::string_view postfix;
|
||||
};
|
||||
// arn - arn to parse
|
||||
// arn_field_name - identifier of the ARN, used only when reporting an error (in error messages), for example "Incorrect resource identifier `<arn_field_name>`"
|
||||
// type_name - used only when reporting an error (in error messages), for example "... is not a valid <type_name> ARN ..."
|
||||
// expected_postfix - optional filter of postfix value (part of ARN after resource-id, including separator, see comments for struct arn_parts).
|
||||
// If is empty - then postfix value must be empty as well
|
||||
// if not empty - postfix value must start with expected_postfix, but might be longer
|
||||
arn_parts parse_arn(std::string_view arn, std::string_view arn_field_name, std::string_view type_name, std::string_view expected_postfix);
|
||||
// Check CQL's Role-Based Access Control (RBAC) permission (MODIFY,
|
||||
// SELECT, DROP, etc.) on the given table. When permission is denied an
|
||||
// appropriate user-readable api_error::access_denied is thrown.
|
||||
future<> verify_permission(bool enforce_authorization, bool warn_authorization, const service::client_state&, const schema_ptr&, auth::permission, alternator::stats& stats);
|
||||
|
||||
/**
|
||||
* Make return type for serializing the object "streamed",
|
||||
* i.e. direct to HTTP output stream. Note: only useful for
|
||||
* (very) large objects as there are overhead issues with this
|
||||
* as well, but for massive lists of return objects this can
|
||||
* help avoid large allocations/many re-allocs
|
||||
*/
|
||||
executor::body_writer make_streamed(rjson::value&&);
|
||||
|
||||
// The format is ks1|ks2|ks3... and table1|table2|table3...
|
||||
sstring print_names_for_audit(const std::set<sstring>& names);
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,559 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
*/
|
||||
|
||||
#include "alternator/executor_util.hh"
|
||||
#include "alternator/executor.hh"
|
||||
#include "alternator/error.hh"
|
||||
#include "auth/resource.hh"
|
||||
#include "auth/service.hh"
|
||||
#include "cdc/log.hh"
|
||||
#include "data_dictionary/data_dictionary.hh"
|
||||
#include "db/tags/utils.hh"
|
||||
#include "replica/database.hh"
|
||||
#include "cql3/selection/selection.hh"
|
||||
#include "cql3/result_set.hh"
|
||||
#include "serialization.hh"
|
||||
#include "service/storage_proxy.hh"
|
||||
#include "types/map.hh"
|
||||
#include <fmt/format.h>
|
||||
|
||||
namespace alternator {
|
||||
|
||||
extern logging::logger elogger; // from executor.cc
|
||||
|
||||
std::optional<int> get_int_attribute(const rjson::value& value, std::string_view attribute_name) {
|
||||
const rjson::value* attribute_value = rjson::find(value, attribute_name);
|
||||
if (!attribute_value)
|
||||
return {};
|
||||
if (!attribute_value->IsInt()) {
|
||||
throw api_error::validation(fmt::format("Expected integer value for attribute {}, got: {}",
|
||||
attribute_name, value));
|
||||
}
|
||||
return attribute_value->GetInt();
|
||||
}
|
||||
|
||||
std::string get_string_attribute(const rjson::value& value, std::string_view attribute_name, const char* default_return) {
|
||||
const rjson::value* attribute_value = rjson::find(value, attribute_name);
|
||||
if (!attribute_value)
|
||||
return default_return;
|
||||
if (!attribute_value->IsString()) {
|
||||
throw api_error::validation(fmt::format("Expected string value for attribute {}, got: {}",
|
||||
attribute_name, value));
|
||||
}
|
||||
return rjson::to_string(*attribute_value);
|
||||
}
|
||||
|
||||
bool get_bool_attribute(const rjson::value& value, std::string_view attribute_name, bool default_return) {
|
||||
const rjson::value* attribute_value = rjson::find(value, attribute_name);
|
||||
if (!attribute_value) {
|
||||
return default_return;
|
||||
}
|
||||
if (!attribute_value->IsBool()) {
|
||||
throw api_error::validation(fmt::format("Expected boolean value for attribute {}, got: {}",
|
||||
attribute_name, value));
|
||||
}
|
||||
return attribute_value->GetBool();
|
||||
}
|
||||
|
||||
std::optional<std::string> find_table_name(const rjson::value& request) {
|
||||
const rjson::value* table_name_value = rjson::find(request, "TableName");
|
||||
if (!table_name_value) {
|
||||
return std::nullopt;
|
||||
}
|
||||
if (!table_name_value->IsString()) {
|
||||
throw api_error::validation("Non-string TableName field in request");
|
||||
}
|
||||
std::string table_name = rjson::to_string(*table_name_value);
|
||||
return table_name;
|
||||
}
|
||||
|
||||
std::string get_table_name(const rjson::value& request) {
|
||||
auto name = find_table_name(request);
|
||||
if (!name) {
|
||||
throw api_error::validation("Missing TableName field in request");
|
||||
}
|
||||
return *name;
|
||||
}
|
||||
|
||||
schema_ptr find_table(service::storage_proxy& proxy, const rjson::value& request) {
|
||||
auto table_name = find_table_name(request);
|
||||
if (!table_name) {
|
||||
return nullptr;
|
||||
}
|
||||
return find_table(proxy, *table_name);
|
||||
}
|
||||
|
||||
schema_ptr find_table(service::storage_proxy& proxy, std::string_view table_name) {
|
||||
try {
|
||||
return proxy.data_dictionary().find_schema(sstring(executor::KEYSPACE_NAME_PREFIX) + sstring(table_name), table_name);
|
||||
} catch(data_dictionary::no_such_column_family&) {
|
||||
// DynamoDB returns validation error even when table does not exist
|
||||
// and the table name is invalid.
|
||||
validate_table_name(table_name);
|
||||
|
||||
throw api_error::resource_not_found(
|
||||
fmt::format("Requested resource not found: Table: {} not found", table_name));
|
||||
}
|
||||
}
|
||||
|
||||
schema_ptr get_table(service::storage_proxy& proxy, const rjson::value& request) {
|
||||
auto schema = find_table(proxy, request);
|
||||
if (!schema) {
|
||||
// if we get here then the name was missing, since syntax or missing actual CF
|
||||
// checks throw. Slow path, but just call get_table_name to generate exception.
|
||||
get_table_name(request);
|
||||
}
|
||||
return schema;
|
||||
}
|
||||
|
||||
map_type attrs_type() {
|
||||
static thread_local auto t = map_type_impl::get_instance(utf8_type, bytes_type, true);
|
||||
return t;
|
||||
}
|
||||
|
||||
const std::map<sstring, sstring>& get_tags_of_table_or_throw(schema_ptr schema) {
|
||||
auto tags_ptr = db::get_tags_of_table(schema);
|
||||
if (tags_ptr) {
|
||||
return *tags_ptr;
|
||||
} else {
|
||||
throw api_error::validation(format("Table {} does not have valid tagging information", schema->ks_name()));
|
||||
}
|
||||
}
|
||||
|
||||
bool is_alternator_keyspace(std::string_view ks_name) {
|
||||
return ks_name.starts_with(executor::KEYSPACE_NAME_PREFIX);
|
||||
}
|
||||
|
||||
// This tag is set on a GSI when the user did not specify a range key, causing
|
||||
// Alternator to add the base table's range key as a spurious range key. It is
|
||||
// used by describe_key_schema() to suppress reporting that key.
|
||||
extern const sstring SPURIOUS_RANGE_KEY_ADDED_TO_GSI_AND_USER_DIDNT_SPECIFY_RANGE_KEY_TAG_KEY;
|
||||
|
||||
void describe_key_schema(rjson::value& parent, const schema& schema, std::unordered_map<std::string, std::string>* attribute_types, const std::map<sstring, sstring>* tags) {
|
||||
rjson::value key_schema = rjson::empty_array();
|
||||
const bool ignore_range_keys_as_spurious = tags != nullptr && tags->contains(SPURIOUS_RANGE_KEY_ADDED_TO_GSI_AND_USER_DIDNT_SPECIFY_RANGE_KEY_TAG_KEY);
|
||||
|
||||
for (const column_definition& cdef : schema.partition_key_columns()) {
|
||||
rjson::value key = rjson::empty_object();
|
||||
rjson::add(key, "AttributeName", rjson::from_string(cdef.name_as_text()));
|
||||
rjson::add(key, "KeyType", "HASH");
|
||||
rjson::push_back(key_schema, std::move(key));
|
||||
if (attribute_types) {
|
||||
(*attribute_types)[cdef.name_as_text()] = type_to_string(cdef.type);
|
||||
}
|
||||
}
|
||||
if (!ignore_range_keys_as_spurious) {
|
||||
// NOTE: user requested key (there can be at most one) will always come first.
|
||||
// There might be more keys following it, which were added, but those were
|
||||
// not requested by the user, so we ignore them.
|
||||
for (const column_definition& cdef : schema.clustering_key_columns()) {
|
||||
rjson::value key = rjson::empty_object();
|
||||
rjson::add(key, "AttributeName", rjson::from_string(cdef.name_as_text()));
|
||||
rjson::add(key, "KeyType", "RANGE");
|
||||
rjson::push_back(key_schema, std::move(key));
|
||||
if (attribute_types) {
|
||||
(*attribute_types)[cdef.name_as_text()] = type_to_string(cdef.type);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
rjson::add(parent, "KeySchema", std::move(key_schema));
|
||||
}
|
||||
|
||||
// Check if the given string has valid characters for a table name, i.e. only
|
||||
// a-z, A-Z, 0-9, _ (underscore), - (dash), . (dot). Note that this function
|
||||
// does not check the length of the name - instead, use validate_table_name()
|
||||
// to validate both the characters and the length.
|
||||
static bool valid_table_name_chars(std::string_view name) {
|
||||
for (auto c : name) {
|
||||
if ((c < 'a' || c > 'z') &&
|
||||
(c < 'A' || c > 'Z') &&
|
||||
(c < '0' || c > '9') &&
|
||||
c != '_' &&
|
||||
c != '-' &&
|
||||
c != '.') {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
std::string view_name(std::string_view table_name, std::string_view index_name, const std::string& delim, bool validate_len) {
|
||||
if (index_name.length() < 3) {
|
||||
throw api_error::validation("IndexName must be at least 3 characters long");
|
||||
}
|
||||
if (!valid_table_name_chars(index_name)) {
|
||||
throw api_error::validation(
|
||||
fmt::format("IndexName '{}' must satisfy regular expression pattern: [a-zA-Z0-9_.-]+", index_name));
|
||||
}
|
||||
std::string ret = std::string(table_name) + delim + std::string(index_name);
|
||||
if (ret.length() > max_auxiliary_table_name_length && validate_len) {
|
||||
throw api_error::validation(
|
||||
fmt::format("The total length of TableName ('{}') and IndexName ('{}') cannot exceed {} characters",
|
||||
table_name, index_name, max_auxiliary_table_name_length - delim.size()));
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
std::string gsi_name(std::string_view table_name, std::string_view index_name, bool validate_len) {
|
||||
return view_name(table_name, index_name, ":", validate_len);
|
||||
}
|
||||
|
||||
std::string lsi_name(std::string_view table_name, std::string_view index_name, bool validate_len) {
|
||||
return view_name(table_name, index_name, "!:", validate_len);
|
||||
}
|
||||
|
||||
void check_key(const rjson::value& key, const schema_ptr& schema) {
|
||||
if (key.MemberCount() != (schema->clustering_key_size() == 0 ? 1 : 2)) {
|
||||
throw api_error::validation("Given key attribute not in schema");
|
||||
}
|
||||
}
|
||||
|
||||
void verify_all_are_used(const rjson::value* field,
|
||||
const std::unordered_set<std::string>& used, const char* field_name, const char* operation) {
|
||||
if (!field) {
|
||||
return;
|
||||
}
|
||||
for (auto it = field->MemberBegin(); it != field->MemberEnd(); ++it) {
|
||||
if (!used.contains(rjson::to_string(it->name))) {
|
||||
throw api_error::validation(
|
||||
format("{} has spurious '{}', not used in {}",
|
||||
field_name, rjson::to_string_view(it->name), operation));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// This function increments the authorization_failures counter, and may also
|
||||
// log a warn-level message and/or throw an access_denied exception, depending
|
||||
// on what enforce_authorization and warn_authorization are set to.
|
||||
// Note that if enforce_authorization is false, this function will return
|
||||
// without throwing. So a caller that doesn't want to continue after an
|
||||
// authorization_error must explicitly return after calling this function.
|
||||
static void authorization_error(stats& stats, bool enforce_authorization, bool warn_authorization, std::string msg) {
|
||||
stats.authorization_failures++;
|
||||
if (enforce_authorization) {
|
||||
if (warn_authorization) {
|
||||
elogger.warn("alternator_warn_authorization=true: {}", msg);
|
||||
}
|
||||
throw api_error::access_denied(std::move(msg));
|
||||
} else {
|
||||
if (warn_authorization) {
|
||||
elogger.warn("If you set alternator_enforce_authorization=true the following will be enforced: {}", msg);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
future<> verify_permission(
|
||||
bool enforce_authorization,
|
||||
bool warn_authorization,
|
||||
const service::client_state& client_state,
|
||||
const schema_ptr& schema,
|
||||
auth::permission permission_to_check,
|
||||
stats& stats) {
|
||||
if (!enforce_authorization && !warn_authorization) {
|
||||
co_return;
|
||||
}
|
||||
// Unfortunately, the fix for issue #23218 did not modify the function
|
||||
// that we use here - check_has_permissions(). So if we want to allow
|
||||
// writes to internal tables (from try_get_internal_table()) only to a
|
||||
// superuser, we need to explicitly check it here.
|
||||
if (permission_to_check == auth::permission::MODIFY && is_internal_keyspace(schema->ks_name())) {
|
||||
if (!client_state.user() ||
|
||||
!client_state.user()->name ||
|
||||
!co_await client_state.get_auth_service()->underlying_role_manager().is_superuser(*client_state.user()->name)) {
|
||||
sstring username = "<anonymous>";
|
||||
if (client_state.user() && client_state.user()->name) {
|
||||
username = client_state.user()->name.value();
|
||||
}
|
||||
authorization_error(stats, enforce_authorization, warn_authorization, fmt::format(
|
||||
"Write access denied on internal table {}.{} to role {} because it is not a superuser",
|
||||
schema->ks_name(), schema->cf_name(), username));
|
||||
co_return;
|
||||
}
|
||||
}
|
||||
auto resource = auth::make_data_resource(schema->ks_name(), schema->cf_name());
|
||||
if (!client_state.user() || !client_state.user()->name ||
|
||||
!co_await client_state.check_has_permission(auth::command_desc(permission_to_check, resource))) {
|
||||
sstring username = "<anonymous>";
|
||||
if (client_state.user() && client_state.user()->name) {
|
||||
username = client_state.user()->name.value();
|
||||
}
|
||||
// Using exceptions for errors makes this function faster in the
|
||||
// success path (when the operation is allowed).
|
||||
authorization_error(stats, enforce_authorization, warn_authorization, fmt::format(
|
||||
"{} access on table {}.{} is denied to role {}, client address {}",
|
||||
auth::permissions::to_string(permission_to_check),
|
||||
schema->ks_name(), schema->cf_name(), username, client_state.get_client_address()));
|
||||
}
|
||||
}
|
||||
|
||||
// Similar to verify_permission() above, but just for CREATE operations.
|
||||
// Those do not operate on any specific table, so require permissions on
|
||||
// ALL KEYSPACES instead of any specific table.
|
||||
future<> verify_create_permission(bool enforce_authorization, bool warn_authorization, const service::client_state& client_state, stats& stats) {
|
||||
if (!enforce_authorization && !warn_authorization) {
|
||||
co_return;
|
||||
}
|
||||
auto resource = auth::resource(auth::resource_kind::data);
|
||||
if (!co_await client_state.check_has_permission(auth::command_desc(auth::permission::CREATE, resource))) {
|
||||
sstring username = "<anonymous>";
|
||||
if (client_state.user() && client_state.user()->name) {
|
||||
username = client_state.user()->name.value();
|
||||
}
|
||||
authorization_error(stats, enforce_authorization, warn_authorization, fmt::format(
|
||||
"CREATE access on ALL KEYSPACES is denied to role {}", username));
|
||||
}
|
||||
}
|
||||
|
||||
// Handle the special case that table_name begins with
// INTERNAL_TABLE_PREFIX (".scylla.alternator."): the rest of the name is
// interpreted as "<keyspace>.<table>" naming an internal Scylla table.
// Returns a null schema_ptr when the name is not of that form, letting
// the caller look up a regular Alternator table instead.
schema_ptr try_get_internal_table(const data_dictionary::database& db, std::string_view table_name) {
    if (!table_name.starts_with(executor::INTERNAL_TABLE_PREFIX)) {
        return {};
    }
    // Strip the prefix; what remains must be "<keyspace>.<table>".
    table_name.remove_prefix(executor::INTERNAL_TABLE_PREFIX.size());
    const size_t dot = table_name.find_first_of('.');
    if (dot == std::string_view::npos) {
        return {};
    }
    const std::string_view ks_name = table_name.substr(0, dot);
    table_name.remove_prefix(dot + 1);
    // Only internal keyspaces can be accessed to avoid leakage
    auto ks = db.try_find_keyspace(ks_name);
    if (!ks || !ks->is_internal()) {
        return {};
    }
    try {
        return db.find_schema(ks_name, table_name);
    } catch (data_dictionary::no_such_column_family&) {
        // DynamoDB returns validation error even when table does not exist
        // and the table name is invalid.
        validate_table_name(table_name);
        throw api_error::resource_not_found(
            fmt::format("Requested resource not found: Internal table: {}.{} not found", ks_name, table_name));
    }
}
|
||||
|
||||
// Look up the schema for a table named by a JSON member of a batch
// (BatchGetItem/BatchWriteItem) request. Throws resource_not_found (or a
// validation error for an illegal name) when the table does not exist.
schema_ptr get_table_from_batch_request(const service::storage_proxy& proxy, const rjson::value::ConstMemberIterator& batch_request) {
    const sstring table_name = rjson::to_sstring(batch_request->name); // JSON keys are always strings
    const sstring ks_name = sstring(executor::KEYSPACE_NAME_PREFIX) + table_name;
    try {
        return proxy.data_dictionary().find_schema(ks_name, table_name);
    } catch (data_dictionary::no_such_column_family&) {
        // DynamoDB returns validation error even when table does not exist
        // and the table name is invalid.
        validate_table_name(table_name);
        throw api_error::resource_not_found(format("Requested resource not found: Table: {} not found", table_name));
    }
}
|
||||
|
||||
// Return (lazily creating on first use) the per-table Alternator stats
// object attached to the given schema's column family. If the table was
// deleted concurrently, a fresh temporary stats object is returned - its
// metrics simply disappear together with the table.
lw_shared_ptr<stats> get_stats_from_schema(service::storage_proxy& sp, const schema& schema) {
    try {
        auto& cf = sp.local_db().find_column_family(schema.id());
        auto& holder = cf.get_stats().alternator_stats;
        if (!holder) {
            holder = seastar::make_shared<table_stats>(schema.ks_name(), schema.cf_name());
        }
        return holder->_stats;
    } catch (std::runtime_error&) {
        // If we're here it means that a table we are currently working on was deleted before the
        // operation completed, returning a temporary object is fine, if the table get deleted so will its metrics
        return make_lw_shared<stats>();
    }
}
|
||||
|
||||
// Convert one CQL result row into a DynamoDB-style JSON item.
// Key columns become top-level members typed via type_to_string(); the
// special ATTRS_COLUMN_NAME map column is unpacked so each of its entries
// becomes a top-level attribute. attrs_to_get (when set) filters which
// attributes - or parts of attributes, via hierarchy_filter() - are kept.
// If item_length_in_bytes is non-null, the (approximate, DynamoDB-style)
// item length is accumulated into it, counting even attributes that the
// filter drops. `item` is expected to start as an empty JSON object.
void describe_single_item(const cql3::selection::selection& selection,
        const std::vector<managed_bytes_opt>& result_row,
        const std::optional<attrs_to_get>& attrs_to_get,
        rjson::value& item,
        uint64_t* item_length_in_bytes,
        bool include_all_embedded_attributes)
{
    const auto& columns = selection.get_columns();
    // columns and result_row are iterated in lock-step: the i'th cell
    // belongs to the i'th selected column.
    auto column_it = columns.begin();
    for (const managed_bytes_opt& cell : result_row) {
        if (!cell) {
            // Null cell - nothing to add, but keep the iterators aligned.
            ++column_it;
            continue;
        }
        std::string column_name = (*column_it)->name_as_text();
        if (column_name != executor::ATTRS_COLUMN_NAME) {
            // A real (key) column: its name and raw cell size count
            // towards the item length.
            if (item_length_in_bytes) {
                (*item_length_in_bytes) += column_name.length() + cell->size();
            }
            if (!attrs_to_get || attrs_to_get->contains(column_name)) {
                // item is expected to start empty, and column_name are unique
                // so add() makes sense
                rjson::add_with_string_name(item, column_name, rjson::empty_object());
                rjson::value& field = item[column_name.c_str()];
                cell->with_linearized([&] (bytes_view linearized_cell) {
                    rjson::add_with_string_name(field, type_to_string((*column_it)->type), json_key_column_value(linearized_cell, **column_it));
                });
            }
        } else {
            // The ":attrs" column: a map<text, bytes> holding all non-key
            // attributes; unpack each entry into a top-level attribute.
            auto deserialized = attrs_type()->deserialize(*cell);
            auto keys_and_values = value_cast<map_type_impl::native_type>(deserialized);
            for (auto entry : keys_and_values) {
                std::string attr_name = value_cast<sstring>(entry.first);
                // The attribute name is counted even if the attribute is
                // later filtered out - matches DynamoDB's size accounting.
                if (item_length_in_bytes) {
                    (*item_length_in_bytes) += attr_name.length();
                }
                if (include_all_embedded_attributes || !attrs_to_get || attrs_to_get->contains(attr_name)) {
                    bytes value = value_cast<bytes>(entry.second);
                    if (item_length_in_bytes && value.length()) {
                        // ScyllaDB uses one extra byte compared to DynamoDB for the bytes length
                        (*item_length_in_bytes) += value.length() - 1;
                    }
                    rjson::value v = deserialize_item(value);
                    if (attrs_to_get) {
                        auto it = attrs_to_get->find(attr_name);
                        if (it != attrs_to_get->end()) {
                            // attrs_to_get may have asked for only part of
                            // this attribute. hierarchy_filter() modifies v,
                            // and returns false when nothing is to be kept.
                            if (!hierarchy_filter(v, it->second)) {
                                continue;
                            }
                        }
                    }
                    // item is expected to start empty, and attribute
                    // names are unique so add() makes sense
                    rjson::add_with_string_name(item, attr_name, std::move(v));
                } else if (item_length_in_bytes) {
                    // Filtered out, but its value size still counts
                    // towards the item length (minus the extra byte, as
                    // above).
                    (*item_length_in_bytes) += value_cast<bytes>(entry.second).length() - 1;
                }
            }
        }
        ++column_it;
    }
}
|
||||
|
||||
// Build the JSON item for a single-row query result, or return an empty
// optional when the result has no rows (the response should then have no
// "Item" member at all). Throws std::logic_error if the result holds more
// than one row - such callers must use describe_multi_item() instead.
// If item_length_in_bytes is non-null, the item's approximate length is
// accumulated into it (an empty result counts as 1 byte).
std::optional<rjson::value> describe_single_item(schema_ptr schema,
        const query::partition_slice& slice,
        const cql3::selection::selection& selection,
        const query::result& query_result,
        const std::optional<attrs_to_get>& attrs_to_get,
        uint64_t* item_length_in_bytes) {
    cql3::selection::result_set_builder builder(selection, gc_clock::now());
    query::result_view::consume(query_result, slice, cql3::selection::result_set_builder::visitor(builder, *schema, selection));

    auto rows = builder.build();
    if (rows->empty()) {
        if (item_length_in_bytes) {
            // empty results is counted as having a minimal length (e.g. 1 byte).
            (*item_length_in_bytes) += 1;
        }
        // If there is no matching item, we're supposed to return an empty
        // object without an Item member - not one with an empty Item member
        return std::nullopt;
    }
    if (rows->size() > 1) {
        // If the result set contains multiple rows, the code should have
        // called describe_multi_item(), not this function.
        throw std::logic_error("describe_single_item() asked to describe multiple items");
    }
    rjson::value item = rjson::empty_object();
    describe_single_item(selection, *rows->rows().begin(), attrs_to_get, item, item_length_in_bytes);
    return item;
}
|
||||
|
||||
static void check_big_array(const rjson::value& val, int& size_left);
|
||||
static void check_big_object(const rjson::value& val, int& size_left);
|
||||
|
||||
// For simplicity, we use a recursive implementation. This is fine because
|
||||
// Alternator limits the depth of JSONs it reads from inputs, and doesn't
|
||||
// add more than a couple of levels in its own output construction.
|
||||
bool is_big(const rjson::value& val, int big_size) {
|
||||
if (val.IsString()) {
|
||||
return ssize_t(val.GetStringLength()) > big_size;
|
||||
} else if (val.IsObject()) {
|
||||
check_big_object(val, big_size);
|
||||
return big_size < 0;
|
||||
} else if (val.IsArray()) {
|
||||
check_big_array(val, big_size);
|
||||
return big_size < 0;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Deduct an approximate serialized size of the given JSON array from
// size_left, stopping early once size_left goes negative ("big enough").
static void check_big_array(const rjson::value& val, int& size_left) {
    // Assume a fixed size of 10 bytes for each number, boolean, etc., or
    // beginning of a sub-object. This doesn't have to be accurate.
    size_left -= 10 * val.Size();
    for (const auto& element : val.GetArray()) {
        if (size_left < 0) {
            // Already over budget - no need to look further.
            break;
        }
        // Note that we avoid recursive calls for the leaves (anything except
        // array or object) because usually those greatly outnumber the trunk.
        if (element.IsString()) {
            size_left -= element.GetStringLength();
        } else if (element.IsObject()) {
            check_big_object(element, size_left);
        } else if (element.IsArray()) {
            check_big_array(element, size_left);
        }
    }
}
|
||||
|
||||
// Deduct an approximate serialized size of the given JSON object from
// size_left, stopping early once size_left goes negative ("big enough").
static void check_big_object(const rjson::value& val, int& size_left) {
    // 10 bytes of fixed overhead per member - see check_big_array().
    size_left -= 10 * val.MemberCount();
    for (const auto& member : val.GetObject()) {
        if (size_left < 0) {
            // Already over budget - no need to look further.
            break;
        }
        size_left -= member.name.GetStringLength();
        // Like check_big_array(), only recurse into containers; leaves
        // are handled inline.
        if (member.value.IsString()) {
            size_left -= member.value.GetStringLength();
        } else if (member.value.IsObject()) {
            check_big_object(member.value, size_left);
        } else if (member.value.IsArray()) {
            check_big_array(member.value, size_left);
        }
    }
}
|
||||
|
||||
void validate_table_name(std::string_view name, const char* source) {
|
||||
if (name.length() < 3 || name.length() > max_table_name_length) {
|
||||
throw api_error::validation(
|
||||
format("{} must be at least 3 characters long and at most {} characters long", source, max_table_name_length));
|
||||
}
|
||||
if (!valid_table_name_chars(name)) {
|
||||
throw api_error::validation(
|
||||
format("{} must satisfy regular expression pattern: [a-zA-Z0-9_.-]+", source));
|
||||
}
|
||||
}
|
||||
|
||||
void validate_cdc_log_name_length(std::string_view table_name) {
|
||||
if (cdc::log_name(table_name).length() > max_auxiliary_table_name_length) {
|
||||
// CDC will add cdc_log_suffix ("_scylla_cdc_log") to the table name
|
||||
// to create its log table, and this will exceed the maximum allowed
|
||||
// length. To provide a more helpful error message, we assume that
|
||||
// cdc::log_name() always adds a suffix of the same length.
|
||||
int suffix_len = cdc::log_name(table_name).length() - table_name.length();
|
||||
throw api_error::validation(fmt::format("Streams or vector search cannot be enabled on a table whose name is longer than {} characters: {}",
|
||||
max_auxiliary_table_name_length - suffix_len, table_name));
|
||||
}
|
||||
}
|
||||
|
||||
// Wrap a JSON value in a body_writer that serializes it incrementally to
// the HTTP output stream. The value is moved into the returned closure and
// destroyed gently (incrementally, to avoid reactor stalls on huge JSONs)
// after it has been written. Any exception raised while printing is saved
// and rethrown only AFTER the stream is closed and the value destroyed, so
// cleanup always runs.
body_writer make_streamed(rjson::value&& value) {
    return [value = std::move(value)](output_stream<char>&& _out) mutable -> future<> {
        // Take ownership of the stream so it is closed on every path.
        auto out = std::move(_out);
        std::exception_ptr ex;
        try {
            co_await rjson::print(value, out);
        } catch (...) {
            // Defer the failure - the stream must still be closed and the
            // value destroyed first.
            ex = std::current_exception();
        }
        co_await out.close();
        co_await rjson::destroy_gently(std::move(value));
        if (ex) {
            // Re-raise the saved print() failure now that cleanup is done.
            co_await coroutine::return_exception_ptr(std::move(ex));
        }
    };
}
|
||||
|
||||
} // namespace alternator
|
||||
@@ -1,247 +0,0 @@
|
||||
/*
|
||||
* Copyright 2019-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
*/
|
||||
|
||||
// This header file, and the implementation file executor_util.cc, contain
|
||||
// various utility functions that are reused in many different operations
|
||||
// (API requests) across Alternator's code - in files such as executor.cc,
|
||||
// executor_read.cc, streams.cc, ttl.cc, and more. These utility functions
|
||||
// include things like extracting and validating pieces from a JSON request,
|
||||
// checking permissions, constructing auxiliary table names, and more.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <map>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
#include <unordered_map>
|
||||
#include <unordered_set>
|
||||
|
||||
#include <seastar/core/future.hh>
|
||||
#include <seastar/util/noncopyable_function.hh>
|
||||
|
||||
#include "utils/rjson.hh"
|
||||
#include "schema/schema_fwd.hh"
|
||||
#include "types/types.hh"
|
||||
#include "auth/permission.hh"
|
||||
#include "alternator/stats.hh"
|
||||
#include "alternator/attribute_path.hh"
|
||||
#include "utils/managed_bytes.hh"
|
||||
|
||||
namespace query { class partition_slice; class result; }
|
||||
namespace cql3::selection { class selection; }
|
||||
namespace data_dictionary { class database; }
|
||||
namespace service { class storage_proxy; class client_state; }
|
||||
|
||||
namespace alternator {
|
||||
|
||||
/// The body_writer is used for streaming responses - where the response body
|
||||
/// is written in chunks to the output_stream. This allows for efficient
|
||||
/// handling of large responses without needing to allocate a large buffer in
|
||||
/// memory. It is one of the variants of executor::request_return_type.
|
||||
using body_writer = noncopyable_function<future<>(output_stream<char>&&)>;
|
||||
|
||||
/// Get the value of an integer attribute, or an empty optional if it is
|
||||
/// missing. If the attribute exists, but is not an integer, a descriptive
|
||||
/// api_error is thrown.
|
||||
std::optional<int> get_int_attribute(const rjson::value& value, std::string_view attribute_name);
|
||||
|
||||
/// Get the value of a string attribute, or a default value if it is missing.
|
||||
/// If the attribute exists, but is not a string, a descriptive api_error is
|
||||
/// thrown.
|
||||
std::string get_string_attribute(const rjson::value& value, std::string_view attribute_name, const char* default_return);
|
||||
|
||||
/// Get the value of a boolean attribute, or a default value if it is missing.
|
||||
/// If the attribute exists, but is not a bool, a descriptive api_error is
|
||||
/// thrown.
|
||||
bool get_bool_attribute(const rjson::value& value, std::string_view attribute_name, bool default_return);
|
||||
|
||||
/// Extract table name from a request.
|
||||
/// Most requests expect the table's name to be listed in a "TableName" field.
|
||||
/// get_table_name() returns the name or api_error in case the table name is
|
||||
/// missing or not a string.
|
||||
std::string get_table_name(const rjson::value& request);
|
||||
|
||||
/// find_table_name() is like get_table_name() except that it returns an
|
||||
/// optional table name - it returns an empty optional when the TableName
|
||||
/// is missing from the request, instead of throwing as get_table_name()
|
||||
/// does. However, find_table_name() still throws if a TableName exists but
|
||||
/// is not a string.
|
||||
std::optional<std::string> find_table_name(const rjson::value& request);
|
||||
|
||||
/// Extract table schema from a request.
|
||||
/// Many requests expect the table's name to be listed in a "TableName" field
|
||||
/// and need to look it up as an existing table. The get_table() function
|
||||
/// does this, with the appropriate validation and api_error in case the table
|
||||
/// name is missing, invalid or the table doesn't exist. If everything is
|
||||
/// successful, it returns the table's schema.
|
||||
schema_ptr get_table(service::storage_proxy& proxy, const rjson::value& request);
|
||||
|
||||
/// This find_table() variant is like get_table() excepts that it returns a
|
||||
/// nullptr instead of throwing if the request does not mention a TableName.
|
||||
/// In other cases of errors (i.e., a table is mentioned but doesn't exist)
|
||||
/// this function throws too.
|
||||
schema_ptr find_table(service::storage_proxy& proxy, const rjson::value& request);
|
||||
|
||||
/// This find_table() variant is like the previous one except that it takes
|
||||
/// the table name directly instead of a request object. It is used in cases
|
||||
/// where we already have the table name extracted from the request.
|
||||
schema_ptr find_table(service::storage_proxy& proxy, std::string_view table_name);
|
||||
|
||||
// We would have liked to support table names up to 255 bytes, like DynamoDB.
|
||||
// But Scylla creates a directory whose name is the table's name plus 33
|
||||
// bytes (dash and UUID), and since directory names are limited to 255 bytes,
|
||||
// we need to limit table names to 222 bytes, instead of 255. See issue #4480.
|
||||
// We actually have two limits here,
|
||||
// * max_table_name_length is the limit that Alternator will impose on names
|
||||
// of new Alternator tables.
|
||||
// * max_auxiliary_table_name_length is the potentially higher absolute limit
|
||||
// that Scylla imposes on the names of auxiliary tables that Alternator
|
||||
// wants to create internally - i.e. materialized views or CDC log tables.
|
||||
// The second limit might mean that it is not possible to add a GSI to an
|
||||
// existing table, because the name of the new auxiliary table may go over
|
||||
// the limit. The second limit is also one of the reasons why the first limit
|
||||
// is set lower than 222 - to have room to enable streams which add the extra
|
||||
// suffix "_scylla_cdc_log" to the table name.
|
||||
inline constexpr int max_table_name_length = 192;
|
||||
inline constexpr int max_auxiliary_table_name_length = 222;
|
||||
|
||||
/// validate_table_name() validates the TableName parameter in a request - it
|
||||
/// should be called in CreateTable, and in other requests only when noticing
|
||||
/// that the named table doesn't exist.
|
||||
/// The DynamoDB developer guide, https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.NamingRules
|
||||
/// specifies that table "names must be between 3 and 255 characters long and
|
||||
/// can contain only the following characters: a-z, A-Z, 0-9, _ (underscore),
|
||||
/// - (dash), . (dot)". However, Alternator only allows max_table_name_length
|
||||
/// characters (see above) - not 255.
|
||||
/// validate_table_name() throws the appropriate api_error if this validation
|
||||
/// fails.
|
||||
void validate_table_name(std::string_view name, const char* source = "TableName");
|
||||
|
||||
/// Validate that a CDC log table could be created for the base table with a
|
||||
/// given table_name, and if not, throw a user-visible api_error::validation.
|
||||
/// It is not possible to create a CDC log table if the table name is so long
|
||||
/// that adding the 15-character suffix "_scylla_cdc_log" (cdc_log_suffix)
|
||||
/// makes it go over max_auxiliary_table_name_length.
|
||||
/// Note that if max_table_name_length is set to less than 207 (which is
|
||||
/// max_auxiliary_table_name_length-15), then this function will never
|
||||
/// fail. However, it's still important to call it in UpdateTable, in case
|
||||
/// we have pre-existing tables with names longer than this to avoid #24598.
|
||||
void validate_cdc_log_name_length(std::string_view table_name);
|
||||
|
||||
/// Checks if a keyspace, given by its name, is an Alternator keyspace.
|
||||
/// This just checks if the name begins in executor::KEYSPACE_NAME_PREFIX,
|
||||
/// a prefix that all keyspaces created by Alternator's CreateTable use.
|
||||
bool is_alternator_keyspace(std::string_view ks_name);
|
||||
|
||||
/// Wraps db::get_tags_of_table() and throws api_error::validation if the
|
||||
/// table is missing the tags extension.
|
||||
const std::map<sstring, sstring>& get_tags_of_table_or_throw(schema_ptr schema);
|
||||
|
||||
/// Returns a type object representing the type of the ":attrs" column used
|
||||
/// by Alternator to store all non-key attribute. This type is a map from
|
||||
/// string (attribute name) to bytes (serialized attribute value).
|
||||
map_type attrs_type();
|
||||
|
||||
// In DynamoDB index names are local to a table, while in Scylla, materialized
|
||||
// view names are global (in a keyspace). So we need to compose a unique name
|
||||
// for the view taking into account both the table's name and the index name.
|
||||
// We concatenate the table and index name separated by a delim character
|
||||
// (a character not allowed by DynamoDB in ordinary table names, default: ":").
|
||||
// The downside of this approach is that it limits the sum of the lengths,
|
||||
// instead of each component individually as DynamoDB does.
|
||||
// The view_name() function assumes the table_name has already been validated
|
||||
// but validates the legality of index_name and the combination of both.
|
||||
std::string view_name(std::string_view table_name, std::string_view index_name,
|
||||
const std::string& delim = ":", bool validate_len = true);
|
||||
std::string gsi_name(std::string_view table_name, std::string_view index_name,
|
||||
bool validate_len = true);
|
||||
std::string lsi_name(std::string_view table_name, std::string_view index_name,
|
||||
bool validate_len = true);
|
||||
|
||||
/// After calling pk_from_json() and ck_from_json() to extract the pk and ck
|
||||
/// components of a key, and if that succeeded, call check_key() to further
|
||||
/// check that the key doesn't have any spurious components.
|
||||
void check_key(const rjson::value& key, const schema_ptr& schema);
|
||||
|
||||
/// Fail with api_error::validation if the expression if has unused attribute
|
||||
/// names or values. This is how DynamoDB behaves, so we do too.
|
||||
void verify_all_are_used(const rjson::value* field,
|
||||
const std::unordered_set<std::string>& used,
|
||||
const char* field_name,
|
||||
const char* operation);
|
||||
|
||||
/// Check CQL's Role-Based Access Control (RBAC) permission (MODIFY,
|
||||
/// SELECT, DROP, etc.) on the given table. When permission is denied an
|
||||
/// appropriate user-readable api_error::access_denied is thrown.
|
||||
future<> verify_permission(bool enforce_authorization, bool warn_authorization, const service::client_state&, const schema_ptr&, auth::permission, stats& stats);
|
||||
|
||||
/// Similar to verify_permission() above, but just for CREATE operations.
|
||||
/// Those do not operate on any specific table, so require permissions on
|
||||
/// ALL KEYSPACES instead of any specific table.
|
||||
future<> verify_create_permission(bool enforce_authorization, bool warn_authorization, const service::client_state&, stats& stats);
|
||||
|
||||
// Sets a KeySchema JSON array inside the given parent object describing the
|
||||
// key attributes of the given schema as HASH or RANGE keys. Additionally,
|
||||
// adds mappings from key attribute names to their DynamoDB type string into
|
||||
// attribute_types.
|
||||
void describe_key_schema(rjson::value& parent, const schema&, std::unordered_map<std::string, std::string>* attribute_types = nullptr, const std::map<sstring, sstring>* tags = nullptr);
|
||||
|
||||
/// is_big() checks approximately if the given JSON value is "bigger" than
|
||||
/// the given big_size number of bytes. The goal is to *quickly* detect
|
||||
/// oversized JSON that, for example, is too large to be serialized to a
|
||||
/// contiguous string - we don't need an accurate size for that. Moreover,
|
||||
/// as soon as we detect that the JSON is indeed "big", we can return true
|
||||
/// and don't need to continue calculating its exact size.
|
||||
bool is_big(const rjson::value& val, int big_size = 100'000);
|
||||
|
||||
/// try_get_internal_table() handles the special case that the given table_name
|
||||
/// begins with INTERNAL_TABLE_PREFIX (".scylla.alternator."). In that case,
|
||||
/// this function assumes that the rest of the name refers to an internal
|
||||
/// Scylla table (e.g., system table) and returns the schema of that table -
|
||||
/// or an exception if it doesn't exist. Otherwise, if table_name does not
|
||||
/// start with INTERNAL_TABLE_PREFIX, this function returns an empty schema_ptr
|
||||
/// and the caller should look for a normal Alternator table with that name.
|
||||
schema_ptr try_get_internal_table(const data_dictionary::database& db, std::string_view table_name);
|
||||
|
||||
/// get_table_from_batch_request() is used by batch write/read operations to
|
||||
/// look up the schema for a table named in a batch request, by the JSON member
|
||||
/// name (which is the table name in a BatchWriteItem or BatchGetItem request).
|
||||
schema_ptr get_table_from_batch_request(const service::storage_proxy& proxy, const rjson::value::ConstMemberIterator& batch_request);
|
||||
|
||||
/// Returns (or lazily creates) the per-table stats object for the given schema.
|
||||
/// If the table has been deleted, returns a temporary stats object.
|
||||
lw_shared_ptr<stats> get_stats_from_schema(service::storage_proxy& sp, const schema& schema);
|
||||
|
||||
/// Writes one item's attributes into `item` from the given selection result
|
||||
/// row. If include_all_embedded_attributes is true, all attributes from the
|
||||
/// ATTRS_COLUMN map column are included regardless of attrs_to_get.
|
||||
void describe_single_item(const cql3::selection::selection&,
|
||||
const std::vector<managed_bytes_opt>&,
|
||||
const std::optional<attrs_to_get>&,
|
||||
rjson::value&,
|
||||
uint64_t* item_length_in_bytes = nullptr,
|
||||
bool include_all_embedded_attributes = false);
|
||||
|
||||
/// Converts a single result row to a JSON item, or returns an empty optional
|
||||
/// if the result is empty.
|
||||
std::optional<rjson::value> describe_single_item(schema_ptr,
|
||||
const query::partition_slice&,
|
||||
const cql3::selection::selection&,
|
||||
const query::result&,
|
||||
const std::optional<attrs_to_get>&,
|
||||
uint64_t* item_length_in_bytes = nullptr);
|
||||
|
||||
/// Make a body_writer (function that can write output incrementally to the
|
||||
/// HTTP stream) from the given JSON object.
|
||||
/// Note: only useful for (very) large objects as there are overhead issues
|
||||
/// with this as well, but for massive lists of return objects this can
|
||||
/// help avoid large allocations/many re-allocs.
|
||||
body_writer make_streamed(rjson::value&&);
|
||||
|
||||
} // namespace alternator
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include "expressions.hh"
|
||||
@@ -744,7 +744,7 @@ void validate_attr_name_length(std::string_view supplementary_context, size_t at
|
||||
constexpr const size_t DYNAMODB_NONKEY_ATTR_NAME_SIZE_MAX = 65535;
|
||||
|
||||
const size_t max_length = is_key ? DYNAMODB_KEY_ATTR_NAME_SIZE_MAX : DYNAMODB_NONKEY_ATTR_NAME_SIZE_MAX;
|
||||
if (attr_name_length > max_length || attr_name_length == 0) {
|
||||
if (attr_name_length > max_length) {
|
||||
std::string error_msg;
|
||||
if (!error_msg_prefix.empty()) {
|
||||
error_msg += error_msg_prefix;
|
||||
@@ -754,11 +754,7 @@ void validate_attr_name_length(std::string_view supplementary_context, size_t at
|
||||
error_msg += supplementary_context;
|
||||
error_msg += " - ";
|
||||
}
|
||||
if (attr_name_length == 0) {
|
||||
error_msg += "Empty attribute name";
|
||||
} else {
|
||||
error_msg += fmt::format("Attribute name is too large, must be less than {} bytes", std::to_string(max_length + 1));
|
||||
}
|
||||
error_msg += fmt::format("Attribute name is too large, must be less than {} bytes", std::to_string(max_length + 1));
|
||||
throw api_error::validation(error_msg);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
/*
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include "alternator/http_compression.hh"
|
||||
@@ -264,7 +264,7 @@ private:
|
||||
}
|
||||
};
|
||||
|
||||
body_writer compress(response_compressor::compression_type ct, const db::config& cfg, body_writer&& bw) {
|
||||
executor::body_writer compress(response_compressor::compression_type ct, const db::config& cfg, executor::body_writer&& bw) {
|
||||
return [bw = std::move(bw), ct, level = cfg.alternator_response_gzip_compression_level()](output_stream<char>&& out) mutable -> future<> {
|
||||
output_stream_options opts;
|
||||
opts.trim_to_size = true;
|
||||
@@ -287,7 +287,7 @@ body_writer compress(response_compressor::compression_type ct, const db::config&
|
||||
};
|
||||
}
|
||||
|
||||
future<std::unique_ptr<http::reply>> response_compressor::generate_reply(std::unique_ptr<http::reply> rep, sstring accept_encoding, const char* content_type, body_writer&& body_writer) {
|
||||
future<std::unique_ptr<http::reply>> response_compressor::generate_reply(std::unique_ptr<http::reply> rep, sstring accept_encoding, const char* content_type, executor::body_writer&& body_writer) {
|
||||
response_compressor::compression_type ct = find_compression(accept_encoding, std::numeric_limits<size_t>::max());
|
||||
if (ct != response_compressor::compression_type::none) {
|
||||
rep->add_header("Content-Encoding", get_encoding_name(ct));
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
@@ -85,7 +85,7 @@ public:
|
||||
future<std::unique_ptr<http::reply>> generate_reply(std::unique_ptr<http::reply> rep,
|
||||
sstring accept_encoding, const char* content_type, std::string&& response_body);
|
||||
future<std::unique_ptr<http::reply>> generate_reply(std::unique_ptr<http::reply> rep,
|
||||
sstring accept_encoding, const char* content_type, body_writer&& body_writer);
|
||||
sstring accept_encoding, const char* content_type, executor::body_writer&& body_writer);
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include "expressions.hh"
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include "utils/base64.hh"
|
||||
@@ -14,12 +14,12 @@
|
||||
#include "types/concrete_types.hh"
|
||||
#include "types/json_utils.hh"
|
||||
#include "mutation/position_in_partition.hh"
|
||||
#include "alternator/executor_util.hh"
|
||||
|
||||
static logging::logger slogger("alternator-serialization");
|
||||
|
||||
namespace alternator {
|
||||
|
||||
bool is_alternator_keyspace(const sstring& ks_name);
|
||||
|
||||
type_info type_info_from_string(std::string_view type) {
|
||||
static thread_local const std::unordered_map<std::string_view, type_info> type_infos = {
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -3,12 +3,10 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include "alternator/server.hh"
|
||||
#include "audit/audit.hh"
|
||||
#include "alternator/executor_util.hh"
|
||||
#include "gms/application_state.hh"
|
||||
#include "utils/log.hh"
|
||||
#include <fmt/ranges.h>
|
||||
@@ -144,7 +142,7 @@ public:
|
||||
return _response_compressor.generate_reply(std::move(rep), std::move(accept_encoding),
|
||||
REPLY_CONTENT_TYPE, std::move(str));
|
||||
},
|
||||
[&] (body_writer&& body_writer) {
|
||||
[&] (executor::body_writer&& body_writer) {
|
||||
return _response_compressor.generate_reply(std::move(rep), std::move(accept_encoding),
|
||||
REPLY_CONTENT_TYPE, std::move(body_writer));
|
||||
},
|
||||
@@ -787,25 +785,12 @@ future<executor::request_return_type> server::handle_api_request(std::unique_ptr
|
||||
auto f = [this, content = std::move(content), &callback = callback_it->second,
|
||||
client_state = std::move(client_state), trace_state = std::move(trace_state),
|
||||
units = std::move(units), req = std::move(req)] () mutable -> future<executor::request_return_type> {
|
||||
rjson::value json_request = co_await _json_parser.parse(std::move(content));
|
||||
if (!json_request.IsObject()) {
|
||||
co_return api_error::validation("Request content must be an object");
|
||||
}
|
||||
std::unique_ptr<audit::audit_info_alternator> audit_info;
|
||||
std::exception_ptr ex = {};
|
||||
executor::request_return_type ret;
|
||||
try {
|
||||
ret = co_await callback(_executor, client_state, trace_state, make_service_permit(std::move(units)), std::move(json_request), std::move(req), audit_info);
|
||||
} catch (...) {
|
||||
ex = std::current_exception();
|
||||
}
|
||||
if (audit_info) {
|
||||
co_await audit::inspect(*audit_info, client_state, ex != nullptr);
|
||||
}
|
||||
if (ex) {
|
||||
co_return coroutine::exception(std::move(ex));
|
||||
}
|
||||
co_return ret;
|
||||
rjson::value json_request = co_await _json_parser.parse(std::move(content));
|
||||
if (!json_request.IsObject()) {
|
||||
co_return api_error::validation("Request content must be an object");
|
||||
}
|
||||
co_return co_await callback(_executor, client_state, trace_state,
|
||||
make_service_permit(std::move(units)), std::move(json_request), std::move(req));
|
||||
};
|
||||
co_return co_await _sl_controller.with_user_service_level(user, std::ref(f));
|
||||
}
|
||||
@@ -849,77 +834,77 @@ server::server(executor& exec, service::storage_proxy& proxy, gms::gossiper& gos
|
||||
, _pending_requests("alternator::server::pending_requests")
|
||||
, _timeout_config(_proxy.data_dictionary().get_config())
|
||||
, _callbacks{
|
||||
{"CreateTable", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) {
|
||||
return e.create_table(client_state, std::move(trace_state), std::move(permit), std::move(json_request), audit_info);
|
||||
{"CreateTable", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.create_table(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"DescribeTable", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) {
|
||||
return e.describe_table(client_state, std::move(trace_state), std::move(permit), std::move(json_request), audit_info);
|
||||
{"DescribeTable", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.describe_table(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"DeleteTable", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) {
|
||||
return e.delete_table(client_state, std::move(trace_state), std::move(permit), std::move(json_request), audit_info);
|
||||
{"DeleteTable", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.delete_table(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"UpdateTable", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) {
|
||||
return e.update_table(client_state, std::move(trace_state), std::move(permit), std::move(json_request), audit_info);
|
||||
{"UpdateTable", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.update_table(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"PutItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) {
|
||||
return e.put_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request), audit_info);
|
||||
{"PutItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.put_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"UpdateItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) {
|
||||
return e.update_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request), audit_info);
|
||||
{"UpdateItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.update_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"GetItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) {
|
||||
return e.get_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request), audit_info);
|
||||
{"GetItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.get_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"DeleteItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) {
|
||||
return e.delete_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request), audit_info);
|
||||
{"DeleteItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.delete_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"ListTables", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) {
|
||||
return e.list_tables(client_state, std::move(permit), std::move(json_request), audit_info);
|
||||
{"ListTables", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.list_tables(client_state, std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"Scan", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) {
|
||||
return e.scan(client_state, std::move(trace_state), std::move(permit), std::move(json_request), audit_info);
|
||||
{"Scan", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.scan(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"DescribeEndpoints", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) {
|
||||
return e.describe_endpoints(client_state, std::move(permit), std::move(json_request), req->get_header("Host"), audit_info);
|
||||
{"DescribeEndpoints", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.describe_endpoints(client_state, std::move(permit), std::move(json_request), req->get_header("Host"));
|
||||
}},
|
||||
{"BatchWriteItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) {
|
||||
return e.batch_write_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request), audit_info);
|
||||
{"BatchWriteItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.batch_write_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"BatchGetItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) {
|
||||
return e.batch_get_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request), audit_info);
|
||||
{"BatchGetItem", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.batch_get_item(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"Query", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) {
|
||||
return e.query(client_state, std::move(trace_state), std::move(permit), std::move(json_request), audit_info);
|
||||
{"Query", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.query(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"TagResource", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) {
|
||||
return e.tag_resource(client_state, std::move(permit), std::move(json_request), audit_info);
|
||||
{"TagResource", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.tag_resource(client_state, std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"UntagResource", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) {
|
||||
return e.untag_resource(client_state, std::move(permit), std::move(json_request), audit_info);
|
||||
{"UntagResource", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.untag_resource(client_state, std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"ListTagsOfResource", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) {
|
||||
return e.list_tags_of_resource(client_state, std::move(permit), std::move(json_request), audit_info);
|
||||
{"ListTagsOfResource", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.list_tags_of_resource(client_state, std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"UpdateTimeToLive", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) {
|
||||
return e.update_time_to_live(client_state, std::move(permit), std::move(json_request), audit_info);
|
||||
{"UpdateTimeToLive", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.update_time_to_live(client_state, std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"DescribeTimeToLive", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) {
|
||||
return e.describe_time_to_live(client_state, std::move(permit), std::move(json_request), audit_info);
|
||||
{"DescribeTimeToLive", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.describe_time_to_live(client_state, std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"ListStreams", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) {
|
||||
return e.list_streams(client_state, std::move(permit), std::move(json_request), audit_info);
|
||||
{"ListStreams", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.list_streams(client_state, std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"DescribeStream", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) {
|
||||
return e.describe_stream(client_state, std::move(permit), std::move(json_request), audit_info);
|
||||
{"DescribeStream", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.describe_stream(client_state, std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"GetShardIterator", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) {
|
||||
return e.get_shard_iterator(client_state, std::move(permit), std::move(json_request), audit_info);
|
||||
{"GetShardIterator", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.get_shard_iterator(client_state, std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"GetRecords", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) {
|
||||
return e.get_records(client_state, std::move(trace_state), std::move(permit), std::move(json_request), audit_info);
|
||||
{"GetRecords", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.get_records(client_state, std::move(trace_state), std::move(permit), std::move(json_request));
|
||||
}},
|
||||
{"DescribeContinuousBackups", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req, std::unique_ptr<audit::audit_info_alternator>& audit_info) {
|
||||
return e.describe_continuous_backups(client_state, std::move(permit), std::move(json_request), audit_info);
|
||||
{"DescribeContinuousBackups", [] (executor& e, executor::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit, rjson::value json_request, std::unique_ptr<request> req) {
|
||||
return e.describe_continuous_backups(client_state, std::move(permit), std::move(json_request));
|
||||
}},
|
||||
} {
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
@@ -34,7 +34,7 @@ class server : public peering_sharded_service<server> {
|
||||
// DynamoDB also has the same limit set to 16 MB.
|
||||
static constexpr size_t request_content_length_limit = 16*MB;
|
||||
using alternator_callback = std::function<future<executor::request_return_type>(executor&, executor::client_state&,
|
||||
tracing::trace_state_ptr, service_permit, rjson::value, std::unique_ptr<http::request>, std::unique_ptr<audit::audit_info_alternator>&)>;
|
||||
tracing::trace_state_ptr, service_permit, rjson::value, std::unique_ptr<http::request>)>;
|
||||
using alternator_callbacks_map = std::unordered_map<std::string_view, alternator_callback>;
|
||||
|
||||
httpd::http_server _http_server;
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include "stats.hh"
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,62 +0,0 @@
|
||||
/*
|
||||
* Copyright 2026-present ScyllaDB
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "utils/chunked_vector.hh"
|
||||
#include "cdc/generation.hh"
|
||||
#include <generator>
|
||||
|
||||
namespace cdc {
|
||||
class stream_id;
|
||||
}
|
||||
|
||||
namespace alternator {
|
||||
class stream_id_range {
|
||||
// helper class for manipulating (possibly wrapped around) range of stream_ids
|
||||
// it holds one or two ranges [lo1, end1) and [lo2, end2)
|
||||
// if the range doesn't wrap around, then lo2 == end2 == items.end()
|
||||
// if the range wraps around, then
|
||||
// `lo1 == items.begin() and end2 == items.end()` must be true
|
||||
// the object doesn't own `items`, but it does manipulate it - it will
|
||||
// reorder elements (so both ranges were next to each other) and sort them by unsigned comparison
|
||||
// usage - create an object with needed ranges. before iteration call `prepare_for_iterating` method -
|
||||
// it will reorder elements of `items` array to what is needed and then call begin / end pair.
|
||||
// note - `items` array will be modified - elements will be reordered, but no elements will be added or removed.
|
||||
// `items` array must stay intact as long as iteration is in progress.
|
||||
utils::chunked_vector<cdc::stream_id>::iterator _lo1 = {}, _end1 = {}, _lo2 = {}, _end2 = {};
|
||||
const cdc::stream_id* _skip_to = nullptr;
|
||||
bool _prepared = false;
|
||||
public:
|
||||
stream_id_range(
|
||||
utils::chunked_vector<cdc::stream_id> &items,
|
||||
utils::chunked_vector<cdc::stream_id>::iterator lo1,
|
||||
utils::chunked_vector<cdc::stream_id>::iterator end1);
|
||||
stream_id_range(
|
||||
utils::chunked_vector<cdc::stream_id> &items,
|
||||
utils::chunked_vector<cdc::stream_id>::iterator lo1,
|
||||
utils::chunked_vector<cdc::stream_id>::iterator end1,
|
||||
utils::chunked_vector<cdc::stream_id>::iterator lo2,
|
||||
utils::chunked_vector<cdc::stream_id>::iterator end2);
|
||||
|
||||
void set_starting_position(const cdc::stream_id &update_to);
|
||||
// Must be called after construction and after set_starting_position()
|
||||
// (if used), but before begin()/end() iteration.
|
||||
void prepare_for_iterating();
|
||||
|
||||
utils::chunked_vector<cdc::stream_id>::iterator begin() const { return _lo1; }
|
||||
utils::chunked_vector<cdc::stream_id>::iterator end() const { return _end1; }
|
||||
};
|
||||
|
||||
stream_id_range find_children_range_from_parent_token(
|
||||
const utils::chunked_vector<cdc::stream_id>& parent_streams,
|
||||
utils::chunked_vector<cdc::stream_id>& current_streams,
|
||||
cdc::stream_id parent,
|
||||
bool uses_tablets
|
||||
);
|
||||
}
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include <chrono>
|
||||
@@ -44,7 +44,6 @@
|
||||
#include "cql3/query_options.hh"
|
||||
#include "cql3/column_identifier.hh"
|
||||
#include "alternator/executor.hh"
|
||||
#include "alternator/executor_util.hh"
|
||||
#include "alternator/controller.hh"
|
||||
#include "alternator/serialization.hh"
|
||||
#include "alternator/ttl_tag.hh"
|
||||
@@ -59,17 +58,13 @@ static logging::logger tlogger("alternator_ttl");
|
||||
|
||||
namespace alternator {
|
||||
|
||||
future<executor::request_return_type> executor::update_time_to_live(client_state& client_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info) {
|
||||
future<executor::request_return_type> executor::update_time_to_live(client_state& client_state, service_permit permit, rjson::value request) {
|
||||
_stats.api_operations.update_time_to_live++;
|
||||
if (!_proxy.features().alternator_ttl) {
|
||||
co_return api_error::unknown_operation("UpdateTimeToLive not yet supported. Upgrade all nodes to a version that supports it.");
|
||||
}
|
||||
|
||||
schema_ptr schema = get_table(_proxy, request);
|
||||
|
||||
maybe_audit(audit_info, audit::statement_category::DDL,
|
||||
schema->ks_name(), schema->cf_name(), "UpdateTimeToLive", request);
|
||||
|
||||
rjson::value* spec = rjson::find(request, "TimeToLiveSpecification");
|
||||
if (!spec || !spec->IsObject()) {
|
||||
co_return api_error::validation("UpdateTimeToLive missing mandatory TimeToLiveSpecification");
|
||||
@@ -119,13 +114,9 @@ future<executor::request_return_type> executor::update_time_to_live(client_state
|
||||
co_return rjson::print(std::move(response));
|
||||
}
|
||||
|
||||
future<executor::request_return_type> executor::describe_time_to_live(client_state& client_state, service_permit permit, rjson::value request, std::unique_ptr<audit::audit_info_alternator>& audit_info) {
|
||||
future<executor::request_return_type> executor::describe_time_to_live(client_state& client_state, service_permit permit, rjson::value request) {
|
||||
_stats.api_operations.describe_time_to_live++;
|
||||
schema_ptr schema = get_table(_proxy, request);
|
||||
|
||||
maybe_audit(audit_info, audit::statement_category::QUERY,
|
||||
schema->ks_name(), schema->cf_name(), "DescribeTimeToLive", request);
|
||||
|
||||
std::map<sstring, sstring> tags_map = get_tags_of_table_or_throw(schema);
|
||||
rjson::value desc = rjson::empty_object();
|
||||
auto i = tags_map.find(TTL_TAG_KEY);
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -743,7 +743,7 @@
|
||||
"parameters":[
|
||||
{
|
||||
"name":"tag",
|
||||
"description":"The snapshot tag to delete. If omitted, all snapshots are removed.",
|
||||
"description":"the tag given to the snapshot",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -751,7 +751,7 @@
|
||||
},
|
||||
{
|
||||
"name":"kn",
|
||||
"description":"Comma-separated list of keyspace names to delete snapshots from. If omitted, snapshots are deleted from all keyspaces.",
|
||||
"description":"Comma-separated keyspaces name that their snapshot will be deleted",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -759,7 +759,7 @@
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"A table name used to filter which table's snapshots are deleted. If omitted or empty, snapshots for all tables are eligible. When provided together with 'kn', the table is looked up in each listed keyspace independently. For secondary indexes, the logical index name (e.g. 'myindex') can be used and is resolved automatically.",
|
||||
"description":"an optional table name that its snapshot will be deleted",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
@@ -3166,83 +3166,6 @@
|
||||
]
|
||||
},
|
||||
|
||||
{
|
||||
"path":"/storage_service/vnode_tablet_migrations/keyspaces/{keyspace}",
|
||||
"operations":[{
|
||||
"method":"POST",
|
||||
"summary":"Start vnodes-to-tablets migration for all tables in a keyspace",
|
||||
"type":"void",
|
||||
"nickname":"create_vnode_tablet_migration",
|
||||
"produces":["application/json"],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"keyspace",
|
||||
"description":"Keyspace name",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get a keyspace's vnodes-to-tablets migration status",
|
||||
"type":"vnode_tablet_migration_status",
|
||||
"nickname":"get_vnode_tablet_migration",
|
||||
"produces":["application/json"],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"keyspace",
|
||||
"description":"Keyspace name",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
}
|
||||
]
|
||||
}]
|
||||
},
|
||||
{
|
||||
"path":"/storage_service/vnode_tablet_migrations/node/storage_mode",
|
||||
"operations":[{
|
||||
"method":"PUT",
|
||||
"summary":"Set the intended storage mode for this node during vnodes-to-tablets migration",
|
||||
"type":"void",
|
||||
"nickname":"set_vnode_tablet_migration_node_storage_mode",
|
||||
"produces":["application/json"],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"intended_mode",
|
||||
"description":"Intended storage mode (tablets or vnodes)",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}]
|
||||
},
|
||||
{
|
||||
"path":"/storage_service/vnode_tablet_migrations/keyspaces/{keyspace}/finalization",
|
||||
"operations":[{
|
||||
"method":"POST",
|
||||
"summary":"Finalize vnodes-to-tablets migration for all tables in a keyspace",
|
||||
"type":"void",
|
||||
"nickname":"finalize_vnode_tablet_migration",
|
||||
"produces":["application/json"],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"keyspace",
|
||||
"description":"Keyspace name",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
}
|
||||
]
|
||||
}]
|
||||
},
|
||||
{
|
||||
"path":"/storage_service/quiesce_topology",
|
||||
"operations":[
|
||||
@@ -3860,45 +3783,6 @@
|
||||
"description":"The resulting compression ratio (estimated on a random sample of files)"
|
||||
}
|
||||
}
|
||||
},
|
||||
"vnode_tablet_migration_node_status":{
|
||||
"id":"vnode_tablet_migration_node_status",
|
||||
"description":"Node storage mode info during vnodes-to-tablets migration",
|
||||
"properties":{
|
||||
"host_id":{
|
||||
"type":"string",
|
||||
"description":"The host ID"
|
||||
},
|
||||
"current_mode":{
|
||||
"type":"string",
|
||||
"description":"The current storage mode: `vnodes` or `tablets`"
|
||||
},
|
||||
"intended_mode":{
|
||||
"type":"string",
|
||||
"description":"The intended storage mode: `vnodes` or `tablets`"
|
||||
}
|
||||
}
|
||||
},
|
||||
"vnode_tablet_migration_status":{
|
||||
"id":"vnode_tablet_migration_status",
|
||||
"description":"Vnodes-to-tablets migration status for a keyspace",
|
||||
"properties":{
|
||||
"keyspace":{
|
||||
"type":"string",
|
||||
"description":"The keyspace name"
|
||||
},
|
||||
"status":{
|
||||
"type":"string",
|
||||
"description":"The migration status: `vnodes` (not started), `migrating_to_tablets` (in progress), or `tablets` (complete)"
|
||||
},
|
||||
"nodes":{
|
||||
"type":"array",
|
||||
"items":{
|
||||
"$ref":"vnode_tablet_migration_node_status"
|
||||
},
|
||||
"description":"Per-node storage mode information. Empty if the keyspace is not being migrated."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include "api.hh"
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
#pragma once
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include "api/api-doc/authorization_cache.json.hh"
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include "cache_service.hh"
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include <seastar/http/short_streams.hh>
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
#pragma once
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include "collectd.hh"
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include <fmt/ranges.h>
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include "commitlog.hh"
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include <seastar/core/coroutine.hh>
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include "api/api.hh"
|
||||
@@ -82,16 +82,15 @@ void set_config(std::shared_ptr < api_registry_builder20 > rb, http_context& ctx
|
||||
});
|
||||
});
|
||||
|
||||
cs::find_config_id.set(r, [&cfg] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto id = req->get_path_param("id");
|
||||
auto value = co_await cfg.value_as_json_string_for_name(id);
|
||||
if (!value) {
|
||||
throw bad_param_exception(sstring("No such config entry: ") + id);
|
||||
cs::find_config_id.set(r, [&cfg] (const_req r) {
|
||||
auto id = r.get_path_param("id");
|
||||
for (auto&& cfg_ref : cfg.values()) {
|
||||
auto&& cfg = cfg_ref.get();
|
||||
if (id == cfg.name()) {
|
||||
return cfg.value_as_json();
|
||||
}
|
||||
}
|
||||
//value is already a json string
|
||||
json::json_return_type ret{json::json_void()};
|
||||
ret._res = std::move(*value);
|
||||
co_return ret;
|
||||
throw bad_param_exception(sstring("No such config entry: ") + id);
|
||||
});
|
||||
|
||||
sp::get_rpc_timeout.set(r, [&cfg](const_req req) {
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include "build_mode.hh"
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#ifndef SCYLLA_BUILD_MODE_RELEASE
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include "locator/snitch_base.hh"
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include "api/api-doc/error_injection.json.hh"
|
||||
@@ -23,7 +23,7 @@ void set_error_injection(http_context& ctx, routes& r) {
|
||||
|
||||
hf::enable_injection.set(r, [](std::unique_ptr<request> req) -> future<json::json_return_type> {
|
||||
sstring injection = req->get_path_param("injection");
|
||||
bool one_shot = strcasecmp(req->get_query_param("one_shot").c_str(), "true") == 0;
|
||||
bool one_shot = req->get_query_param("one_shot") == "True";
|
||||
auto params = co_await util::read_entire_stream_contiguous(*req->content_stream);
|
||||
|
||||
const size_t max_params_size = 1024 * 1024;
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include "failure_detector.hh"
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include <seastar/core/coroutine.hh>
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include <vector>
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#include "api/api-doc/lsa.json.hh"
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
*/
|
||||
|
||||
/*
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.1
|
||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user