Compare commits
1 Commits
next-6.0
...
annastuchl
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
933192a0bb |
84
.github/actions/setup-build/action.yaml
vendored
84
.github/actions/setup-build/action.yaml
vendored
@@ -1,84 +0,0 @@
|
||||
name: setup-build-env
|
||||
description: Setup Building Environment
|
||||
inputs:
|
||||
install_clang_tool:
|
||||
description: 'install clang-tool'
|
||||
required: false
|
||||
default: false
|
||||
type: boolean
|
||||
install_clang_tidy:
|
||||
description: 'install clang-tidy'
|
||||
required: false
|
||||
default: false
|
||||
type: boolean
|
||||
|
||||
# use the stable branch
|
||||
# should be the same as the one used by the compositing workflow
|
||||
env:
|
||||
CLANG_VERSION: 18
|
||||
|
||||
runs:
|
||||
using: 'composite'
|
||||
steps:
|
||||
- name: Add scylla-ppa repo
|
||||
shell: bash
|
||||
run: |
|
||||
sudo add-apt-repository ppa:scylladb/ppa
|
||||
|
||||
- name: Add clang apt repo
|
||||
if: ${{ inputs.install_clang_tool || inputs.install_clang_tidy }}
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt-get install -y curl
|
||||
curl -fsSL https://apt.llvm.org/llvm-snapshot.gpg.key | sudo tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc >/dev/null
|
||||
repo_component=llvm-toolchain-jammy
|
||||
# use the development branch if $CLANG_VERSION is empty
|
||||
if [ -n "$CLANG_VERSION" ]; then
|
||||
repo_component+=-$CLANG_VERSION
|
||||
fi
|
||||
echo "deb http://apt.llvm.org/jammy/ $repo_component main" | sudo tee -a /etc/apt/sources.list.d/llvm.list
|
||||
sudo apt-get update
|
||||
|
||||
- name: Install clang-tools
|
||||
if: ${{ inputs.install_clang_tools }}
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt-get install -y clang-tools-$CLANG_VERSION
|
||||
|
||||
- name: Install clang-tidy
|
||||
if: ${{ inputs.install_clang_tidy }}
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt-get install -y clang-tidy-$CLANG_VERSION
|
||||
|
||||
- name: Install GCC-12
|
||||
# ubuntu:jammy comes with GCC-11. and libstdc++-11 fails to compile
|
||||
# scylla which defines value type of std::unordered_map in .cc
|
||||
shell: bash
|
||||
run: |
|
||||
sudo add-apt-repository -y ppa:ubuntu-toolchain-r/ppa
|
||||
sudo apt-get install -y libstdc++-12-dev
|
||||
|
||||
- name: Install more build dependencies
|
||||
shell: bash
|
||||
run: |
|
||||
# - do not install java dependencies, which is not only not necessary,
|
||||
# and they include "python", which is not EOL and not available.
|
||||
# - replace "scylla-libthrift010" with "libthrift-dev". because
|
||||
# scylla-libthrift010 : Depends: libssl1.0.0 (>= 1.0.1) but it is not installable
|
||||
# - we don't perform tests, so minio is not necessary.
|
||||
sed -i.orig \
|
||||
-e '/tools\/.*\/install-dependencies.sh/d' \
|
||||
-e 's/scylla-libthrift010-dev/libthrift-dev/' \
|
||||
-e 's/(minio_download_jobs)/(true)/' \
|
||||
./install-dependencies.sh
|
||||
sudo ./install-dependencies.sh
|
||||
mv ./install-dependencies.sh{.orig,}
|
||||
# for ld.lld
|
||||
sudo apt-get install -y lld-18
|
||||
|
||||
- name: Install {fmt} using cooking.sh
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt-get remove -y libfmt-dev
|
||||
seastar/cooking.sh -d build-fmt -p cooking -i fmt
|
||||
18
.github/clang-tidy-matcher.json
vendored
18
.github/clang-tidy-matcher.json
vendored
@@ -1,18 +0,0 @@
|
||||
{
|
||||
"problemMatcher": [
|
||||
{
|
||||
"owner": "clang-tidy",
|
||||
"pattern": [
|
||||
{
|
||||
"regexp": "^([^:]+):(\\d+):(\\d+):\\s+(warning|error):\\s+(.*?)\\s+\\[(.*?)\\]$",
|
||||
"file": 1,
|
||||
"line": 2,
|
||||
"column": 3,
|
||||
"severity": 4,
|
||||
"message": 5,
|
||||
"code": 6
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
68
.github/scripts/label_promoted_commits.py
vendored
68
.github/scripts/label_promoted_commits.py
vendored
@@ -1,9 +1,9 @@
|
||||
import requests
|
||||
from github import Github
|
||||
import argparse
|
||||
import re
|
||||
import sys
|
||||
import os
|
||||
from github import Github
|
||||
from github.GithubException import UnknownObjectException
|
||||
|
||||
try:
|
||||
github_token = os.environ["GITHUB_TOKEN"]
|
||||
@@ -23,68 +23,36 @@ def parser():
|
||||
'commit, exclusive).')
|
||||
parser.add_argument('--update_issue', type=bool, default=False, help='Set True to update issues when backport was '
|
||||
'done')
|
||||
parser.add_argument('--ref', type=str, required=True, help='PR target branch')
|
||||
parser.add_argument('--label', type=str, required=True, help='Label to use')
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
def add_comment_and_close_pr(pr, comment):
|
||||
if pr.state == 'open':
|
||||
pr.create_issue_comment(comment)
|
||||
pr.edit(state="closed")
|
||||
|
||||
|
||||
def mark_backport_done(repo, ref_pr_number, branch):
|
||||
pr = repo.get_pull(int(ref_pr_number))
|
||||
label_to_remove = f'backport/{branch}'
|
||||
label_to_add = f'{label_to_remove}-done'
|
||||
current_labels = [label.name for label in pr.get_labels()]
|
||||
if label_to_remove in current_labels:
|
||||
pr.remove_from_labels(label_to_remove)
|
||||
if label_to_add not in current_labels:
|
||||
pr.add_to_labels(label_to_add)
|
||||
|
||||
|
||||
def main():
|
||||
# This script is triggered by a push event to either the master branch or a branch named branch-x.y (where x and y represent version numbers). Based on the pushed branch, the script performs the following actions:
|
||||
# - When ref branch is `master`, it will add the `promoted-to-master` label, which we need later for the auto backport process
|
||||
# - When ref branch is `branch-x.y` (which means we backported a patch), it will replace in the original PR the `backport/x.y` label with `backport/x.y-done` and will close the backport PR (Since GitHub close only the one referring to default branch)
|
||||
args = parser()
|
||||
pr_pattern = re.compile(r'Closes .*#([0-9]+)')
|
||||
target_branch = re.search(r'branch-(\d+\.\d+)', args.ref)
|
||||
g = Github(github_token)
|
||||
repo = g.get_repo(args.repository, lazy=False)
|
||||
|
||||
commits = repo.compare(head=args.commit_after_merge, base=args.commit_before_merge)
|
||||
processed_prs = set()
|
||||
# Print commit information
|
||||
for commit in commits.commits:
|
||||
print(f'Commit sha is: {commit.sha}')
|
||||
print(commit.sha)
|
||||
match = pr_pattern.search(commit.commit.message)
|
||||
if match:
|
||||
pr_number = int(match.group(1))
|
||||
if pr_number in processed_prs:
|
||||
continue
|
||||
if target_branch:
|
||||
pr = repo.get_pull(pr_number)
|
||||
branch_name = target_branch[1]
|
||||
refs_pr = re.findall(r'Refs (?:#|https.*?)(\d+)', pr.body)
|
||||
if refs_pr:
|
||||
print(f'branch-{target_branch.group(1)}, pr number is: {pr_number}')
|
||||
# 1. change the backport label of the parent PR to note that
|
||||
# we've merge the corresponding backport PR
|
||||
# 2. close the backport PR and leave a comment on it to note
|
||||
# that it has been merged with a certain git commit,
|
||||
ref_pr_number = refs_pr[0]
|
||||
mark_backport_done(repo, ref_pr_number, branch_name)
|
||||
comment = f'Closed via {commit.sha}'
|
||||
add_comment_and_close_pr(pr, comment)
|
||||
pr_number = match.group(1)
|
||||
url = f'https://api.github.com/repos/{args.repository}/issues/{pr_number}/labels'
|
||||
data = {
|
||||
"labels": [f'{args.label}']
|
||||
}
|
||||
headers = {
|
||||
"Authorization": f"token {github_token}",
|
||||
"Accept": "application/vnd.github.v3+json"
|
||||
}
|
||||
response = requests.post(url, headers=headers, json=data)
|
||||
if response.ok:
|
||||
print(f"Label added successfully to {url}")
|
||||
else:
|
||||
try:
|
||||
pr = repo.get_pull(pr_number)
|
||||
pr.add_to_labels('promoted-to-master')
|
||||
print(f'master branch, pr number is: {pr_number}')
|
||||
except UnknownObjectException:
|
||||
print(f'{pr_number} is not a PR but an issue, no need to add label')
|
||||
processed_prs.add(pr_number)
|
||||
print(f"No label was added to {url}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
@@ -4,10 +4,6 @@ on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- branch-*.*
|
||||
|
||||
env:
|
||||
DEFAULT_BRANCH: 'master'
|
||||
|
||||
jobs:
|
||||
check-commit:
|
||||
@@ -19,8 +15,6 @@ jobs:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
repository: ${{ github.repository }}
|
||||
ref: ${{ env.DEFAULT_BRANCH }}
|
||||
fetch-depth: 0 # Fetch all history for all tags and branches
|
||||
|
||||
- name: Install dependencies
|
||||
@@ -29,4 +23,4 @@ jobs:
|
||||
- name: Run python script
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: python .github/scripts/label_promoted_commits.py --commit_before_merge ${{ github.event.before }} --commit_after_merge ${{ github.event.after }} --repository ${{ github.repository }} --ref ${{ github.ref }}
|
||||
run: python .github/scripts/label_promoted_commits.py --commit_before_merge ${{ github.event.before }} --commit_after_merge ${{ github.event.after }} --repository ${{ github.repository }} --label promoted-to-master
|
||||
|
||||
63
.github/workflows/clang-tidy.yaml
vendored
63
.github/workflows/clang-tidy.yaml
vendored
@@ -1,63 +0,0 @@
|
||||
name: clang-tidy
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
paths-ignore:
|
||||
- '**/*.rst'
|
||||
- '**/*.md'
|
||||
- 'docs/**'
|
||||
- '.github/**'
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
# only at 5AM Saturday
|
||||
- cron: '0 5 * * SAT'
|
||||
|
||||
env:
|
||||
# use the stable branch
|
||||
CLANG_VERSION: 18
|
||||
BUILD_TYPE: RelWithDebInfo
|
||||
BUILD_DIR: build
|
||||
CLANG_TIDY_CHECKS: '-*,bugprone-use-after-move'
|
||||
|
||||
permissions: {}
|
||||
|
||||
# cancel the in-progress run upon a repush
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
clang-tidy:
|
||||
name: Run clang-tidy
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: true
|
||||
- uses: ./.github/actions/setup-build
|
||||
with:
|
||||
install_clang_tidy: true
|
||||
- name: Generate the building system
|
||||
run: |
|
||||
cmake \
|
||||
-DCMAKE_BUILD_TYPE=$BUILD_TYPE \
|
||||
-DCMAKE_C_COMPILER=clang-$CLANG_VERSION \
|
||||
-DScylla_USE_LINKER=ld.lld-$CLANG_VERSION \
|
||||
-DCMAKE_CXX_COMPILER=clang++-$CLANG_VERSION \
|
||||
-DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
|
||||
-DCMAKE_CXX_CLANG_TIDY="clang-tidy-$CLANG_VERSION;--checks=$CLANG_TIDY_CHECKS" \
|
||||
-DCMAKE_CXX_FLAGS=-DFMT_HEADER_ONLY \
|
||||
-DCMAKE_PREFIX_PATH=$PWD/cooking \
|
||||
-G Ninja \
|
||||
-B $BUILD_DIR \
|
||||
-S .
|
||||
# see https://github.com/actions/toolkit/blob/main/docs/problem-matchers.md
|
||||
- run: |
|
||||
echo "::add-matcher::.github/clang-tidy-matcher.json"
|
||||
- name: Build with clang-tidy enabled
|
||||
run: |
|
||||
cmake --build $BUILD_DIR --target scylla
|
||||
- run: |
|
||||
echo "::remove-matcher owner=clang-tidy::"
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -18,7 +18,7 @@ CMakeLists.txt.user
|
||||
*.egg-info
|
||||
__pycache__CMakeLists.txt.user
|
||||
.gdbinit
|
||||
/resources
|
||||
resources
|
||||
.pytest_cache
|
||||
/expressions.tokens
|
||||
tags
|
||||
@@ -30,4 +30,3 @@ compile_commands.json
|
||||
.ccls-cache/
|
||||
.mypy_cache
|
||||
.envrc
|
||||
clang_build
|
||||
|
||||
3
.gitmodules
vendored
3
.gitmodules
vendored
@@ -6,9 +6,6 @@
|
||||
path = swagger-ui
|
||||
url = ../scylla-swagger-ui
|
||||
ignore = dirty
|
||||
[submodule "abseil"]
|
||||
path = abseil
|
||||
url = ../abseil-cpp
|
||||
[submodule "scylla-jmx"]
|
||||
path = tools/jmx
|
||||
url = ../scylla-jmx
|
||||
|
||||
@@ -42,6 +42,8 @@ else()
|
||||
COMMENT "List configured modes")
|
||||
endif()
|
||||
|
||||
add_compile_definitions(
|
||||
FMT_DEPRECATED_OSTREAM)
|
||||
include(limit_jobs)
|
||||
# Configure Seastar compile options to align with Scylla
|
||||
set(CMAKE_CXX_STANDARD "20" CACHE INTERNAL "")
|
||||
@@ -55,33 +57,6 @@ set(Seastar_EXCLUDE_APPS_FROM_ALL ON CACHE BOOL "" FORCE)
|
||||
set(Seastar_EXCLUDE_TESTS_FROM_ALL ON CACHE BOOL "" FORCE)
|
||||
set(Seastar_UNUSED_RESULT_ERROR ON CACHE BOOL "" FORCE)
|
||||
add_subdirectory(seastar)
|
||||
set(ABSL_PROPAGATE_CXX_STD ON CACHE BOOL "" FORCE)
|
||||
|
||||
find_package(Sanitizers QUIET)
|
||||
set(sanitizer_cxx_flags
|
||||
$<$<IN_LIST:$<CONFIG>,Debug;Sanitize>:$<TARGET_PROPERTY:Sanitizers::address,INTERFACE_COMPILE_OPTIONS>;$<TARGET_PROPERTY:Sanitizers::undefined_behavior,INTERFACE_COMPILE_OPTIONS>>)
|
||||
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
|
||||
set(ABSL_GCC_FLAGS ${sanitizer_cxx_flags})
|
||||
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
|
||||
set(ABSL_LLVM_FLAGS ${sanitizer_cxx_flags})
|
||||
endif()
|
||||
set(ABSL_DEFAULT_LINKOPTS
|
||||
$<$<IN_LIST:$<CONFIG>,Debug;Sanitize>:$<TARGET_PROPERTY:Sanitizers::address,INTERFACE_LINK_LIBRARIES>;$<TARGET_PROPERTY:Sanitizers::undefined_behavior,INTERFACE_LINK_LIBRARIES>>)
|
||||
add_subdirectory(abseil)
|
||||
add_library(absl-headers INTERFACE)
|
||||
target_include_directories(absl-headers SYSTEM INTERFACE
|
||||
"${PROJECT_SOURCE_DIR}/abseil")
|
||||
add_library(absl::headers ALIAS absl-headers)
|
||||
|
||||
# Exclude absl::strerror from the default "all" target since it's not
|
||||
# used in Scylla build and, moreover, makes use of deprecated glibc APIs,
|
||||
# such as sys_nerr, which are not exposed from "stdio.h" since glibc 2.32,
|
||||
# which happens to be the case for recent Fedora distribution versions.
|
||||
#
|
||||
# Need to use the internal "absl_strerror" target name instead of namespaced
|
||||
# variant because `set_target_properties` does not understand the latter form,
|
||||
# unfortunately.
|
||||
set_target_properties(absl_strerror PROPERTIES EXCLUDE_FROM_ALL TRUE)
|
||||
|
||||
# System libraries dependencies
|
||||
find_package(Boost REQUIRED
|
||||
@@ -93,7 +68,7 @@ target_link_libraries(Boost::regex
|
||||
find_package(Lua REQUIRED)
|
||||
find_package(ZLIB REQUIRED)
|
||||
find_package(ICU COMPONENTS uc i18n REQUIRED)
|
||||
find_package(fmt 9.0.0 REQUIRED)
|
||||
find_package(absl COMPONENTS hash raw_hash_set REQUIRED)
|
||||
find_package(libdeflate REQUIRED)
|
||||
find_package(libxcrypt REQUIRED)
|
||||
find_package(Snappy REQUIRED)
|
||||
@@ -150,8 +125,6 @@ target_sources(scylla-main
|
||||
target_link_libraries(scylla-main
|
||||
PRIVATE
|
||||
db
|
||||
absl::headers
|
||||
absl::btree
|
||||
absl::hash
|
||||
absl::raw_hash_set
|
||||
Seastar::seastar
|
||||
@@ -264,7 +237,6 @@ target_link_libraries(scylla PRIVATE
|
||||
|
||||
target_link_libraries(scylla PRIVATE
|
||||
seastar
|
||||
absl::headers
|
||||
Boost::program_options)
|
||||
|
||||
target_include_directories(scylla PRIVATE
|
||||
|
||||
@@ -78,7 +78,7 @@ fi
|
||||
|
||||
# Default scylla product/version tags
|
||||
PRODUCT=scylla
|
||||
VERSION=6.0.5
|
||||
VERSION=5.5.0-dev
|
||||
|
||||
if test -f version
|
||||
then
|
||||
@@ -88,13 +88,10 @@ else
|
||||
SCYLLA_VERSION=$VERSION
|
||||
if [ -z "$SCYLLA_RELEASE" ]; then
|
||||
GIT_COMMIT=$(git -C "$SCRIPT_DIR" log --pretty=format:'%h' -n 1 --abbrev=12)
|
||||
# For custom package builds, replace "0" with "counter.yourname",
|
||||
# For custom package builds, replace "0" with "counter.your_name",
|
||||
# where counter starts at 1 and increments for successive versions.
|
||||
# This ensures that the package manager will select your custom
|
||||
# package over the standard release.
|
||||
# Do not use any special characters like - or _ in the name above!
|
||||
# These characters either have special meaning or are illegal in
|
||||
# version strings.
|
||||
SCYLLA_BUILD=0
|
||||
SCYLLA_RELEASE=$SCYLLA_BUILD.$DATE.$GIT_COMMIT
|
||||
elif [ -f "$OUTPUT_DIR/SCYLLA-RELEASE-FILE" ]; then
|
||||
|
||||
1
abseil
1
abseil
Submodule abseil deleted from d7aaad83b4
@@ -27,8 +27,7 @@ target_link_libraries(alternator
|
||||
cql3
|
||||
idl
|
||||
Seastar::seastar
|
||||
xxHash::xxhash
|
||||
absl::headers)
|
||||
xxHash::xxhash)
|
||||
|
||||
check_headers(check-headers alternator
|
||||
GLOB_RECURSE ${CMAKE_CURRENT_SOURCE_DIR}/*.hh)
|
||||
|
||||
@@ -6,10 +6,8 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include <fmt/ranges.h>
|
||||
#include <seastar/core/sleep.hh>
|
||||
#include "alternator/executor.hh"
|
||||
#include "cdc/log.hh"
|
||||
#include "db/config.hh"
|
||||
#include "log.hh"
|
||||
#include "schema/schema_builder.hh"
|
||||
@@ -4440,10 +4438,8 @@ future<executor::request_return_type> executor::list_tables(client_state& client
|
||||
|
||||
auto tables = _proxy.data_dictionary().get_tables(); // hold on to temporary, table_names isn't a container, it's a view
|
||||
auto table_names = tables
|
||||
| boost::adaptors::filtered([this] (data_dictionary::table t) {
|
||||
return t.schema()->ks_name().find(KEYSPACE_NAME_PREFIX) == 0 &&
|
||||
!t.schema()->is_view() &&
|
||||
!cdc::is_log_for_some_table(_proxy.local_db(), t.schema()->ks_name(), t.schema()->cf_name());
|
||||
| boost::adaptors::filtered([] (data_dictionary::table t) {
|
||||
return t.schema()->ks_name().find(KEYSPACE_NAME_PREFIX) == 0 && !t.schema()->is_view();
|
||||
})
|
||||
| boost::adaptors::transformed([] (data_dictionary::table t) {
|
||||
return t.schema()->cf_name();
|
||||
@@ -4579,7 +4575,7 @@ static lw_shared_ptr<keyspace_metadata> create_keyspace_metadata(std::string_vie
|
||||
// used by default on new Alternator tables. Change this initialization
|
||||
// to 0 enable tablets by default, with automatic number of tablets.
|
||||
std::optional<unsigned> initial_tablets;
|
||||
if (sp.get_db().local().get_config().enable_tablets()) {
|
||||
if (sp.get_db().local().get_config().check_experimental(db::experimental_features_t::feature::TABLETS)) {
|
||||
auto it = tags_map.find(INITIAL_TABLETS_TAG_KEY);
|
||||
if (it != tags_map.end()) {
|
||||
// Tag set. If it's a valid number, use it. If not - e.g., it's
|
||||
|
||||
@@ -256,6 +256,6 @@ public:
|
||||
} // namespace parsed
|
||||
} // namespace alternator
|
||||
|
||||
template <> struct fmt::formatter<alternator::parsed::path> : fmt::formatter<string_view> {
|
||||
template <> struct fmt::formatter<alternator::parsed::path> : fmt::formatter<std::string_view> {
|
||||
auto format(const alternator::parsed::path&, fmt::format_context& ctx) const -> decltype(ctx.out());
|
||||
};
|
||||
|
||||
@@ -8,7 +8,6 @@
|
||||
|
||||
#include "alternator/server.hh"
|
||||
#include "log.hh"
|
||||
#include <fmt/ranges.h>
|
||||
#include <seastar/http/function_handlers.hh>
|
||||
#include <seastar/http/short_streams.hh>
|
||||
#include <seastar/core/coroutine.hh>
|
||||
@@ -211,11 +210,8 @@ protected:
|
||||
sstring local_dc = topology.get_datacenter();
|
||||
std::unordered_set<gms::inet_address> local_dc_nodes = topology.get_datacenter_endpoints().at(local_dc);
|
||||
for (auto& ip : local_dc_nodes) {
|
||||
// Note that it's not enough for the node to be is_alive() - a
|
||||
// node joining the cluster is also "alive" but not responsive to
|
||||
// requests. We need the node to be in normal state. See #19694.
|
||||
if (_gossiper.is_normal(ip)) {
|
||||
rjson::push_back(results, rjson::from_string(fmt::to_string(ip)));
|
||||
if (_gossiper.is_alive(ip)) {
|
||||
rjson::push_back(results, rjson::from_string(ip.to_sstring()));
|
||||
}
|
||||
}
|
||||
rep->set_status(reply::status_type::ok);
|
||||
|
||||
@@ -1057,6 +1057,9 @@ void executor::add_stream_options(const rjson::value& stream_specification, sche
|
||||
if (stream_enabled->GetBool()) {
|
||||
auto db = sp.data_dictionary();
|
||||
|
||||
if (!db.features().cdc) {
|
||||
throw api_error::validation("StreamSpecification: streams (CDC) feature not enabled in cluster.");
|
||||
}
|
||||
if (!db.features().alternator_streams) {
|
||||
throw api_error::validation("StreamSpecification: alternator streams feature not enabled in cluster.");
|
||||
}
|
||||
|
||||
@@ -26,7 +26,6 @@
|
||||
#include "log.hh"
|
||||
#include "gc_clock.hh"
|
||||
#include "replica/database.hh"
|
||||
#include "service/client_state.hh"
|
||||
#include "service_permit.hh"
|
||||
#include "timestamp.hh"
|
||||
#include "service/storage_proxy.hh"
|
||||
@@ -384,9 +383,6 @@ static std::vector<std::pair<dht::token_range, gms::inet_address>> get_secondary
|
||||
// the chances of covering all ranges during a scan when restarts occur.
|
||||
// A more deterministic way would be to regularly persist the scanning state,
|
||||
// but that incurs overhead that we want to avoid if not needed.
|
||||
//
|
||||
// FIXME: Check if this algorithm is safe with tablet migration.
|
||||
// https://github.com/scylladb/scylladb/issues/16567
|
||||
enum primary_or_secondary_t {primary, secondary};
|
||||
template<primary_or_secondary_t primary_or_secondary>
|
||||
class token_ranges_owned_by_this_shard {
|
||||
@@ -499,7 +495,6 @@ struct scan_ranges_context {
|
||||
bytes column_name;
|
||||
std::optional<std::string> member;
|
||||
|
||||
service::client_state internal_client_state;
|
||||
::shared_ptr<cql3::selection::selection> selection;
|
||||
std::unique_ptr<service::query_state> query_state_ptr;
|
||||
std::unique_ptr<cql3::query_options> query_options;
|
||||
@@ -509,7 +504,6 @@ struct scan_ranges_context {
|
||||
: s(s)
|
||||
, column_name(column_name)
|
||||
, member(member)
|
||||
, internal_client_state(service::client_state::internal_tag())
|
||||
{
|
||||
// FIXME: don't read the entire items - read only parts of it.
|
||||
// We must read the key columns (to be able to delete) and also
|
||||
@@ -528,9 +522,10 @@ struct scan_ranges_context {
|
||||
std::vector<query::clustering_range> ck_bounds{query::clustering_range::make_open_ended_both_sides()};
|
||||
auto partition_slice = query::partition_slice(std::move(ck_bounds), {}, std::move(regular_columns), opts);
|
||||
command = ::make_lw_shared<query::read_command>(s->id(), s->version(), partition_slice, proxy.get_max_result_size(partition_slice), query::tombstone_limit(proxy.get_tombstone_limit()));
|
||||
executor::client_state client_state{executor::client_state::internal_tag()};
|
||||
tracing::trace_state_ptr trace_state;
|
||||
// NOTICE: empty_service_permit is used because the TTL service has fixed parallelism
|
||||
query_state_ptr = std::make_unique<service::query_state>(internal_client_state, trace_state, empty_service_permit());
|
||||
query_state_ptr = std::make_unique<service::query_state>(client_state, trace_state, empty_service_permit());
|
||||
// FIXME: What should we do on multi-DC? Will we run the expiration on the same ranges on all
|
||||
// DCs or only once for each range? If the latter, we need to change the CLs in the
|
||||
// scanner and deleter.
|
||||
|
||||
@@ -72,8 +72,7 @@ target_link_libraries(api
|
||||
idl
|
||||
wasmtime_bindings
|
||||
Seastar::seastar
|
||||
xxHash::xxhash
|
||||
absl::headers)
|
||||
xxHash::xxhash)
|
||||
|
||||
check_headers(check-headers api
|
||||
GLOB_RECURSE ${CMAKE_CURRENT_SOURCE_DIR}/*.hh)
|
||||
|
||||
@@ -63,28 +63,6 @@
|
||||
"paramType":"path"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Read the state of an injection from all shards",
|
||||
"type":"array",
|
||||
"items":{
|
||||
"type":"error_injection_info"
|
||||
},
|
||||
"nickname":"read_injection",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"injection",
|
||||
"description":"injection name",
|
||||
"required":true,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"path"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -174,39 +152,5 @@
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"models":{
|
||||
"mapper":{
|
||||
"id":"mapper",
|
||||
"description":"A key value mapping",
|
||||
"properties":{
|
||||
"key":{
|
||||
"type":"string",
|
||||
"description":"The key"
|
||||
},
|
||||
"value":{
|
||||
"type":"string",
|
||||
"description":"The value"
|
||||
}
|
||||
}
|
||||
},
|
||||
"error_injection_info":{
|
||||
"id":"error_injection_info",
|
||||
"description":"Information about an error injection",
|
||||
"properties":{
|
||||
"enabled":{
|
||||
"type":"boolean",
|
||||
"description":"Is the error injection enabled"
|
||||
},
|
||||
"parameters":{
|
||||
"type":"array",
|
||||
"items":{
|
||||
"type":"mapper"
|
||||
},
|
||||
"description":"The parameter values"
|
||||
}
|
||||
},
|
||||
"required":["enabled"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -90,7 +90,7 @@
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Returns a list of the tokens endpoint mapping, provide keyspace and cf param to get tablet mapping",
|
||||
"summary":"Returns a list of the tokens endpoint mapping",
|
||||
"type":"array",
|
||||
"items":{
|
||||
"type":"mapper"
|
||||
@@ -100,22 +100,6 @@
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
{
|
||||
"name":"keyspace",
|
||||
"description":"The keyspace to provide the tablet mapping for",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"cf",
|
||||
"description":"The table to provide the tablet mapping for",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
@@ -1913,14 +1897,6 @@
|
||||
"allowMultiple":false,
|
||||
"type":"string",
|
||||
"paramType":"query"
|
||||
},
|
||||
{
|
||||
"name":"force",
|
||||
"description":"Enforce the source_dc option, even if it unsafe to use for rebuild",
|
||||
"required":false,
|
||||
"allowMultiple":false,
|
||||
"type":"boolean",
|
||||
"paramType":"query"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -2750,22 +2726,6 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/storage_service/quiesce_topology",
|
||||
"operations":[
|
||||
{
|
||||
"nickname":"quiesce_topology",
|
||||
"method":"POST",
|
||||
"summary":"Waits until there are no ongoing topology operations. Guarantees that topology operations which started before the call are finished after the call. This doesn't consider requested but not started operations. Such operations may start after the call succeeds.",
|
||||
"type":"void",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/storage_service/metrics/total_hints",
|
||||
"operations":[
|
||||
|
||||
@@ -194,21 +194,6 @@
|
||||
"parameters":[]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path":"/system/highest_supported_sstable_version",
|
||||
"operations":[
|
||||
{
|
||||
"method":"GET",
|
||||
"summary":"Get highest supported sstable version",
|
||||
"type":"string",
|
||||
"nickname":"get_highest_supported_sstable_version",
|
||||
"produces":[
|
||||
"application/json"
|
||||
],
|
||||
"parameters":[]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -346,7 +346,7 @@ void req_params::process(const request& req) {
|
||||
continue;
|
||||
}
|
||||
try {
|
||||
ent.value = req.get_path_param(name);
|
||||
ent.value = req.param[name];
|
||||
} catch (std::out_of_range&) {
|
||||
throw httpd::bad_param_exception(fmt::format("Mandatory parameter '{}' was not provided", name));
|
||||
}
|
||||
|
||||
@@ -54,7 +54,7 @@ static const char* str_to_regex(const sstring& v) {
|
||||
void set_collectd(http_context& ctx, routes& r) {
|
||||
cd::get_collectd.set(r, [](std::unique_ptr<request> req) {
|
||||
|
||||
auto id = ::make_shared<scollectd::type_instance_id>(req->get_path_param("pluginid"),
|
||||
auto id = ::make_shared<scollectd::type_instance_id>(req->param["pluginid"],
|
||||
req->get_query_param("instance"), req->get_query_param("type"),
|
||||
req->get_query_param("type_instance"));
|
||||
|
||||
@@ -91,7 +91,7 @@ void set_collectd(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cd::enable_collectd.set(r, [](std::unique_ptr<request> req) -> future<json::json_return_type> {
|
||||
std::regex plugin(req->get_path_param("pluginid").c_str());
|
||||
std::regex plugin(req->param["pluginid"].c_str());
|
||||
std::regex instance(str_to_regex(req->get_query_param("instance")));
|
||||
std::regex type(str_to_regex(req->get_query_param("type")));
|
||||
std::regex type_instance(str_to_regex(req->get_query_param("type_instance")));
|
||||
|
||||
@@ -6,11 +6,9 @@
|
||||
* SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
*/
|
||||
|
||||
#include <fmt/ranges.h>
|
||||
#include "column_family.hh"
|
||||
#include "api/api.hh"
|
||||
#include "api/api-doc/column_family.json.hh"
|
||||
#include "api/api-doc/storage_service.json.hh"
|
||||
#include <vector>
|
||||
#include <seastar/http/exception.hh>
|
||||
#include "sstables/sstables.hh"
|
||||
@@ -30,7 +28,6 @@ using namespace httpd;
|
||||
|
||||
using namespace json;
|
||||
namespace cf = httpd::column_family_json;
|
||||
namespace ss = httpd::storage_service_json;
|
||||
|
||||
std::tuple<sstring, sstring> parse_fully_qualified_cf_name(sstring name) {
|
||||
auto pos = name.find("%3A");
|
||||
@@ -82,65 +79,6 @@ future<json::json_return_type> get_cf_stats(http_context& ctx,
|
||||
}, std::plus<int64_t>());
|
||||
}
|
||||
|
||||
static future<json::json_return_type> set_tables(http_context& ctx, const sstring& keyspace, std::vector<sstring> tables, std::function<future<>(replica::table&)> set) {
|
||||
if (tables.empty()) {
|
||||
tables = map_keys(ctx.db.local().find_keyspace(keyspace).metadata().get()->cf_meta_data());
|
||||
}
|
||||
|
||||
return do_with(keyspace, std::move(tables), [&ctx, set] (const sstring& keyspace, const std::vector<sstring>& tables) {
|
||||
return ctx.db.invoke_on_all([&keyspace, &tables, set] (replica::database& db) {
|
||||
return parallel_for_each(tables, [&db, &keyspace, set] (const sstring& table) {
|
||||
replica::table& t = db.find_column_family(keyspace, table);
|
||||
return set(t);
|
||||
});
|
||||
});
|
||||
}).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
}
|
||||
|
||||
class autocompaction_toggle_guard {
|
||||
replica::database& _db;
|
||||
public:
|
||||
autocompaction_toggle_guard(replica::database& db) : _db(db) {
|
||||
assert(this_shard_id() == 0);
|
||||
if (!_db._enable_autocompaction_toggle) {
|
||||
throw std::runtime_error("Autocompaction toggle is busy");
|
||||
}
|
||||
_db._enable_autocompaction_toggle = false;
|
||||
}
|
||||
autocompaction_toggle_guard(const autocompaction_toggle_guard&) = delete;
|
||||
autocompaction_toggle_guard(autocompaction_toggle_guard&&) = default;
|
||||
~autocompaction_toggle_guard() {
|
||||
assert(this_shard_id() == 0);
|
||||
_db._enable_autocompaction_toggle = true;
|
||||
}
|
||||
};
|
||||
|
||||
static future<json::json_return_type> set_tables_autocompaction(http_context& ctx, const sstring &keyspace, std::vector<sstring> tables, bool enabled) {
|
||||
apilog.info("set_tables_autocompaction: enabled={} keyspace={} tables={}", enabled, keyspace, tables);
|
||||
|
||||
return ctx.db.invoke_on(0, [&ctx, keyspace, tables = std::move(tables), enabled] (replica::database& db) {
|
||||
auto g = autocompaction_toggle_guard(db);
|
||||
return set_tables(ctx, keyspace, tables, [enabled] (replica::table& cf) {
|
||||
if (enabled) {
|
||||
cf.enable_auto_compaction();
|
||||
} else {
|
||||
return cf.disable_auto_compaction();
|
||||
}
|
||||
return make_ready_future<>();
|
||||
}).finally([g = std::move(g)] {});
|
||||
});
|
||||
}
|
||||
|
||||
static future<json::json_return_type> set_tables_tombstone_gc(http_context& ctx, const sstring &keyspace, std::vector<sstring> tables, bool enabled) {
|
||||
apilog.info("set_tables_tombstone_gc: enabled={} keyspace={} tables={}", enabled, keyspace, tables);
|
||||
return set_tables(ctx, keyspace, std::move(tables), [enabled] (replica::table& t) {
|
||||
t.set_tombstone_gc_enabled(enabled);
|
||||
return make_ready_future<>();
|
||||
});
|
||||
}
|
||||
|
||||
static future<json::json_return_type> get_cf_stats_count(http_context& ctx, const sstring& name,
|
||||
utils::timed_rate_moving_average_summary_and_histogram replica::column_family_stats::*f) {
|
||||
return map_reduce_cf(ctx, name, int64_t(0), [f](const replica::column_family& cf) {
|
||||
@@ -366,14 +304,6 @@ ratio_holder filter_recent_false_positive_as_ratio_holder(const sstables::shared
|
||||
return ratio_holder(f + sst->filter_get_recent_true_positive(), f);
|
||||
}
|
||||
|
||||
// Sum the value produced by `action` over every active memtable of table
// `t` and return the total.
uint64_t accumulate_on_active_memtables(replica::table& t, noncopyable_function<uint64_t(replica::memtable& mt)> action) {
    uint64_t total = 0;
    t.for_each_active_memtable([&total, &action] (replica::memtable& memtable) {
        total += action(memtable);
    });
    return total;
}
|
||||
|
||||
void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace>& sys_ks) {
|
||||
cf::get_column_family_name.set(r, [&ctx] (const_req req){
|
||||
std::vector<sstring> res;
|
||||
@@ -408,14 +338,14 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_memtable_columns_count.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf(ctx, req->get_path_param("name"), uint64_t{0}, [](replica::column_family& cf) {
|
||||
return accumulate_on_active_memtables(cf, std::mem_fn(&replica::memtable::partition_count));
|
||||
return map_reduce_cf(ctx, req->param["name"], uint64_t{0}, [](replica::column_family& cf) {
|
||||
return boost::accumulate(cf.active_memtables() | boost::adaptors::transformed(std::mem_fn(&replica::memtable::partition_count)), uint64_t(0));
|
||||
}, std::plus<>());
|
||||
});
|
||||
|
||||
cf::get_all_memtable_columns_count.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf(ctx, uint64_t{0}, [](replica::column_family& cf) {
|
||||
return accumulate_on_active_memtables(cf, std::mem_fn(&replica::memtable::partition_count));
|
||||
return boost::accumulate(cf.active_memtables() | boost::adaptors::transformed(std::mem_fn(&replica::memtable::partition_count)), uint64_t(0));
|
||||
}, std::plus<>());
|
||||
});
|
||||
|
||||
@@ -428,34 +358,34 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_memtable_off_heap_size.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf(ctx, req->get_path_param("name"), int64_t(0), [](replica::column_family& cf) {
|
||||
return accumulate_on_active_memtables(cf, [] (replica::memtable& active_memtable) {
|
||||
return active_memtable.region().occupancy().total_space();
|
||||
});
|
||||
return map_reduce_cf(ctx, req->param["name"], int64_t(0), [](replica::column_family& cf) {
|
||||
return boost::accumulate(cf.active_memtables() | boost::adaptors::transformed([] (replica::memtable* active_memtable) {
|
||||
return active_memtable->region().occupancy().total_space();
|
||||
}), uint64_t(0));
|
||||
}, std::plus<int64_t>());
|
||||
});
|
||||
|
||||
cf::get_all_memtable_off_heap_size.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf(ctx, int64_t(0), [](replica::column_family& cf) {
|
||||
return accumulate_on_active_memtables(cf, [] (replica::memtable& active_memtable) {
|
||||
return active_memtable.region().occupancy().total_space();
|
||||
});
|
||||
return boost::accumulate(cf.active_memtables() | boost::adaptors::transformed([] (replica::memtable* active_memtable) {
|
||||
return active_memtable->region().occupancy().total_space();
|
||||
}), uint64_t(0));
|
||||
}, std::plus<int64_t>());
|
||||
});
|
||||
|
||||
cf::get_memtable_live_data_size.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf(ctx, req->get_path_param("name"), int64_t(0), [](replica::column_family& cf) {
|
||||
return accumulate_on_active_memtables(cf, [] (replica::memtable& active_memtable) {
|
||||
return active_memtable.region().occupancy().used_space();
|
||||
});
|
||||
return map_reduce_cf(ctx, req->param["name"], int64_t(0), [](replica::column_family& cf) {
|
||||
return boost::accumulate(cf.active_memtables() | boost::adaptors::transformed([] (replica::memtable* active_memtable) {
|
||||
return active_memtable->region().occupancy().used_space();
|
||||
}), uint64_t(0));
|
||||
}, std::plus<int64_t>());
|
||||
});
|
||||
|
||||
cf::get_all_memtable_live_data_size.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf(ctx, int64_t(0), [](replica::column_family& cf) {
|
||||
return accumulate_on_active_memtables(cf, [] (replica::memtable& active_memtable) {
|
||||
return active_memtable.region().occupancy().used_space();
|
||||
});
|
||||
return boost::accumulate(cf.active_memtables() | boost::adaptors::transformed([] (replica::memtable* active_memtable) {
|
||||
return active_memtable->region().occupancy().used_space();
|
||||
}), uint64_t(0));
|
||||
}, std::plus<int64_t>());
|
||||
});
|
||||
|
||||
@@ -469,7 +399,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
|
||||
cf::get_cf_all_memtables_off_heap_size.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
warn(unimplemented::cause::INDEXES);
|
||||
return map_reduce_cf(ctx, req->get_path_param("name"), int64_t(0), [](replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], int64_t(0), [](replica::column_family& cf) {
|
||||
return cf.occupancy().total_space();
|
||||
}, std::plus<int64_t>());
|
||||
});
|
||||
@@ -485,7 +415,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
|
||||
cf::get_cf_all_memtables_live_data_size.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
warn(unimplemented::cause::INDEXES);
|
||||
return map_reduce_cf(ctx, req->get_path_param("name"), int64_t(0), [](replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], int64_t(0), [](replica::column_family& cf) {
|
||||
return cf.occupancy().used_space();
|
||||
}, std::plus<int64_t>());
|
||||
});
|
||||
@@ -493,14 +423,14 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
cf::get_all_cf_all_memtables_live_data_size.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
warn(unimplemented::cause::INDEXES);
|
||||
return map_reduce_cf(ctx, int64_t(0), [](replica::column_family& cf) {
|
||||
return accumulate_on_active_memtables(cf, [] (replica::memtable& active_memtable) {
|
||||
return active_memtable.region().occupancy().used_space();
|
||||
});
|
||||
return boost::accumulate(cf.active_memtables() | boost::adaptors::transformed([] (replica::memtable* active_memtable) {
|
||||
return active_memtable->region().occupancy().used_space();
|
||||
}), uint64_t(0));
|
||||
}, std::plus<int64_t>());
|
||||
});
|
||||
|
||||
cf::get_memtable_switch_count.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return get_cf_stats(ctx,req->get_path_param("name") ,&replica::column_family_stats::memtable_switch_count);
|
||||
return get_cf_stats(ctx,req->param["name"] ,&replica::column_family_stats::memtable_switch_count);
|
||||
});
|
||||
|
||||
cf::get_all_memtable_switch_count.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
@@ -509,7 +439,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
|
||||
// FIXME: this refers to partitions, not rows.
|
||||
cf::get_estimated_row_size_histogram.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf(ctx, req->get_path_param("name"), utils::estimated_histogram(0), [](replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], utils::estimated_histogram(0), [](replica::column_family& cf) {
|
||||
utils::estimated_histogram res(0);
|
||||
for (auto sstables = cf.get_sstables(); auto& i : *sstables) {
|
||||
res.merge(i->get_stats_metadata().estimated_partition_size);
|
||||
@@ -521,7 +451,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
|
||||
// FIXME: this refers to partitions, not rows.
|
||||
cf::get_estimated_row_count.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf(ctx, req->get_path_param("name"), int64_t(0), [](replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], int64_t(0), [](replica::column_family& cf) {
|
||||
uint64_t res = 0;
|
||||
for (auto sstables = cf.get_sstables(); auto& i : *sstables) {
|
||||
res += i->get_stats_metadata().estimated_partition_size.count();
|
||||
@@ -532,7 +462,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_estimated_column_count_histogram.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf(ctx, req->get_path_param("name"), utils::estimated_histogram(0), [](replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], utils::estimated_histogram(0), [](replica::column_family& cf) {
|
||||
utils::estimated_histogram res(0);
|
||||
for (auto sstables = cf.get_sstables(); auto& i : *sstables) {
|
||||
res.merge(i->get_stats_metadata().estimated_cells_count);
|
||||
@@ -549,7 +479,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_pending_flushes.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return get_cf_stats(ctx,req->get_path_param("name") ,&replica::column_family_stats::pending_flushes);
|
||||
return get_cf_stats(ctx,req->param["name"] ,&replica::column_family_stats::pending_flushes);
|
||||
});
|
||||
|
||||
cf::get_all_pending_flushes.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
@@ -557,7 +487,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_read.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return get_cf_stats_count(ctx,req->get_path_param("name") ,&replica::column_family_stats::reads);
|
||||
return get_cf_stats_count(ctx,req->param["name"] ,&replica::column_family_stats::reads);
|
||||
});
|
||||
|
||||
cf::get_all_read.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
@@ -565,7 +495,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_write.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return get_cf_stats_count(ctx, req->get_path_param("name") ,&replica::column_family_stats::writes);
|
||||
return get_cf_stats_count(ctx, req->param["name"] ,&replica::column_family_stats::writes);
|
||||
});
|
||||
|
||||
cf::get_all_write.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
@@ -573,19 +503,19 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_read_latency_histogram_depricated.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return get_cf_histogram(ctx, req->get_path_param("name"), &replica::column_family_stats::reads);
|
||||
return get_cf_histogram(ctx, req->param["name"], &replica::column_family_stats::reads);
|
||||
});
|
||||
|
||||
cf::get_read_latency_histogram.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return get_cf_rate_and_histogram(ctx, req->get_path_param("name"), &replica::column_family_stats::reads);
|
||||
return get_cf_rate_and_histogram(ctx, req->param["name"], &replica::column_family_stats::reads);
|
||||
});
|
||||
|
||||
cf::get_read_latency.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return get_cf_stats_sum(ctx,req->get_path_param("name") ,&replica::column_family_stats::reads);
|
||||
return get_cf_stats_sum(ctx,req->param["name"] ,&replica::column_family_stats::reads);
|
||||
});
|
||||
|
||||
cf::get_write_latency.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return get_cf_stats_sum(ctx, req->get_path_param("name") ,&replica::column_family_stats::writes);
|
||||
return get_cf_stats_sum(ctx, req->param["name"] ,&replica::column_family_stats::writes);
|
||||
});
|
||||
|
||||
cf::get_all_read_latency_histogram_depricated.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
@@ -597,11 +527,11 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_write_latency_histogram_depricated.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return get_cf_histogram(ctx, req->get_path_param("name"), &replica::column_family_stats::writes);
|
||||
return get_cf_histogram(ctx, req->param["name"], &replica::column_family_stats::writes);
|
||||
});
|
||||
|
||||
cf::get_write_latency_histogram.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return get_cf_rate_and_histogram(ctx, req->get_path_param("name"), &replica::column_family_stats::writes);
|
||||
return get_cf_rate_and_histogram(ctx, req->param["name"], &replica::column_family_stats::writes);
|
||||
});
|
||||
|
||||
cf::get_all_write_latency_histogram_depricated.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
@@ -613,7 +543,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_pending_compactions.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf(ctx, req->get_path_param("name"), int64_t(0), [](replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], int64_t(0), [](replica::column_family& cf) {
|
||||
return cf.estimate_pending_compactions();
|
||||
}, std::plus<int64_t>());
|
||||
});
|
||||
@@ -625,7 +555,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_live_ss_table_count.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return get_cf_stats(ctx, req->get_path_param("name"), &replica::column_family_stats::live_sstable_count);
|
||||
return get_cf_stats(ctx, req->param["name"], &replica::column_family_stats::live_sstable_count);
|
||||
});
|
||||
|
||||
cf::get_all_live_ss_table_count.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
@@ -633,11 +563,11 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_unleveled_sstables.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return get_cf_unleveled_sstables(ctx, req->get_path_param("name"));
|
||||
return get_cf_unleveled_sstables(ctx, req->param["name"]);
|
||||
});
|
||||
|
||||
cf::get_live_disk_space_used.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return sum_sstable(ctx, req->get_path_param("name"), false);
|
||||
return sum_sstable(ctx, req->param["name"], false);
|
||||
});
|
||||
|
||||
cf::get_all_live_disk_space_used.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
@@ -645,7 +575,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_total_disk_space_used.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return sum_sstable(ctx, req->get_path_param("name"), true);
|
||||
return sum_sstable(ctx, req->param["name"], true);
|
||||
});
|
||||
|
||||
cf::get_all_total_disk_space_used.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
@@ -654,7 +584,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
|
||||
// FIXME: this refers to partitions, not rows.
|
||||
cf::get_min_row_size.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf(ctx, req->get_path_param("name"), INT64_MAX, min_partition_size, min_int64);
|
||||
return map_reduce_cf(ctx, req->param["name"], INT64_MAX, min_partition_size, min_int64);
|
||||
});
|
||||
|
||||
// FIXME: this refers to partitions, not rows.
|
||||
@@ -664,7 +594,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
|
||||
// FIXME: this refers to partitions, not rows.
|
||||
cf::get_max_row_size.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf(ctx, req->get_path_param("name"), int64_t(0), max_partition_size, max_int64);
|
||||
return map_reduce_cf(ctx, req->param["name"], int64_t(0), max_partition_size, max_int64);
|
||||
});
|
||||
|
||||
// FIXME: this refers to partitions, not rows.
|
||||
@@ -675,7 +605,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
// FIXME: this refers to partitions, not rows.
|
||||
cf::get_mean_row_size.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
// Cassandra 3.x mean values are truncated as integrals.
|
||||
return map_reduce_cf(ctx, req->get_path_param("name"), integral_ratio_holder(), mean_partition_size, std::plus<integral_ratio_holder>());
|
||||
return map_reduce_cf(ctx, req->param["name"], integral_ratio_holder(), mean_partition_size, std::plus<integral_ratio_holder>());
|
||||
});
|
||||
|
||||
// FIXME: this refers to partitions, not rows.
|
||||
@@ -685,7 +615,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_bloom_filter_false_positives.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf(ctx, req->get_path_param("name"), uint64_t(0), [] (replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], uint64_t(0), [] (replica::column_family& cf) {
|
||||
auto sstables = cf.get_sstables();
|
||||
return std::accumulate(sstables->begin(), sstables->end(), uint64_t(0), [](uint64_t s, auto& sst) {
|
||||
return s + sst->filter_get_false_positive();
|
||||
@@ -703,7 +633,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_recent_bloom_filter_false_positives.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf(ctx, req->get_path_param("name"), uint64_t(0), [] (replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], uint64_t(0), [] (replica::column_family& cf) {
|
||||
auto sstables = cf.get_sstables();
|
||||
return std::accumulate(sstables->begin(), sstables->end(), uint64_t(0), [](uint64_t s, auto& sst) {
|
||||
return s + sst->filter_get_recent_false_positive();
|
||||
@@ -721,7 +651,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_bloom_filter_false_ratio.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf(ctx, req->get_path_param("name"), ratio_holder(), [] (replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], ratio_holder(), [] (replica::column_family& cf) {
|
||||
return boost::accumulate(*cf.get_sstables() | boost::adaptors::transformed(filter_false_positive_as_ratio_holder), ratio_holder());
|
||||
}, std::plus<>());
|
||||
});
|
||||
@@ -733,7 +663,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_recent_bloom_filter_false_ratio.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf(ctx, req->get_path_param("name"), ratio_holder(), [] (replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], ratio_holder(), [] (replica::column_family& cf) {
|
||||
return boost::accumulate(*cf.get_sstables() | boost::adaptors::transformed(filter_recent_false_positive_as_ratio_holder), ratio_holder());
|
||||
}, std::plus<>());
|
||||
});
|
||||
@@ -745,7 +675,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_bloom_filter_disk_space_used.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf(ctx, req->get_path_param("name"), uint64_t(0), [] (replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], uint64_t(0), [] (replica::column_family& cf) {
|
||||
auto sstables = cf.get_sstables();
|
||||
return std::accumulate(sstables->begin(), sstables->end(), uint64_t(0), [](uint64_t s, auto& sst) {
|
||||
return s + sst->filter_size();
|
||||
@@ -763,7 +693,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_bloom_filter_off_heap_memory_used.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf(ctx, req->get_path_param("name"), uint64_t(0), [] (replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], uint64_t(0), [] (replica::column_family& cf) {
|
||||
auto sstables = cf.get_sstables();
|
||||
return std::accumulate(sstables->begin(), sstables->end(), uint64_t(0), [](uint64_t s, auto& sst) {
|
||||
return s + sst->filter_memory_size();
|
||||
@@ -781,7 +711,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_index_summary_off_heap_memory_used.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf(ctx, req->get_path_param("name"), uint64_t(0), [] (replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], uint64_t(0), [] (replica::column_family& cf) {
|
||||
auto sstables = cf.get_sstables();
|
||||
return std::accumulate(sstables->begin(), sstables->end(), uint64_t(0), [](uint64_t s, auto& sst) {
|
||||
return s + sst->get_summary().memory_footprint();
|
||||
@@ -804,7 +734,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
// We are missing the off heap memory calculation
|
||||
// Return 0 is the wrong value. It's a work around
|
||||
// until the memory calculation will be available
|
||||
//auto id = get_uuid(req->get_path_param("name"), ctx.db.local());
|
||||
//auto id = get_uuid(req->param["name"], ctx.db.local());
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
@@ -817,7 +747,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
cf::get_speculative_retries.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
//auto id = get_uuid(req->get_path_param("name"), ctx.db.local());
|
||||
//auto id = get_uuid(req->param["name"], ctx.db.local());
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
@@ -830,14 +760,32 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
cf::get_key_cache_hit_rate.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
//auto id = get_uuid(req->get_path_param("name"), ctx.db.local());
|
||||
//auto id = get_uuid(req->param["name"], ctx.db.local());
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
cf::get_true_snapshots_size.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
auto uuid = get_uuid(req->param["name"], ctx.db.local());
|
||||
return ctx.db.local().find_column_family(uuid).get_snapshot_details().then([](
|
||||
const std::unordered_map<sstring, replica::column_family::snapshot_details>& sd) {
|
||||
int64_t res = 0;
|
||||
for (auto i : sd) {
|
||||
res += i.second.total;
|
||||
}
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
});
|
||||
|
||||
cf::get_all_true_snapshots_size.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
cf::get_row_cache_hit_out_of_range.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
//auto id = get_uuid(req->get_path_param("name"), ctx.db.local());
|
||||
//auto id = get_uuid(req->param["name"], ctx.db.local());
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
@@ -848,7 +796,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_row_cache_hit.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf_raw(ctx, req->get_path_param("name"), utils::rate_moving_average(), [](const replica::column_family& cf) {
|
||||
return map_reduce_cf_raw(ctx, req->param["name"], utils::rate_moving_average(), [](const replica::column_family& cf) {
|
||||
return cf.get_row_cache().stats().hits.rate();
|
||||
}, std::plus<utils::rate_moving_average>()).then([](const utils::rate_moving_average& m) {
|
||||
return make_ready_future<json::json_return_type>(meter_to_json(m));
|
||||
@@ -864,7 +812,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_row_cache_miss.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf_raw(ctx, req->get_path_param("name"), utils::rate_moving_average(), [](const replica::column_family& cf) {
|
||||
return map_reduce_cf_raw(ctx, req->param["name"], utils::rate_moving_average(), [](const replica::column_family& cf) {
|
||||
return cf.get_row_cache().stats().misses.rate();
|
||||
}, std::plus<utils::rate_moving_average>()).then([](const utils::rate_moving_average& m) {
|
||||
return make_ready_future<json::json_return_type>(meter_to_json(m));
|
||||
@@ -881,120 +829,102 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_cas_prepare.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf_time_histogram(ctx, req->get_path_param("name"), [](const replica::column_family& cf) {
|
||||
return map_reduce_cf_time_histogram(ctx, req->param["name"], [](const replica::column_family& cf) {
|
||||
return cf.get_stats().cas_prepare.histogram();
|
||||
});
|
||||
});
|
||||
|
||||
cf::get_cas_propose.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf_time_histogram(ctx, req->get_path_param("name"), [](const replica::column_family& cf) {
|
||||
return map_reduce_cf_time_histogram(ctx, req->param["name"], [](const replica::column_family& cf) {
|
||||
return cf.get_stats().cas_accept.histogram();
|
||||
});
|
||||
});
|
||||
|
||||
cf::get_cas_commit.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf_time_histogram(ctx, req->get_path_param("name"), [](const replica::column_family& cf) {
|
||||
return map_reduce_cf_time_histogram(ctx, req->param["name"], [](const replica::column_family& cf) {
|
||||
return cf.get_stats().cas_learn.histogram();
|
||||
});
|
||||
});
|
||||
|
||||
cf::get_sstables_per_read_histogram.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf(ctx, req->get_path_param("name"), utils::estimated_histogram(0), [](replica::column_family& cf) {
|
||||
return map_reduce_cf(ctx, req->param["name"], utils::estimated_histogram(0), [](replica::column_family& cf) {
|
||||
return cf.get_stats().estimated_sstable_per_read;
|
||||
},
|
||||
utils::estimated_histogram_merge, utils_json::estimated_histogram());
|
||||
});
|
||||
|
||||
cf::get_tombstone_scanned_histogram.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return get_cf_histogram(ctx, req->get_path_param("name"), &replica::column_family_stats::tombstone_scanned);
|
||||
return get_cf_histogram(ctx, req->param["name"], &replica::column_family_stats::tombstone_scanned);
|
||||
});
|
||||
|
||||
cf::get_live_scanned_histogram.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
return get_cf_histogram(ctx, req->get_path_param("name"), &replica::column_family_stats::live_scanned);
|
||||
return get_cf_histogram(ctx, req->param["name"], &replica::column_family_stats::live_scanned);
|
||||
});
|
||||
|
||||
cf::get_col_update_time_delta_histogram.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
//auto id = get_uuid(req->get_path_param("name"), ctx.db.local());
|
||||
//auto id = get_uuid(req->param["name"], ctx.db.local());
|
||||
std::vector<double> res;
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
|
||||
cf::get_auto_compaction.set(r, [&ctx] (const_req req) {
|
||||
auto uuid = get_uuid(req.get_path_param("name"), ctx.db.local());
|
||||
auto uuid = get_uuid(req.param["name"], ctx.db.local());
|
||||
replica::column_family& cf = ctx.db.local().find_column_family(uuid);
|
||||
return !cf.is_auto_compaction_disabled_by_user();
|
||||
});
|
||||
|
||||
cf::enable_auto_compaction.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
apilog.info("column_family/enable_auto_compaction: name={}", req->get_path_param("name"));
|
||||
auto [ks, cf] = parse_fully_qualified_cf_name(req->get_path_param("name"));
|
||||
validate_table(ctx, ks, cf);
|
||||
return set_tables_autocompaction(ctx, ks, {std::move(cf)}, true);
|
||||
apilog.info("column_family/enable_auto_compaction: name={}", req->param["name"]);
|
||||
return ctx.db.invoke_on(0, [&ctx, req = std::move(req)] (replica::database& db) {
|
||||
auto g = replica::database::autocompaction_toggle_guard(db);
|
||||
return foreach_column_family(ctx, req->param["name"], [](replica::column_family &cf) {
|
||||
cf.enable_auto_compaction();
|
||||
}).then([g = std::move(g)] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
cf::disable_auto_compaction.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
apilog.info("column_family/disable_auto_compaction: name={}", req->get_path_param("name"));
|
||||
auto [ks, cf] = parse_fully_qualified_cf_name(req->get_path_param("name"));
|
||||
validate_table(ctx, ks, cf);
|
||||
return set_tables_autocompaction(ctx, ks, {std::move(cf)}, false);
|
||||
});
|
||||
|
||||
ss::enable_auto_compaction.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req);
|
||||
auto tables = parse_tables(keyspace, ctx, req->query_parameters, "cf");
|
||||
|
||||
apilog.info("enable_auto_compaction: keyspace={} tables={}", keyspace, tables);
|
||||
return set_tables_autocompaction(ctx, keyspace, tables, true);
|
||||
});
|
||||
|
||||
ss::disable_auto_compaction.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req);
|
||||
auto tables = parse_tables(keyspace, ctx, req->query_parameters, "cf");
|
||||
|
||||
apilog.info("disable_auto_compaction: keyspace={} tables={}", keyspace, tables);
|
||||
return set_tables_autocompaction(ctx, keyspace, tables, false);
|
||||
apilog.info("column_family/disable_auto_compaction: name={}", req->param["name"]);
|
||||
return ctx.db.invoke_on(0, [&ctx, req = std::move(req)] (replica::database& db) {
|
||||
auto g = replica::database::autocompaction_toggle_guard(db);
|
||||
return foreach_column_family(ctx, req->param["name"], [](replica::column_family &cf) {
|
||||
return cf.disable_auto_compaction();
|
||||
}).then([g = std::move(g)] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
cf::get_tombstone_gc.set(r, [&ctx] (const_req req) {
|
||||
auto uuid = get_uuid(req.get_path_param("name"), ctx.db.local());
|
||||
auto uuid = get_uuid(req.param["name"], ctx.db.local());
|
||||
replica::table& t = ctx.db.local().find_column_family(uuid);
|
||||
return t.tombstone_gc_enabled();
|
||||
});
|
||||
|
||||
cf::enable_tombstone_gc.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
apilog.info("column_family/enable_tombstone_gc: name={}", req->get_path_param("name"));
|
||||
auto [ks, cf] = parse_fully_qualified_cf_name(req->get_path_param("name"));
|
||||
validate_table(ctx, ks, cf);
|
||||
return set_tables_tombstone_gc(ctx, ks, {std::move(cf)}, true);
|
||||
apilog.info("column_family/enable_tombstone_gc: name={}", req->param["name"]);
|
||||
return foreach_column_family(ctx, req->param["name"], [](replica::table& t) {
|
||||
t.set_tombstone_gc_enabled(true);
|
||||
}).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
cf::disable_tombstone_gc.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
apilog.info("column_family/disable_tombstone_gc: name={}", req->get_path_param("name"));
|
||||
auto [ks, cf] = parse_fully_qualified_cf_name(req->get_path_param("name"));
|
||||
validate_table(ctx, ks, cf);
|
||||
return set_tables_tombstone_gc(ctx, ks, {std::move(cf)}, false);
|
||||
});
|
||||
|
||||
ss::enable_tombstone_gc.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req);
|
||||
auto tables = parse_tables(keyspace, ctx, req->query_parameters, "cf");
|
||||
|
||||
apilog.info("enable_tombstone_gc: keyspace={} tables={}", keyspace, tables);
|
||||
return set_tables_tombstone_gc(ctx, keyspace, tables, true);
|
||||
});
|
||||
|
||||
ss::disable_tombstone_gc.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req);
|
||||
auto tables = parse_tables(keyspace, ctx, req->query_parameters, "cf");
|
||||
|
||||
apilog.info("disable_tombstone_gc: keyspace={} tables={}", keyspace, tables);
|
||||
return set_tables_tombstone_gc(ctx, keyspace, tables, false);
|
||||
apilog.info("column_family/disable_tombstone_gc: name={}", req->param["name"]);
|
||||
return foreach_column_family(ctx, req->param["name"], [](replica::table& t) {
|
||||
t.set_tombstone_gc_enabled(false);
|
||||
}).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
cf::get_built_indexes.set(r, [&ctx, &sys_ks](std::unique_ptr<http::request> req) {
|
||||
auto ks_cf = parse_fully_qualified_cf_name(req->get_path_param("name"));
|
||||
auto ks_cf = parse_fully_qualified_cf_name(req->param["name"]);
|
||||
auto&& ks = std::get<0>(ks_cf);
|
||||
auto&& cf_name = std::get<1>(ks_cf);
|
||||
return sys_ks.local().load_view_build_progress().then([ks, cf_name, &ctx](const std::vector<db::system_keyspace_view_build_progress>& vb) mutable {
|
||||
@@ -1032,7 +962,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_compression_ratio.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
auto uuid = get_uuid(req->get_path_param("name"), ctx.db.local());
|
||||
auto uuid = get_uuid(req->param["name"], ctx.db.local());
|
||||
|
||||
return ctx.db.map_reduce(sum_ratio<double>(), [uuid](replica::database& db) {
|
||||
replica::column_family& cf = db.find_column_family(uuid);
|
||||
@@ -1043,21 +973,21 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_read_latency_estimated_histogram.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf_time_histogram(ctx, req->get_path_param("name"), [](const replica::column_family& cf) {
|
||||
return map_reduce_cf_time_histogram(ctx, req->param["name"], [](const replica::column_family& cf) {
|
||||
return cf.get_stats().reads.histogram();
|
||||
});
|
||||
});
|
||||
|
||||
cf::get_write_latency_estimated_histogram.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf_time_histogram(ctx, req->get_path_param("name"), [](const replica::column_family& cf) {
|
||||
return map_reduce_cf_time_histogram(ctx, req->param["name"], [](const replica::column_family& cf) {
|
||||
return cf.get_stats().writes.histogram();
|
||||
});
|
||||
});
|
||||
|
||||
cf::set_compaction_strategy_class.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
sstring strategy = req->get_query_param("class_name");
|
||||
apilog.info("column_family/set_compaction_strategy_class: name={} strategy={}", req->get_path_param("name"), strategy);
|
||||
return foreach_column_family(ctx, req->get_path_param("name"), [strategy](replica::column_family& cf) {
|
||||
apilog.info("column_family/set_compaction_strategy_class: name={} strategy={}", req->param["name"], strategy);
|
||||
return foreach_column_family(ctx, req->param["name"], [strategy](replica::column_family& cf) {
|
||||
cf.set_compaction_strategy(sstables::compaction_strategy::type(strategy));
|
||||
}).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
@@ -1065,7 +995,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_compaction_strategy_class.set(r, [&ctx](const_req req) {
|
||||
return ctx.db.local().find_column_family(get_uuid(req.get_path_param("name"), ctx.db.local())).get_compaction_strategy().name();
|
||||
return ctx.db.local().find_column_family(get_uuid(req.param["name"], ctx.db.local())).get_compaction_strategy().name();
|
||||
});
|
||||
|
||||
cf::set_compression_parameters.set(r, [](std::unique_ptr<http::request> req) {
|
||||
@@ -1081,7 +1011,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
});
|
||||
|
||||
cf::get_sstable_count_per_level.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
return map_reduce_cf_raw(ctx, req->get_path_param("name"), std::vector<uint64_t>(), [](const replica::column_family& cf) {
|
||||
return map_reduce_cf_raw(ctx, req->param["name"], std::vector<uint64_t>(), [](const replica::column_family& cf) {
|
||||
return cf.sstable_count_per_level();
|
||||
}, concat_sstable_count_per_level).then([](const std::vector<uint64_t>& res) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
@@ -1090,7 +1020,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
|
||||
cf::get_sstables_for_key.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
auto key = req->get_query_param("key");
|
||||
auto uuid = get_uuid(req->get_path_param("name"), ctx.db.local());
|
||||
auto uuid = get_uuid(req->param["name"], ctx.db.local());
|
||||
|
||||
return ctx.db.map_reduce0([key, uuid] (replica::database& db) -> future<std::unordered_set<sstring>> {
|
||||
auto sstables = co_await db.find_column_family(uuid).get_sstables_by_partition_key(key);
|
||||
@@ -1106,7 +1036,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
|
||||
|
||||
cf::toppartitions.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
auto name = req->get_path_param("name");
|
||||
auto name = req->param["name"];
|
||||
auto [ks, cf] = parse_fully_qualified_cf_name(name);
|
||||
|
||||
api::req_param<std::chrono::milliseconds, unsigned> duration{*req, "duration", 1000ms};
|
||||
@@ -1133,7 +1063,7 @@ void set_column_family(http_context& ctx, routes& r, sharded<db::system_keyspace
|
||||
}
|
||||
auto [ks, cf] = parse_fully_qualified_cf_name(*params.get("name"));
|
||||
auto flush = params.get_as<bool>("flush_memtables").value_or(true);
|
||||
apilog.info("column_family/force_major_compaction: name={} flush={}", req->get_path_param("name"), flush);
|
||||
apilog.info("column_family/force_major_compaction: name={} flush={}", req->param["name"], flush);
|
||||
|
||||
auto keyspace = validate_keyspace(ctx, ks);
|
||||
std::vector<table_info> table_infos = {table_info{
|
||||
@@ -1226,6 +1156,8 @@ void unset_column_family(http_context& ctx, routes& r) {
|
||||
cf::get_speculative_retries.unset(r);
|
||||
cf::get_all_speculative_retries.unset(r);
|
||||
cf::get_key_cache_hit_rate.unset(r);
|
||||
cf::get_true_snapshots_size.unset(r);
|
||||
cf::get_all_true_snapshots_size.unset(r);
|
||||
cf::get_row_cache_hit_out_of_range.unset(r);
|
||||
cf::get_all_row_cache_hit_out_of_range.unset(r);
|
||||
cf::get_row_cache_hit.unset(r);
|
||||
@@ -1242,13 +1174,6 @@ void unset_column_family(http_context& ctx, routes& r) {
|
||||
cf::get_auto_compaction.unset(r);
|
||||
cf::enable_auto_compaction.unset(r);
|
||||
cf::disable_auto_compaction.unset(r);
|
||||
ss::enable_auto_compaction.unset(r);
|
||||
ss::disable_auto_compaction.unset(r);
|
||||
cf::get_tombstone_gc.unset(r);
|
||||
cf::enable_tombstone_gc.unset(r);
|
||||
cf::disable_tombstone_gc.unset(r);
|
||||
ss::enable_tombstone_gc.unset(r);
|
||||
ss::disable_tombstone_gc.unset(r);
|
||||
cf::get_built_indexes.unset(r);
|
||||
cf::get_compression_metadata_off_heap_memory_used.unset(r);
|
||||
cf::get_compression_parameters.unset(r);
|
||||
|
||||
@@ -7,7 +7,6 @@
|
||||
*/
|
||||
|
||||
#include <seastar/core/coroutine.hh>
|
||||
#include <seastar/coroutine/exception.hh>
|
||||
|
||||
#include "compaction_manager.hh"
|
||||
#include "compaction/compaction_manager.hh"
|
||||
@@ -111,7 +110,7 @@ void set_compaction_manager(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cm::stop_keyspace_compaction.set(r, [&ctx] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto ks_name = validate_keyspace(ctx, req);
|
||||
auto ks_name = validate_keyspace(ctx, req->param);
|
||||
auto table_names = parse_tables(ks_name, ctx, req->query_parameters, "tables");
|
||||
if (table_names.empty()) {
|
||||
table_names = map_keys(ctx.db.local().find_keyspace(ks_name).metadata().get()->cf_meta_data());
|
||||
@@ -154,13 +153,10 @@ void set_compaction_manager(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
cm::get_compaction_history.set(r, [&ctx] (std::unique_ptr<http::request> req) {
|
||||
std::function<future<>(output_stream<char>&&)> f = [&ctx] (output_stream<char>&& out) -> future<> {
|
||||
auto s = std::move(out);
|
||||
bool first = true;
|
||||
std::exception_ptr ex;
|
||||
try {
|
||||
co_await s.write("[");
|
||||
co_await ctx.db.local().get_compaction_manager().get_compaction_history([&s, &first](const db::compaction_history_entry& entry) mutable -> future<> {
|
||||
std::function<future<>(output_stream<char>&&)> f = [&ctx](output_stream<char>&& s) {
|
||||
return do_with(output_stream<char>(std::move(s)), true, [&ctx] (output_stream<char>& s, bool& first){
|
||||
return s.write("[").then([&ctx, &s, &first] {
|
||||
return ctx.db.local().get_compaction_manager().get_compaction_history([&s, &first](const db::compaction_history_entry& entry) mutable {
|
||||
cm::history h;
|
||||
h.id = fmt::to_string(entry.id);
|
||||
h.ks = std::move(entry.ks);
|
||||
@@ -174,21 +170,18 @@ void set_compaction_manager(http_context& ctx, routes& r) {
|
||||
e.value = it.second;
|
||||
h.rows_merged.push(std::move(e));
|
||||
}
|
||||
if (!first) {
|
||||
co_await s.write(", ");
|
||||
}
|
||||
auto fut = first ? make_ready_future<>() : s.write(", ");
|
||||
first = false;
|
||||
co_await formatter::write(s, h);
|
||||
return fut.then([&s, h = std::move(h)] {
|
||||
return formatter::write(s, h);
|
||||
});
|
||||
}).then([&s] {
|
||||
return s.write("]").then([&s] {
|
||||
return s.close();
|
||||
});
|
||||
});
|
||||
co_await s.write("]");
|
||||
co_await s.flush();
|
||||
} catch (...) {
|
||||
ex = std::current_exception();
|
||||
}
|
||||
co_await s.close();
|
||||
if (ex) {
|
||||
co_await coroutine::return_exception_ptr(std::move(ex));
|
||||
}
|
||||
});
|
||||
});
|
||||
};
|
||||
return make_ready_future<json::json_return_type>(std::move(f));
|
||||
});
|
||||
|
||||
@@ -92,7 +92,7 @@ void set_config(std::shared_ptr < api_registry_builder20 > rb, http_context& ctx
|
||||
});
|
||||
|
||||
cs::find_config_id.set(r, [&cfg] (const_req r) {
|
||||
auto id = r.get_path_param("id");
|
||||
auto id = r.param["id"];
|
||||
for (auto&& cfg_ref : cfg.values()) {
|
||||
auto&& cfg = cfg_ref.get();
|
||||
if (id == cfg.name()) {
|
||||
|
||||
@@ -24,7 +24,7 @@ namespace hf = httpd::error_injection_json;
|
||||
void set_error_injection(http_context& ctx, routes& r) {
|
||||
|
||||
hf::enable_injection.set(r, [](std::unique_ptr<request> req) {
|
||||
sstring injection = req->get_path_param("injection");
|
||||
sstring injection = req->param["injection"];
|
||||
bool one_shot = req->get_query_param("one_shot") == "True";
|
||||
auto params = req->content;
|
||||
|
||||
@@ -56,7 +56,7 @@ void set_error_injection(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
hf::disable_injection.set(r, [](std::unique_ptr<request> req) {
|
||||
sstring injection = req->get_path_param("injection");
|
||||
sstring injection = req->param["injection"];
|
||||
|
||||
auto& errinj = utils::get_local_injector();
|
||||
return errinj.disable_on_all(injection).then([] {
|
||||
@@ -64,32 +64,6 @@ void set_error_injection(http_context& ctx, routes& r) {
|
||||
});
|
||||
});
|
||||
|
||||
hf::read_injection.set(r, [](std::unique_ptr<request> req) -> future<json::json_return_type> {
|
||||
const sstring injection = req->get_path_param("injection");
|
||||
|
||||
std::vector<error_injection_json::error_injection_info> error_injection_infos(smp::count, error_injection_json::error_injection_info{});
|
||||
|
||||
co_await smp::invoke_on_all([&] {
|
||||
auto& info = error_injection_infos[this_shard_id()];
|
||||
auto& errinj = utils::get_local_injector();
|
||||
const auto enabled = errinj.is_enabled(injection);
|
||||
info.enabled = enabled;
|
||||
if (!enabled) {
|
||||
return;
|
||||
}
|
||||
std::vector<error_injection_json::mapper> parameters;
|
||||
for (const auto& p : errinj.get_injection_parameters(injection)) {
|
||||
error_injection_json::mapper param;
|
||||
param.key = p.first;
|
||||
param.value = p.second;
|
||||
parameters.push_back(std::move(param));
|
||||
}
|
||||
info.parameters = std::move(parameters);
|
||||
});
|
||||
|
||||
co_return json::json_return_type(error_injection_infos);
|
||||
});
|
||||
|
||||
hf::disable_on_all.set(r, [](std::unique_ptr<request> req) {
|
||||
auto& errinj = utils::get_local_injector();
|
||||
return errinj.disable_on_all().then([] {
|
||||
@@ -98,7 +72,7 @@ void set_error_injection(http_context& ctx, routes& r) {
|
||||
});
|
||||
|
||||
hf::message_injection.set(r, [](std::unique_ptr<request> req) {
|
||||
sstring injection = req->get_path_param("injection");
|
||||
sstring injection = req->param["injection"];
|
||||
auto& errinj = utils::get_local_injector();
|
||||
return errinj.receive_message_on_all(injection).then([] {
|
||||
return make_ready_future<json::json_return_type>(json::json_void());
|
||||
|
||||
@@ -66,7 +66,7 @@ void set_failure_detector(http_context& ctx, routes& r, gms::gossiper& g) {
|
||||
return g.container().invoke_on(0, [] (gms::gossiper& g) {
|
||||
std::map<sstring, sstring> nodes_status;
|
||||
g.for_each_endpoint_state([&] (const gms::inet_address& node, const gms::endpoint_state&) {
|
||||
nodes_status.emplace(fmt::to_string(node), g.is_alive(node) ? "UP" : "DOWN");
|
||||
nodes_status.emplace(node.to_sstring(), g.is_alive(node) ? "UP" : "DOWN");
|
||||
});
|
||||
return make_ready_future<json::json_return_type>(map_to_key_value<fd::mapper>(nodes_status));
|
||||
});
|
||||
@@ -81,9 +81,9 @@ void set_failure_detector(http_context& ctx, routes& r, gms::gossiper& g) {
|
||||
|
||||
fd::get_endpoint_state.set(r, [&g] (std::unique_ptr<request> req) {
|
||||
return g.container().invoke_on(0, [req = std::move(req)] (gms::gossiper& g) {
|
||||
auto state = g.get_endpoint_state_ptr(gms::inet_address(req->get_path_param("addr")));
|
||||
auto state = g.get_endpoint_state_ptr(gms::inet_address(req->param["addr"]));
|
||||
if (!state) {
|
||||
return make_ready_future<json::json_return_type>(format("unknown endpoint {}", req->get_path_param("addr")));
|
||||
return make_ready_future<json::json_return_type>(format("unknown endpoint {}", req->param["addr"]));
|
||||
}
|
||||
std::stringstream ss;
|
||||
g.append_endpoint_state(ss, *state);
|
||||
|
||||
@@ -32,21 +32,21 @@ void set_gossiper(http_context& ctx, routes& r, gms::gossiper& g) {
|
||||
});
|
||||
|
||||
httpd::gossiper_json::get_endpoint_downtime.set(r, [&g] (std::unique_ptr<request> req) -> future<json::json_return_type> {
|
||||
gms::inet_address ep(req->get_path_param("addr"));
|
||||
gms::inet_address ep(req->param["addr"]);
|
||||
// synchronize unreachable_members on all shards
|
||||
co_await g.get_unreachable_members_synchronized();
|
||||
co_return g.get_endpoint_downtime(ep);
|
||||
});
|
||||
|
||||
httpd::gossiper_json::get_current_generation_number.set(r, [&g] (std::unique_ptr<http::request> req) {
|
||||
gms::inet_address ep(req->get_path_param("addr"));
|
||||
gms::inet_address ep(req->param["addr"]);
|
||||
return g.get_current_generation_number(ep).then([] (gms::generation_type res) {
|
||||
return make_ready_future<json::json_return_type>(res.value());
|
||||
});
|
||||
});
|
||||
|
||||
httpd::gossiper_json::get_current_heart_beat_version.set(r, [&g] (std::unique_ptr<http::request> req) {
|
||||
gms::inet_address ep(req->get_path_param("addr"));
|
||||
gms::inet_address ep(req->param["addr"]);
|
||||
return g.get_current_heart_beat_version(ep).then([] (gms::version_type res) {
|
||||
return make_ready_future<json::json_return_type>(res.value());
|
||||
});
|
||||
@@ -54,17 +54,17 @@ void set_gossiper(http_context& ctx, routes& r, gms::gossiper& g) {
|
||||
|
||||
httpd::gossiper_json::assassinate_endpoint.set(r, [&g](std::unique_ptr<http::request> req) {
|
||||
if (req->get_query_param("unsafe") != "True") {
|
||||
return g.assassinate_endpoint(req->get_path_param("addr")).then([] {
|
||||
return g.assassinate_endpoint(req->param["addr"]).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
}
|
||||
return g.unsafe_assassinate_endpoint(req->get_path_param("addr")).then([] {
|
||||
return g.unsafe_assassinate_endpoint(req->param["addr"]).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
});
|
||||
|
||||
httpd::gossiper_json::force_remove_endpoint.set(r, [&g](std::unique_ptr<http::request> req) {
|
||||
gms::inet_address ep(req->get_path_param("addr"));
|
||||
gms::inet_address ep(req->param["addr"]);
|
||||
return g.force_remove_endpoint(ep, gms::null_permit_id).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
@@ -146,7 +146,7 @@ void set_messaging_service(http_context& ctx, routes& r, sharded<netw::messaging
|
||||
});
|
||||
|
||||
hf::inject_disconnect.set(r, [&ms] (std::unique_ptr<request> req) -> future<json::json_return_type> {
|
||||
auto ip = msg_addr(req->get_path_param("ip"));
|
||||
auto ip = msg_addr(req->param["ip"]);
|
||||
co_await ms.invoke_on_all([ip] (netw::messaging_service& ms) {
|
||||
ms.remove_rpc_client(ip);
|
||||
});
|
||||
|
||||
34
api/raft.cc
34
api/raft.cc
@@ -24,7 +24,7 @@ using namespace json;
|
||||
|
||||
void set_raft(http_context&, httpd::routes& r, sharded<service::raft_group_registry>& raft_gr) {
|
||||
r::trigger_snapshot.set(r, [&raft_gr] (std::unique_ptr<http::request> req) -> future<json_return_type> {
|
||||
raft::group_id gid{utils::UUID{req->get_path_param("group_id")}};
|
||||
raft::group_id gid{utils::UUID{req->param["group_id"]}};
|
||||
auto timeout_dur = std::invoke([timeout_str = req->get_query_param("timeout")] {
|
||||
if (timeout_str.empty()) {
|
||||
return std::chrono::seconds{60};
|
||||
@@ -61,31 +61,17 @@ void set_raft(http_context&, httpd::routes& r, sharded<service::raft_group_regis
|
||||
co_return json_void{};
|
||||
});
|
||||
r::get_leader_host.set(r, [&raft_gr] (std::unique_ptr<http::request> req) -> future<json_return_type> {
|
||||
if (!req->query_parameters.contains("group_id")) {
|
||||
const auto leader_id = co_await raft_gr.invoke_on(0, [] (service::raft_group_registry& raft_gr) {
|
||||
auto& srv = raft_gr.group0();
|
||||
return srv.current_leader();
|
||||
return smp::submit_to(0, [&] {
|
||||
auto& srv = std::invoke([&] () -> raft::server& {
|
||||
if (req->query_parameters.contains("group_id")) {
|
||||
raft::group_id id{utils::UUID{req->get_query_param("group_id")}};
|
||||
return raft_gr.local().get_server(id);
|
||||
} else {
|
||||
return raft_gr.local().group0();
|
||||
}
|
||||
});
|
||||
co_return json_return_type{leader_id.to_sstring()};
|
||||
}
|
||||
|
||||
const raft::group_id gid{utils::UUID{req->get_query_param("group_id")}};
|
||||
|
||||
std::atomic<bool> found_srv{false};
|
||||
std::atomic<raft::server_id> leader_id = raft::server_id::create_null_id();
|
||||
co_await raft_gr.invoke_on_all([gid, &found_srv, &leader_id] (service::raft_group_registry& raft_gr) {
|
||||
if (raft_gr.find_server(gid)) {
|
||||
found_srv = true;
|
||||
leader_id = raft_gr.get_server(gid).current_leader();
|
||||
}
|
||||
return make_ready_future<>();
|
||||
return json_return_type(srv.current_leader().to_sstring());
|
||||
});
|
||||
|
||||
if (!found_srv) {
|
||||
throw bad_param_exception{fmt::format("Server for group ID {} not found", gid)};
|
||||
}
|
||||
|
||||
co_return json_return_type(leader_id.load().to_sstring());
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
@@ -26,7 +26,6 @@
|
||||
#include <boost/algorithm/string/trim_all.hpp>
|
||||
#include <boost/algorithm/string/case_conv.hpp>
|
||||
#include <boost/functional/hash.hpp>
|
||||
#include <fmt/ranges.h>
|
||||
#include "service/raft/raft_group0_client.hh"
|
||||
#include "service/storage_service.hh"
|
||||
#include "service/load_meter.hh"
|
||||
@@ -36,7 +35,6 @@
|
||||
#include <seastar/http/exception.hh>
|
||||
#include <seastar/core/coroutine.hh>
|
||||
#include <seastar/coroutine/parallel_for_each.hh>
|
||||
#include <seastar/coroutine/exception.hh>
|
||||
#include "repair/row_level.hh"
|
||||
#include "locator/snitch_base.hh"
|
||||
#include "column_family.hh"
|
||||
@@ -55,7 +53,6 @@
|
||||
#include "locator/abstract_replication_strategy.hh"
|
||||
#include "sstables_loader.hh"
|
||||
#include "db/view/view_builder.hh"
|
||||
#include "utils/user_provided_param.hh"
|
||||
|
||||
using namespace seastar::httpd;
|
||||
using namespace std::chrono_literals;
|
||||
@@ -66,7 +63,6 @@ namespace api {
|
||||
|
||||
namespace ss = httpd::storage_service_json;
|
||||
namespace sp = httpd::storage_proxy_json;
|
||||
namespace cf = httpd::column_family_json;
|
||||
using namespace json;
|
||||
|
||||
sstring validate_keyspace(const http_context& ctx, sstring ks_name) {
|
||||
@@ -76,15 +72,11 @@ sstring validate_keyspace(const http_context& ctx, sstring ks_name) {
|
||||
throw bad_param_exception(replica::no_such_keyspace(ks_name).what());
|
||||
}
|
||||
|
||||
sstring validate_keyspace(const http_context& ctx, const std::unique_ptr<http::request>& req) {
|
||||
return validate_keyspace(ctx, req->get_path_param("keyspace"));
|
||||
sstring validate_keyspace(const http_context& ctx, const parameters& param) {
|
||||
return validate_keyspace(ctx, param["keyspace"]);
|
||||
}
|
||||
|
||||
sstring validate_keyspace(const http_context& ctx, const http::request& req) {
|
||||
return validate_keyspace(ctx, req.get_path_param("keyspace"));
|
||||
}
|
||||
|
||||
void validate_table(const http_context& ctx, sstring ks_name, sstring table_name) {
|
||||
static void validate_table(const http_context& ctx, sstring ks_name, sstring table_name) {
|
||||
auto& db = ctx.db.local();
|
||||
try {
|
||||
db.find_column_family(ks_name, table_name);
|
||||
@@ -207,13 +199,14 @@ using ks_cf_func = std::function<future<json::json_return_type>(http_context&, s
|
||||
|
||||
static auto wrap_ks_cf(http_context &ctx, ks_cf_func f) {
|
||||
return [&ctx, f = std::move(f)](std::unique_ptr<http::request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req);
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto table_infos = parse_table_infos(keyspace, ctx, req->query_parameters, "cf");
|
||||
return f(ctx, std::move(req), std::move(keyspace), std::move(table_infos));
|
||||
};
|
||||
}
|
||||
|
||||
seastar::future<json::json_return_type> run_toppartitions_query(db::toppartitions_query& q, http_context &ctx, bool legacy_request) {
|
||||
namespace cf = httpd::column_family_json;
|
||||
return q.scatter().then([&q, legacy_request] {
|
||||
return sleep(q.duration()).then([&q, legacy_request] {
|
||||
return q.gather(q.capacity()).then([&q, legacy_request] (auto topk_results) {
|
||||
@@ -243,6 +236,47 @@ seastar::future<json::json_return_type> run_toppartitions_query(db::toppartition
|
||||
});
|
||||
}
|
||||
|
||||
static future<json::json_return_type> set_tables(http_context& ctx, const sstring& keyspace, std::vector<sstring> tables, std::function<future<>(replica::table&)> set) {
|
||||
if (tables.empty()) {
|
||||
tables = map_keys(ctx.db.local().find_keyspace(keyspace).metadata().get()->cf_meta_data());
|
||||
}
|
||||
|
||||
return do_with(keyspace, std::move(tables), [&ctx, set] (const sstring& keyspace, const std::vector<sstring>& tables) {
|
||||
return ctx.db.invoke_on_all([&keyspace, &tables, set] (replica::database& db) {
|
||||
return parallel_for_each(tables, [&db, &keyspace, set] (const sstring& table) {
|
||||
replica::table& t = db.find_column_family(keyspace, table);
|
||||
return set(t);
|
||||
});
|
||||
});
|
||||
}).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
}
|
||||
|
||||
future<json::json_return_type> set_tables_autocompaction(http_context& ctx, const sstring &keyspace, std::vector<sstring> tables, bool enabled) {
|
||||
apilog.info("set_tables_autocompaction: enabled={} keyspace={} tables={}", enabled, keyspace, tables);
|
||||
|
||||
return ctx.db.invoke_on(0, [&ctx, keyspace, tables = std::move(tables), enabled] (replica::database& db) {
|
||||
auto g = replica::database::autocompaction_toggle_guard(db);
|
||||
return set_tables(ctx, keyspace, tables, [enabled] (replica::table& cf) {
|
||||
if (enabled) {
|
||||
cf.enable_auto_compaction();
|
||||
} else {
|
||||
return cf.disable_auto_compaction();
|
||||
}
|
||||
return make_ready_future<>();
|
||||
}).finally([g = std::move(g)] {});
|
||||
});
|
||||
}
|
||||
|
||||
future<json::json_return_type> set_tables_tombstone_gc(http_context& ctx, const sstring &keyspace, std::vector<sstring> tables, bool enabled) {
|
||||
apilog.info("set_tables_tombstone_gc: enabled={} keyspace={} tables={}", enabled, keyspace, tables);
|
||||
return set_tables(ctx, keyspace, std::move(tables), [enabled] (replica::table& t) {
|
||||
t.set_tombstone_gc_enabled(enabled);
|
||||
return make_ready_future<>();
|
||||
});
|
||||
}
|
||||
|
||||
future<scrub_info> parse_scrub_options(const http_context& ctx, sharded<db::snapshot_ctl>& snap_ctl, std::unique_ptr<http::request> req) {
|
||||
scrub_info info;
|
||||
auto rp = req_params({
|
||||
@@ -380,7 +414,7 @@ void unset_rpc_controller(http_context& ctx, routes& r) {
|
||||
}
|
||||
|
||||
void set_repair(http_context& ctx, routes& r, sharded<repair_service>& repair) {
|
||||
ss::repair_async.set(r, [&ctx, &repair](std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
ss::repair_async.set(r, [&ctx, &repair](std::unique_ptr<http::request> req) {
|
||||
static std::unordered_set<sstring> options = {"primaryRange", "parallelism", "incremental",
|
||||
"jobThreads", "ranges", "columnFamilies", "dataCenters", "hosts", "ignore_nodes", "trace",
|
||||
"startToken", "endToken", "ranges_parallelism", "small_table_optimization"};
|
||||
@@ -393,7 +427,8 @@ void set_repair(http_context& ctx, routes& r, sharded<repair_service>& repair) {
|
||||
continue;
|
||||
}
|
||||
if (!options.contains(x.first)) {
|
||||
throw httpd::bad_param_exception(format("option {} is not supported", x.first));
|
||||
return make_exception_future<json::json_return_type>(
|
||||
httpd::bad_param_exception(format("option {} is not supported", x.first)));
|
||||
}
|
||||
}
|
||||
std::unordered_map<sstring, sstring> options_map;
|
||||
@@ -408,14 +443,10 @@ void set_repair(http_context& ctx, routes& r, sharded<repair_service>& repair) {
|
||||
// returns immediately, not waiting for the repair to finish. The user
|
||||
// then has other mechanisms to track the ongoing repair's progress,
|
||||
// or stop it.
|
||||
try {
|
||||
int res = co_await repair_start(repair, validate_keyspace(ctx, req), options_map);
|
||||
co_return json::json_return_type(res);
|
||||
} catch (const std::invalid_argument& e) {
|
||||
// if the option is not sane, repair_start() throws immediately, so
|
||||
// convert the exception to an HTTP error
|
||||
throw httpd::bad_param_exception(e.what());
|
||||
}
|
||||
return repair_start(repair, validate_keyspace(ctx, req->param),
|
||||
options_map).then([] (int i) {
|
||||
return make_ready_future<json::json_return_type>(i);
|
||||
});
|
||||
});
|
||||
|
||||
ss::get_active_repair_async.set(r, [&repair] (std::unique_ptr<http::request> req) {
|
||||
@@ -495,7 +526,7 @@ void unset_repair(http_context& ctx, routes& r) {
|
||||
|
||||
void set_sstables_loader(http_context& ctx, routes& r, sharded<sstables_loader>& sst_loader) {
|
||||
ss::load_new_ss_tables.set(r, [&ctx, &sst_loader](std::unique_ptr<http::request> req) {
|
||||
auto ks = validate_keyspace(ctx, req);
|
||||
auto ks = validate_keyspace(ctx, req->param);
|
||||
auto cf = req->get_query_param("cf");
|
||||
auto stream = req->get_query_param("load_and_stream");
|
||||
auto primary_replica = req->get_query_param("primary_replica_only");
|
||||
@@ -526,8 +557,8 @@ void unset_sstables_loader(http_context& ctx, routes& r) {
|
||||
|
||||
void set_view_builder(http_context& ctx, routes& r, sharded<db::view::view_builder>& vb) {
|
||||
ss::view_build_statuses.set(r, [&ctx, &vb] (std::unique_ptr<http::request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req);
|
||||
auto view = req->get_path_param("view");
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto view = req->param["view"];
|
||||
return vb.local().view_build_statuses(std::move(keyspace), std::move(view)).then([] (std::unordered_map<sstring, sstring> status) {
|
||||
std::vector<storage_service_json::mapper> res;
|
||||
return make_ready_future<json::json_return_type>(map_to_key_value(std::move(status), res));
|
||||
@@ -553,24 +584,8 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
return ctx.db.local().commitlog()->active_config().commit_log_location;
|
||||
});
|
||||
|
||||
ss::get_token_endpoint.set(r, [&ctx, &ss] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
const auto keyspace_name = req->get_query_param("keyspace");
|
||||
const auto table_name = req->get_query_param("cf");
|
||||
|
||||
std::map<dht::token, gms::inet_address> token_endpoints;
|
||||
if (keyspace_name.empty() && table_name.empty()) {
|
||||
token_endpoints = ss.local().get_token_to_endpoint_map();
|
||||
} else if (!keyspace_name.empty() && !table_name.empty()) {
|
||||
auto& db = ctx.db.local();
|
||||
if (!db.has_schema(keyspace_name, table_name)) {
|
||||
throw bad_param_exception(fmt::format("Failed to find table {}.{}", keyspace_name, table_name));
|
||||
}
|
||||
token_endpoints = co_await ss.local().get_tablet_to_endpoint_map(db.find_schema(keyspace_name, table_name)->id());
|
||||
} else {
|
||||
throw bad_param_exception("Either provide both keyspace and table (for tablet table) or neither (for vnodes)");
|
||||
}
|
||||
|
||||
co_return json::json_return_type(stream_range_as_array(token_endpoints, [](const auto& i) {
|
||||
ss::get_token_endpoint.set(r, [&ss] (std::unique_ptr<http::request> req) {
|
||||
return make_ready_future<json::json_return_type>(stream_range_as_array(ss.local().get_token_to_endpoint_map(), [](const auto& i) {
|
||||
storage_service_json::mapper val;
|
||||
val.key = fmt::to_string(i.first);
|
||||
val.value = fmt::to_string(i.second);
|
||||
@@ -648,7 +663,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
});
|
||||
|
||||
ss::get_range_to_endpoint_map.set(r, [&ctx, &ss](std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto keyspace = validate_keyspace(ctx, req);
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto table = req->get_query_param("cf");
|
||||
|
||||
auto erm = std::invoke([&]() -> locator::effective_replication_map_ptr {
|
||||
@@ -679,7 +694,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
m.key.push("");
|
||||
}
|
||||
for (const gms::inet_address& address : entry.second) {
|
||||
m.value.push(fmt::to_string(address));
|
||||
m.value.push(address.to_sstring());
|
||||
}
|
||||
return m;
|
||||
});
|
||||
@@ -688,7 +703,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
ss::get_pending_range_to_endpoint_map.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
auto keyspace = validate_keyspace(ctx, req);
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
std::vector<ss::maplist_mapper> res;
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
@@ -697,13 +712,13 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
if (!req->param.exists("keyspace")) {
|
||||
throw bad_param_exception("The keyspace param is not provided");
|
||||
}
|
||||
auto keyspace = req->get_path_param("keyspace");
|
||||
auto keyspace = req->param["keyspace"];
|
||||
auto table = req->get_query_param("table");
|
||||
if (!table.empty()) {
|
||||
validate_table(ctx, keyspace, table);
|
||||
return describe_ring_as_json_for_table(ss, keyspace, table);
|
||||
}
|
||||
return describe_ring_as_json(ss, validate_keyspace(ctx, req));
|
||||
return describe_ring_as_json(ss, validate_keyspace(ctx, req->param));
|
||||
});
|
||||
|
||||
ss::get_load.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
@@ -731,7 +746,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
});
|
||||
|
||||
ss::get_natural_endpoints.set(r, [&ctx, &ss](const_req req) {
|
||||
auto keyspace = validate_keyspace(ctx, req);
|
||||
auto keyspace = validate_keyspace(ctx, req.param);
|
||||
return container_to_vec(ss.local().get_natural_endpoints(keyspace, req.get_query_param("cf"),
|
||||
req.get_query_param("key")));
|
||||
});
|
||||
@@ -800,7 +815,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
|
||||
ss::force_keyspace_cleanup.set(r, [&ctx, &ss](std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto& db = ctx.db;
|
||||
auto keyspace = validate_keyspace(ctx, req);
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto table_infos = parse_table_infos(keyspace, ctx, req->query_parameters, "cf");
|
||||
const auto& rs = db.local().find_keyspace(keyspace).get_replication_strategy();
|
||||
if (rs.get_type() == locator::replication_strategy_type::local || !rs.is_vnode_based()) {
|
||||
@@ -895,7 +910,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
});
|
||||
|
||||
ss::force_keyspace_flush.set(r, [&ctx](std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto keyspace = validate_keyspace(ctx, req);
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto column_families = parse_tables(keyspace, ctx, req->query_parameters, "cf");
|
||||
apilog.info("perform_keyspace_flush: keyspace={} tables={}", keyspace, column_families);
|
||||
auto& db = ctx.db;
|
||||
@@ -1004,7 +1019,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
ss::truncate.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
auto keyspace = validate_keyspace(ctx, req);
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto column_family = req->get_query_param("cf");
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
@@ -1138,16 +1153,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
});
|
||||
|
||||
ss::rebuild.set(r, [&ss](std::unique_ptr<http::request> req) {
|
||||
utils::optional_param source_dc;
|
||||
if (auto source_dc_str = req->get_query_param("source_dc"); !source_dc_str.empty()) {
|
||||
source_dc.emplace(std::move(source_dc_str)).set_user_provided();
|
||||
}
|
||||
if (auto force_str = req->get_query_param("force"); !force_str.empty() && service::loosen_constraints(validate_bool(force_str))) {
|
||||
if (!source_dc) {
|
||||
throw bad_param_exception("The `source_dc` option must be provided for using the `force` option");
|
||||
}
|
||||
source_dc.set_force();
|
||||
}
|
||||
auto source_dc = req->get_query_param("source_dc");
|
||||
apilog.info("rebuild: source_dc={}", source_dc);
|
||||
return ss.local().rebuild(std::move(source_dc)).then([] {
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
@@ -1157,14 +1163,14 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
ss::bulk_load.set(r, [](std::unique_ptr<http::request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
auto path = req->get_path_param("path");
|
||||
auto path = req->param["path"];
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
ss::bulk_load_async.set(r, [](std::unique_ptr<http::request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
auto path = req->get_path_param("path");
|
||||
auto path = req->param["path"];
|
||||
return make_ready_future<json::json_return_type>(json_void());
|
||||
});
|
||||
|
||||
@@ -1251,6 +1257,38 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
}
|
||||
});
|
||||
|
||||
ss::enable_auto_compaction.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto tables = parse_tables(keyspace, ctx, req->query_parameters, "cf");
|
||||
|
||||
apilog.info("enable_auto_compaction: keyspace={} tables={}", keyspace, tables);
|
||||
return set_tables_autocompaction(ctx, keyspace, tables, true);
|
||||
});
|
||||
|
||||
ss::disable_auto_compaction.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto tables = parse_tables(keyspace, ctx, req->query_parameters, "cf");
|
||||
|
||||
apilog.info("disable_auto_compaction: keyspace={} tables={}", keyspace, tables);
|
||||
return set_tables_autocompaction(ctx, keyspace, tables, false);
|
||||
});
|
||||
|
||||
ss::enable_tombstone_gc.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto tables = parse_tables(keyspace, ctx, req->query_parameters, "cf");
|
||||
|
||||
apilog.info("enable_tombstone_gc: keyspace={} tables={}", keyspace, tables);
|
||||
return set_tables_tombstone_gc(ctx, keyspace, tables, true);
|
||||
});
|
||||
|
||||
ss::disable_tombstone_gc.set(r, [&ctx](std::unique_ptr<http::request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto tables = parse_tables(keyspace, ctx, req->query_parameters, "cf");
|
||||
|
||||
apilog.info("disable_tombstone_gc: keyspace={} tables={}", keyspace, tables);
|
||||
return set_tables_tombstone_gc(ctx, keyspace, tables, false);
|
||||
});
|
||||
|
||||
ss::deliver_hints.set(r, [](std::unique_ptr<http::request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
@@ -1344,7 +1382,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
});
|
||||
|
||||
ss::get_effective_ownership.set(r, [&ctx, &ss] (std::unique_ptr<http::request> req) {
|
||||
auto keyspace_name = req->get_path_param("keyspace") == "null" ? "" : validate_keyspace(ctx, req);
|
||||
auto keyspace_name = req->param["keyspace"] == "null" ? "" : validate_keyspace(ctx, req->param);
|
||||
auto table_name = req->get_query_param("cf");
|
||||
|
||||
if (!keyspace_name.empty()) {
|
||||
@@ -1584,11 +1622,6 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
||||
co_return json_void();
|
||||
});
|
||||
|
||||
ss::quiesce_topology.set(r, [&ss] (std::unique_ptr<http::request> req) -> future<json_return_type> {
|
||||
co_await ss.local().await_topology_quiesced();
|
||||
co_return json_void();
|
||||
});
|
||||
|
||||
sp::get_schema_versions.set(r, [&ss](std::unique_ptr<http::request> req) {
|
||||
return ss.local().describe_schema_versions().then([] (auto result) {
|
||||
std::vector<sp::mapper_list> res;
|
||||
@@ -1664,6 +1697,10 @@ void unset_storage_service(http_context& ctx, routes& r) {
|
||||
ss::get_trace_probability.unset(r);
|
||||
ss::get_slow_query_info.unset(r);
|
||||
ss::set_slow_query.unset(r);
|
||||
ss::enable_auto_compaction.unset(r);
|
||||
ss::disable_auto_compaction.unset(r);
|
||||
ss::enable_tombstone_gc.unset(r);
|
||||
ss::disable_tombstone_gc.unset(r);
|
||||
ss::deliver_hints.unset(r);
|
||||
ss::get_cluster_name.unset(r);
|
||||
ss::get_partitioner_name.unset(r);
|
||||
@@ -1688,7 +1725,6 @@ void unset_storage_service(http_context& ctx, routes& r) {
|
||||
ss::add_tablet_replica.unset(r);
|
||||
ss::del_tablet_replica.unset(r);
|
||||
ss::tablet_balancing_enable.unset(r);
|
||||
ss::quiesce_topology.unset(r);
|
||||
sp::get_schema_versions.unset(r);
|
||||
}
|
||||
|
||||
@@ -1696,41 +1732,32 @@ void set_snapshot(http_context& ctx, routes& r, sharded<db::snapshot_ctl>& snap_
|
||||
ss::get_snapshot_details.set(r, [&snap_ctl](std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto result = co_await snap_ctl.local().get_snapshot_details();
|
||||
co_return std::function([res = std::move(result)] (output_stream<char>&& o) -> future<> {
|
||||
std::exception_ptr ex;
|
||||
auto result = std::move(res);
|
||||
output_stream<char> out = std::move(o);
|
||||
try {
|
||||
auto result = std::move(res);
|
||||
bool first = true;
|
||||
bool first = true;
|
||||
|
||||
co_await out.write("[");
|
||||
for (auto& [name, details] : result) {
|
||||
if (!first) {
|
||||
co_await out.write(", ");
|
||||
}
|
||||
std::vector<ss::snapshot> snapshot;
|
||||
for (auto& cf : details) {
|
||||
ss::snapshot snp;
|
||||
snp.ks = cf.ks;
|
||||
snp.cf = cf.cf;
|
||||
snp.live = cf.details.live;
|
||||
snp.total = cf.details.total;
|
||||
snapshot.push_back(std::move(snp));
|
||||
}
|
||||
ss::snapshots all_snapshots;
|
||||
all_snapshots.key = name;
|
||||
all_snapshots.value = std::move(snapshot);
|
||||
co_await all_snapshots.write(out);
|
||||
first = false;
|
||||
co_await out.write("[");
|
||||
for (auto&& map : result) {
|
||||
if (!first) {
|
||||
co_await out.write(", ");
|
||||
}
|
||||
co_await out.write("]");
|
||||
co_await out.flush();
|
||||
} catch (...) {
|
||||
ex = std::current_exception();
|
||||
std::vector<ss::snapshot> snapshot;
|
||||
for (auto& cf : std::get<1>(map)) {
|
||||
ss::snapshot snp;
|
||||
snp.ks = cf.ks;
|
||||
snp.cf = cf.cf;
|
||||
snp.live = cf.live;
|
||||
snp.total = cf.total;
|
||||
snapshot.push_back(std::move(snp));
|
||||
}
|
||||
ss::snapshots all_snapshots;
|
||||
all_snapshots.key = std::get<0>(map);
|
||||
all_snapshots.value = std::move(snapshot);
|
||||
co_await all_snapshots.write(out);
|
||||
first = false;
|
||||
}
|
||||
co_await out.write("]");
|
||||
co_await out.close();
|
||||
if (ex) {
|
||||
co_await coroutine::return_exception_ptr(std::move(ex));
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1809,20 +1836,6 @@ void set_snapshot(http_context& ctx, routes& r, sharded<db::snapshot_ctl>& snap_
|
||||
|
||||
co_return json::json_return_type(static_cast<int>(scrub_status::successful));
|
||||
});
|
||||
|
||||
cf::get_true_snapshots_size.set(r, [&snap_ctl] (std::unique_ptr<http::request> req) {
|
||||
auto [ks, cf] = parse_fully_qualified_cf_name(req->get_path_param("name"));
|
||||
return snap_ctl.local().true_snapshots_size(std::move(ks), std::move(cf)).then([] (int64_t res) {
|
||||
return make_ready_future<json::json_return_type>(res);
|
||||
});
|
||||
});
|
||||
|
||||
cf::get_all_true_snapshots_size.set(r, [] (std::unique_ptr<http::request> req) {
|
||||
//TBD
|
||||
unimplemented();
|
||||
return make_ready_future<json::json_return_type>(0);
|
||||
});
|
||||
|
||||
}
|
||||
|
||||
void unset_snapshot(http_context& ctx, routes& r) {
|
||||
@@ -1831,8 +1844,6 @@ void unset_snapshot(http_context& ctx, routes& r) {
|
||||
ss::del_snapshot.unset(r);
|
||||
ss::true_snapshots_size.unset(r);
|
||||
ss::scrub.unset(r);
|
||||
cf::get_true_snapshots_size.unset(r);
|
||||
cf::get_all_true_snapshots_size.unset(r);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -40,11 +40,7 @@ sstring validate_keyspace(const http_context& ctx, sstring ks_name);
|
||||
|
||||
// verify that the keyspace parameter is found, otherwise a bad_param_exception exception is thrown
|
||||
// containing the description of the respective keyspace error.
|
||||
sstring validate_keyspace(const http_context& ctx, const std::unique_ptr<http::request>& req);
|
||||
|
||||
// verify that the table parameter is found, otherwise a bad_param_exception exception is thrown
|
||||
// containing the description of the respective table error.
|
||||
void validate_table(const http_context& ctx, sstring ks_name, sstring table_name);
|
||||
sstring validate_keyspace(const http_context& ctx, const httpd::parameters& param);
|
||||
|
||||
// splits a request parameter assumed to hold a comma-separated list of table names
|
||||
// verify that the tables are found, otherwise a bad_param_exception exception is thrown
|
||||
|
||||
@@ -108,7 +108,7 @@ void set_stream_manager(http_context& ctx, routes& r, sharded<streaming::stream_
|
||||
});
|
||||
|
||||
hs::get_total_incoming_bytes.set(r, [&sm](std::unique_ptr<request> req) {
|
||||
gms::inet_address peer(req->get_path_param("peer"));
|
||||
gms::inet_address peer(req->param["peer"]);
|
||||
return sm.map_reduce0([peer](streaming::stream_manager& sm) {
|
||||
return sm.get_progress_on_all_shards(peer).then([] (auto sbytes) {
|
||||
return sbytes.bytes_received;
|
||||
@@ -129,7 +129,7 @@ void set_stream_manager(http_context& ctx, routes& r, sharded<streaming::stream_
|
||||
});
|
||||
|
||||
hs::get_total_outgoing_bytes.set(r, [&sm](std::unique_ptr<request> req) {
|
||||
gms::inet_address peer(req->get_path_param("peer"));
|
||||
gms::inet_address peer(req->param["peer"]);
|
||||
return sm.map_reduce0([peer] (streaming::stream_manager& sm) {
|
||||
return sm.get_progress_on_all_shards(peer).then([] (auto sbytes) {
|
||||
return sbytes.bytes_sent;
|
||||
|
||||
@@ -10,7 +10,6 @@
|
||||
#include "api/api-doc/system.json.hh"
|
||||
#include "api/api-doc/metrics.json.hh"
|
||||
#include "replica/database.hh"
|
||||
#include "sstables/sstables_manager.hh"
|
||||
|
||||
#include <rapidjson/document.h>
|
||||
#include <seastar/core/reactor.hh>
|
||||
@@ -123,9 +122,9 @@ void set_system(http_context& ctx, routes& r) {
|
||||
|
||||
hs::get_logger_level.set(r, [](const_req req) {
|
||||
try {
|
||||
return logging::level_name(logging::logger_registry().get_logger_level(req.get_path_param("name")));
|
||||
return logging::level_name(logging::logger_registry().get_logger_level(req.param["name"]));
|
||||
} catch (std::out_of_range& e) {
|
||||
throw bad_param_exception("Unknown logger name " + req.get_path_param("name"));
|
||||
throw bad_param_exception("Unknown logger name " + req.param["name"]);
|
||||
}
|
||||
// just to keep the compiler happy
|
||||
return sstring();
|
||||
@@ -134,9 +133,9 @@ void set_system(http_context& ctx, routes& r) {
|
||||
hs::set_logger_level.set(r, [](const_req req) {
|
||||
try {
|
||||
logging::log_level level = boost::lexical_cast<logging::log_level>(std::string(req.get_query_param("level")));
|
||||
logging::logger_registry().set_logger_level(req.get_path_param("name"), level);
|
||||
logging::logger_registry().set_logger_level(req.param["name"], level);
|
||||
} catch (std::out_of_range& e) {
|
||||
throw bad_param_exception("Unknown logger name " + req.get_path_param("name"));
|
||||
throw bad_param_exception("Unknown logger name " + req.param["name"]);
|
||||
} catch (boost::bad_lexical_cast& e) {
|
||||
throw bad_param_exception("Unknown logging level " + req.get_query_param("level"));
|
||||
}
|
||||
@@ -183,11 +182,6 @@ void set_system(http_context& ctx, routes& r) {
|
||||
apilog.info("Profile dumped to {}", profile_dest);
|
||||
return make_ready_future<json::json_return_type>(json::json_return_type(json::json_void()));
|
||||
}) ;
|
||||
|
||||
hs::get_highest_supported_sstable_version.set(r, [&ctx] (const_req req) {
|
||||
auto& table = ctx.db.local().find_column_family("system", "local");
|
||||
return seastar::to_sstring(table.get_sstables_manager().get_highest_supported_format());
|
||||
});
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -7,7 +7,6 @@
|
||||
*/
|
||||
|
||||
#include <seastar/core/coroutine.hh>
|
||||
#include <seastar/coroutine/exception.hh>
|
||||
#include <seastar/http/exception.hh>
|
||||
|
||||
#include "task_manager.hh"
|
||||
@@ -24,8 +23,6 @@ namespace tm = httpd::task_manager_json;
|
||||
using namespace json;
|
||||
using namespace seastar::httpd;
|
||||
|
||||
using task_variant = std::variant<tasks::task_manager::foreign_task_ptr, tasks::task_manager::task::task_essentials>;
|
||||
|
||||
inline bool filter_tasks(tasks::task_manager::task_ptr task, std::unordered_map<sstring, sstring>& query_params) {
|
||||
return (!query_params.contains("keyspace") || query_params["keyspace"] == task->get_status().keyspace) &&
|
||||
(!query_params.contains("table") || query_params["table"] == task->get_status().table);
|
||||
@@ -105,14 +102,13 @@ future<full_task_status> retrieve_status(const tasks::task_manager::foreign_task
|
||||
s.module = task->get_module_name();
|
||||
s.progress.completed = progress.completed;
|
||||
s.progress.total = progress.total;
|
||||
std::vector<std::string> ct = co_await task->get_children().map_each_task<std::string>([] (const tasks::task_manager::foreign_task_ptr& child) {
|
||||
std::vector<std::string> ct{task->get_children().size()};
|
||||
boost::transform(task->get_children(), ct.begin(), [] (const auto& child) {
|
||||
return child->id().to_sstring();
|
||||
}, [] (const tasks::task_manager::task::task_essentials& child) {
|
||||
return child.task_status.id.to_sstring();
|
||||
});
|
||||
s.children_ids = std::move(ct);
|
||||
co_return s;
|
||||
};
|
||||
}
|
||||
|
||||
void set_task_manager(http_context& ctx, routes& r, sharded<tasks::task_manager>& tm, db::config& cfg) {
|
||||
tm::get_modules.set(r, [&tm] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
@@ -127,7 +123,7 @@ void set_task_manager(http_context& ctx, routes& r, sharded<tasks::task_manager>
|
||||
chunked_stats local_res;
|
||||
tasks::task_manager::module_ptr module;
|
||||
try {
|
||||
module = tm.find_module(req->get_path_param("module"));
|
||||
module = tm.find_module(req->param["module"]);
|
||||
} catch (...) {
|
||||
throw bad_param_exception(fmt::format("{}", std::current_exception()));
|
||||
}
|
||||
@@ -142,34 +138,25 @@ void set_task_manager(http_context& ctx, routes& r, sharded<tasks::task_manager>
|
||||
|
||||
std::function<future<>(output_stream<char>&&)> f = [r = std::move(res)] (output_stream<char>&& os) -> future<> {
|
||||
auto s = std::move(os);
|
||||
std::exception_ptr ex;
|
||||
try {
|
||||
auto res = std::move(r);
|
||||
co_await s.write("[");
|
||||
std::string delim = "";
|
||||
for (auto& v: res) {
|
||||
for (auto& stats: v) {
|
||||
co_await s.write(std::exchange(delim, ", "));
|
||||
tm::task_stats ts;
|
||||
ts = stats;
|
||||
co_await formatter::write(s, ts);
|
||||
}
|
||||
auto res = std::move(r);
|
||||
co_await s.write("[");
|
||||
std::string delim = "";
|
||||
for (auto& v: res) {
|
||||
for (auto& stats: v) {
|
||||
co_await s.write(std::exchange(delim, ", "));
|
||||
tm::task_stats ts;
|
||||
ts = stats;
|
||||
co_await formatter::write(s, ts);
|
||||
}
|
||||
co_await s.write("]");
|
||||
co_await s.flush();
|
||||
} catch (...) {
|
||||
ex = std::current_exception();
|
||||
}
|
||||
co_await s.write("]");
|
||||
co_await s.close();
|
||||
if (ex) {
|
||||
co_await coroutine::return_exception_ptr(std::move(ex));
|
||||
}
|
||||
};
|
||||
co_return std::move(f);
|
||||
});
|
||||
|
||||
tm::get_task_status.set(r, [&tm] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto id = tasks::task_id{utils::UUID{req->get_path_param("task_id")}};
|
||||
auto id = tasks::task_id{utils::UUID{req->param["task_id"]}};
|
||||
tasks::task_manager::foreign_task_ptr task;
|
||||
try {
|
||||
task = co_await tasks::task_manager::invoke_on_task(tm, id, std::function([] (tasks::task_manager::task_ptr task) -> future<tasks::task_manager::foreign_task_ptr> {
|
||||
@@ -186,13 +173,13 @@ void set_task_manager(http_context& ctx, routes& r, sharded<tasks::task_manager>
|
||||
});
|
||||
|
||||
tm::abort_task.set(r, [&tm] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto id = tasks::task_id{utils::UUID{req->get_path_param("task_id")}};
|
||||
auto id = tasks::task_id{utils::UUID{req->param["task_id"]}};
|
||||
try {
|
||||
co_await tasks::task_manager::invoke_on_task(tm, id, [] (tasks::task_manager::task_ptr task) -> future<> {
|
||||
if (!task->is_abortable()) {
|
||||
co_await coroutine::return_exception(std::runtime_error("Requested task cannot be aborted"));
|
||||
}
|
||||
task->abort();
|
||||
co_await task->abort();
|
||||
});
|
||||
} catch (tasks::task_manager::task_not_found& e) {
|
||||
throw bad_param_exception(e.what());
|
||||
@@ -201,11 +188,12 @@ void set_task_manager(http_context& ctx, routes& r, sharded<tasks::task_manager>
|
||||
});
|
||||
|
||||
tm::wait_task.set(r, [&tm] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto id = tasks::task_id{utils::UUID{req->get_path_param("task_id")}};
|
||||
auto id = tasks::task_id{utils::UUID{req->param["task_id"]}};
|
||||
tasks::task_manager::foreign_task_ptr task;
|
||||
try {
|
||||
task = co_await tasks::task_manager::invoke_on_task(tm, id, std::function([] (tasks::task_manager::task_ptr task) {
|
||||
return task->done().then_wrapped([task] (auto f) {
|
||||
task->unregister_task();
|
||||
// done() is called only because we want the task to be complete before getting its status.
|
||||
// The future should be ignored here as the result does not matter.
|
||||
f.ignore_ready_future();
|
||||
@@ -221,8 +209,8 @@ void set_task_manager(http_context& ctx, routes& r, sharded<tasks::task_manager>
|
||||
|
||||
tm::get_task_status_recursively.set(r, [&_tm = tm] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto& tm = _tm;
|
||||
auto id = tasks::task_id{utils::UUID{req->get_path_param("task_id")}};
|
||||
std::queue<task_variant> q;
|
||||
auto id = tasks::task_id{utils::UUID{req->param["task_id"]}};
|
||||
std::queue<tasks::task_manager::foreign_task_ptr> q;
|
||||
utils::chunked_vector<full_task_status> res;
|
||||
|
||||
tasks::task_manager::foreign_task_ptr task;
|
||||
@@ -242,33 +230,10 @@ void set_task_manager(http_context& ctx, routes& r, sharded<tasks::task_manager>
|
||||
q.push(co_await task.copy()); // Task cannot be moved since we need it to be alive during whole loop execution.
|
||||
while (!q.empty()) {
|
||||
auto& current = q.front();
|
||||
co_await std::visit(overloaded_functor {
|
||||
[&] (const tasks::task_manager::foreign_task_ptr& task) -> future<> {
|
||||
res.push_back(co_await retrieve_status(task));
|
||||
co_await task->get_children().for_each_task([&q] (const tasks::task_manager::foreign_task_ptr& child) -> future<> {
|
||||
q.push(co_await child.copy());
|
||||
}, [&] (const tasks::task_manager::task::task_essentials& child) {
|
||||
q.push(child);
|
||||
return make_ready_future();
|
||||
});
|
||||
},
|
||||
[&] (const tasks::task_manager::task::task_essentials& task) -> future<> {
|
||||
res.push_back(full_task_status{
|
||||
.task_status = task.task_status,
|
||||
.type = task.type,
|
||||
.progress = task.task_progress,
|
||||
.parent_id = task.parent_id,
|
||||
.abortable = task.abortable,
|
||||
.children_ids = boost::copy_range<std::vector<std::string>>(task.failed_children | boost::adaptors::transformed([] (auto& child) {
|
||||
return child.task_status.id.to_sstring();
|
||||
}))
|
||||
});
|
||||
for (auto& child: task.failed_children) {
|
||||
q.push(child);
|
||||
}
|
||||
return make_ready_future();
|
||||
}
|
||||
}, current);
|
||||
res.push_back(co_await retrieve_status(current));
|
||||
for (auto& child: current->get_children()) {
|
||||
q.push(co_await child.copy());
|
||||
}
|
||||
q.pop();
|
||||
}
|
||||
|
||||
|
||||
@@ -83,19 +83,20 @@ void set_task_manager_test(http_context& ctx, routes& r, sharded<tasks::task_man
|
||||
});
|
||||
|
||||
tmt::finish_test_task.set(r, [&tm] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto id = tasks::task_id{utils::UUID{req->get_path_param("task_id")}};
|
||||
auto id = tasks::task_id{utils::UUID{req->param["task_id"]}};
|
||||
auto it = req->query_parameters.find("error");
|
||||
bool fail = it != req->query_parameters.end();
|
||||
std::string error = fail ? it->second : "";
|
||||
|
||||
try {
|
||||
co_await tasks::task_manager::invoke_on_task(tm, id, [fail, error = std::move(error)] (tasks::task_manager::task_ptr task) -> future<> {
|
||||
co_await tasks::task_manager::invoke_on_task(tm, id, [fail, error = std::move(error)] (tasks::task_manager::task_ptr task) {
|
||||
tasks::test_task test_task{task};
|
||||
if (fail) {
|
||||
co_await test_task.finish_failed(std::make_exception_ptr(std::runtime_error(error)));
|
||||
test_task.finish_failed(std::make_exception_ptr(std::runtime_error(error)));
|
||||
} else {
|
||||
co_await test_task.finish();
|
||||
test_task.finish();
|
||||
}
|
||||
return make_ready_future<>();
|
||||
});
|
||||
} catch (tasks::task_manager::task_not_found& e) {
|
||||
throw bad_param_exception(e.what());
|
||||
|
||||
@@ -7,7 +7,6 @@
|
||||
*/
|
||||
|
||||
#include <seastar/core/coroutine.hh>
|
||||
#include <fmt/ranges.h>
|
||||
|
||||
#include "api/api.hh"
|
||||
#include "api/storage_service.hh"
|
||||
@@ -30,7 +29,7 @@ using ks_cf_func = std::function<future<json::json_return_type>(http_context&, s
|
||||
|
||||
static auto wrap_ks_cf(http_context &ctx, ks_cf_func f) {
|
||||
return [&ctx, f = std::move(f)](std::unique_ptr<http::request> req) {
|
||||
auto keyspace = validate_keyspace(ctx, req);
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto table_infos = parse_table_infos(keyspace, ctx, req->query_parameters, "cf");
|
||||
return f(ctx, std::move(req), std::move(keyspace), std::move(table_infos));
|
||||
};
|
||||
@@ -62,7 +61,7 @@ void set_tasks_compaction_module(http_context& ctx, routes& r, sharded<service::
|
||||
|
||||
t::force_keyspace_cleanup_async.set(r, [&ctx, &ss](std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||
auto& db = ctx.db;
|
||||
auto keyspace = validate_keyspace(ctx, req);
|
||||
auto keyspace = validate_keyspace(ctx, req->param);
|
||||
auto table_infos = parse_table_infos(keyspace, ctx, req->query_parameters, "cf");
|
||||
apilog.info("force_keyspace_cleanup_async: keyspace={} tables={}", keyspace, table_infos);
|
||||
if (!co_await ss.local().is_cleanup_allowed(keyspace)) {
|
||||
|
||||
@@ -31,7 +31,7 @@ void set_token_metadata(http_context& ctx, routes& r, sharded<locator::shared_to
|
||||
});
|
||||
|
||||
ss::get_node_tokens.set(r, [&tm] (std::unique_ptr<http::request> req) {
|
||||
gms::inet_address addr(req->get_path_param("endpoint"));
|
||||
gms::inet_address addr(req->param["endpoint"]);
|
||||
auto& local_tm = *tm.local().get();
|
||||
const auto host_id = local_tm.get_host_id_if_known(addr);
|
||||
return make_ready_future<json::json_return_type>(stream_range_as_array(host_id ? local_tm.get_tokens(*host_id): std::vector<dht::token>{}, [](const dht::token& i) {
|
||||
|
||||
@@ -30,7 +30,6 @@ target_link_libraries(scylla_auth
|
||||
Seastar::seastar
|
||||
xxHash::xxhash
|
||||
PRIVATE
|
||||
absl::headers
|
||||
cql3
|
||||
idl
|
||||
wasmtime_bindings
|
||||
|
||||
@@ -50,7 +50,7 @@ inline bool is_anonymous(const authenticated_user& u) noexcept {
|
||||
/// The user name, or "anonymous".
|
||||
///
|
||||
template <>
|
||||
struct fmt::formatter<auth::authenticated_user> : fmt::formatter<string_view> {
|
||||
struct fmt::formatter<auth::authenticated_user> : fmt::formatter<std::string_view> {
|
||||
template <typename FormatContext>
|
||||
auto format(const auth::authenticated_user& u, FormatContext& ctx) const {
|
||||
if (u.name) {
|
||||
|
||||
@@ -48,15 +48,15 @@ public:
|
||||
}
|
||||
|
||||
template <>
|
||||
struct fmt::formatter<auth::authentication_option> : fmt::formatter<string_view> {
|
||||
struct fmt::formatter<auth::authentication_option> : fmt::formatter<std::string_view> {
|
||||
template <typename FormatContext>
|
||||
auto format(const auth::authentication_option a, FormatContext& ctx) const {
|
||||
using enum auth::authentication_option;
|
||||
switch (a) {
|
||||
case password:
|
||||
return formatter<string_view>::format("PASSWORD", ctx);
|
||||
return formatter<std::string_view>::format("PASSWORD", ctx);
|
||||
case options:
|
||||
return formatter<string_view>::format("OPTIONS", ctx);
|
||||
return formatter<std::string_view>::format("OPTIONS", ctx);
|
||||
}
|
||||
std::abort();
|
||||
}
|
||||
|
||||
@@ -10,10 +10,8 @@
|
||||
#include "auth/certificate_authenticator.hh"
|
||||
|
||||
#include <regex>
|
||||
#include <fmt/ranges.h>
|
||||
|
||||
#include "utils/class_registrator.hh"
|
||||
#include "utils/to_string.hh"
|
||||
#include "data_dictionary/data_dictionary.hh"
|
||||
#include "cql3/query_processor.hh"
|
||||
#include "db/config.hh"
|
||||
@@ -76,7 +74,7 @@ auth::certificate_authenticator::certificate_authenticator(cql3::query_processor
|
||||
continue;
|
||||
} catch (std::out_of_range&) {
|
||||
// just fallthrough
|
||||
} catch (boost::regex_error&) {
|
||||
} catch (std::regex_error&) {
|
||||
std::throw_with_nested(std::invalid_argument(fmt::format("Invalid query expression: {}", map.at(cfg_query_attr))));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -24,6 +24,7 @@
|
||||
#include "service/raft/group0_state_machine.hh"
|
||||
#include "timeout_config.hh"
|
||||
#include "db/config.hh"
|
||||
#include "db/system_auth_keyspace.hh"
|
||||
#include "utils/error_injection.hh"
|
||||
|
||||
namespace auth {
|
||||
@@ -40,14 +41,14 @@ constinit const std::string_view AUTH_PACKAGE_NAME("org.apache.cassandra.auth.")
|
||||
static logging::logger auth_log("auth");
|
||||
|
||||
bool legacy_mode(cql3::query_processor& qp) {
|
||||
return qp.auth_version < db::system_keyspace::auth_version_t::v2;
|
||||
return qp.auth_version < db::system_auth_keyspace::version_t::v2;
|
||||
}
|
||||
|
||||
std::string_view get_auth_ks_name(cql3::query_processor& qp) {
|
||||
if (legacy_mode(qp)) {
|
||||
return meta::legacy::AUTH_KS;
|
||||
}
|
||||
return db::system_keyspace::NAME;
|
||||
return db::system_auth_keyspace::NAME;
|
||||
}
|
||||
|
||||
// Func must support being invoked more than once.
|
||||
@@ -64,7 +65,7 @@ future<> do_after_system_ready(seastar::abort_source& as, seastar::noncopyable_f
|
||||
}).discard_result();
|
||||
}
|
||||
|
||||
static future<> create_legacy_metadata_table_if_missing_impl(
|
||||
static future<> create_metadata_table_if_missing_impl(
|
||||
std::string_view table_name,
|
||||
cql3::query_processor& qp,
|
||||
std::string_view cql,
|
||||
@@ -72,7 +73,7 @@ static future<> create_legacy_metadata_table_if_missing_impl(
|
||||
assert(this_shard_id() == 0); // once_among_shards makes sure a function is executed on shard 0 only
|
||||
|
||||
auto db = qp.db();
|
||||
auto parsed_statement = cql3::query_processor::parse_statement(cql, cql3::dialect{});
|
||||
auto parsed_statement = cql3::query_processor::parse_statement(cql);
|
||||
auto& parsed_cf_statement = static_cast<cql3::statements::raw::cf_statement&>(*parsed_statement);
|
||||
|
||||
parsed_cf_statement.prepare_keyspace(meta::legacy::AUTH_KS);
|
||||
@@ -97,12 +98,12 @@ static future<> create_legacy_metadata_table_if_missing_impl(
|
||||
}
|
||||
}
|
||||
|
||||
future<> create_legacy_metadata_table_if_missing(
|
||||
future<> create_metadata_table_if_missing(
|
||||
std::string_view table_name,
|
||||
cql3::query_processor& qp,
|
||||
std::string_view cql,
|
||||
::service::migration_manager& mm) noexcept {
|
||||
return futurize_invoke(create_legacy_metadata_table_if_missing_impl, table_name, qp, cql, mm);
|
||||
return futurize_invoke(create_metadata_table_if_missing_impl, table_name, qp, cql, mm);
|
||||
}
|
||||
|
||||
::service::query_state& internal_distributed_query_state() noexcept {
|
||||
@@ -122,7 +123,7 @@ static future<> announce_mutations_with_guard(
|
||||
::service::raft_group0_client& group0_client,
|
||||
std::vector<canonical_mutation> muts,
|
||||
::service::group0_guard group0_guard,
|
||||
seastar::abort_source& as,
|
||||
seastar::abort_source* as,
|
||||
std::optional<::service::raft_timeout> timeout) {
|
||||
auto group0_cmd = group0_client.prepare_command(
|
||||
::service::write_mutations{
|
||||
@@ -138,7 +139,7 @@ future<> announce_mutations_with_batching(
|
||||
::service::raft_group0_client& group0_client,
|
||||
start_operation_func_t start_operation_func,
|
||||
std::function<mutations_generator(api::timestamp_type& t)> gen,
|
||||
seastar::abort_source& as,
|
||||
seastar::abort_source* as,
|
||||
std::optional<::service::raft_timeout> timeout) {
|
||||
// account for command's overhead, it's better to use smaller threshold than constantly bounce off the limit
|
||||
size_t memory_threshold = group0_client.max_command_size() * 0.75;
|
||||
@@ -189,7 +190,7 @@ future<> announce_mutations(
|
||||
::service::raft_group0_client& group0_client,
|
||||
const sstring query_string,
|
||||
std::vector<data_value_or_unset> values,
|
||||
seastar::abort_source& as,
|
||||
seastar::abort_source* as,
|
||||
std::optional<::service::raft_timeout> timeout) {
|
||||
auto group0_guard = co_await group0_client.start_operation(as, timeout);
|
||||
auto timestamp = group0_guard.write_timestamp();
|
||||
|
||||
@@ -70,7 +70,7 @@ future<> once_among_shards(Task&& f) {
|
||||
// Func must support being invoked more than once.
|
||||
future<> do_after_system_ready(seastar::abort_source& as, seastar::noncopyable_function<future<>()> func);
|
||||
|
||||
future<> create_legacy_metadata_table_if_missing(
|
||||
future<> create_metadata_table_if_missing(
|
||||
std::string_view table_name,
|
||||
cql3::query_processor&,
|
||||
std::string_view cql,
|
||||
@@ -84,7 +84,7 @@ future<> create_legacy_metadata_table_if_missing(
|
||||
// Execute update query via group0 mechanism, mutations will be applied on all nodes.
|
||||
// Use this function when need to perform read before write on a single guard or if
|
||||
// you have more than one mutation and potentially exceed single command size limit.
|
||||
using start_operation_func_t = std::function<future<::service::group0_guard>(abort_source&)>;
|
||||
using start_operation_func_t = std::function<future<::service::group0_guard>(abort_source*)>;
|
||||
using mutations_generator = coroutine::experimental::generator<mutation>;
|
||||
future<> announce_mutations_with_batching(
|
||||
::service::raft_group0_client& group0_client,
|
||||
@@ -93,7 +93,7 @@ future<> announce_mutations_with_batching(
|
||||
// function here
|
||||
start_operation_func_t start_operation_func,
|
||||
std::function<mutations_generator(api::timestamp_type& t)> gen,
|
||||
seastar::abort_source& as,
|
||||
seastar::abort_source* as,
|
||||
std::optional<::service::raft_timeout> timeout);
|
||||
|
||||
// Execute update query via group0 mechanism, mutations will be applied on all nodes.
|
||||
@@ -102,7 +102,7 @@ future<> announce_mutations(
|
||||
::service::raft_group0_client& group0_client,
|
||||
const sstring query_string,
|
||||
std::vector<data_value_or_unset> values,
|
||||
seastar::abort_source& as,
|
||||
seastar::abort_source* as,
|
||||
std::optional<::service::raft_timeout> timeout);
|
||||
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
*/
|
||||
|
||||
#include "auth/default_authorizer.hh"
|
||||
#include "db/system_keyspace.hh"
|
||||
#include "db/system_auth_keyspace.hh"
|
||||
|
||||
extern "C" {
|
||||
#include <crypt.h>
|
||||
@@ -103,7 +103,7 @@ future<> default_authorizer::migrate_legacy_metadata() {
|
||||
});
|
||||
}
|
||||
|
||||
future<> default_authorizer::start_legacy() {
|
||||
future<> default_authorizer::start() {
|
||||
static const sstring create_table = fmt::format(
|
||||
"CREATE TABLE {}.{} ("
|
||||
"{} text,"
|
||||
@@ -121,7 +121,7 @@ future<> default_authorizer::start_legacy() {
|
||||
90 * 24 * 60 * 60); // 3 months.
|
||||
|
||||
return once_among_shards([this] {
|
||||
return create_legacy_metadata_table_if_missing(
|
||||
return create_metadata_table_if_missing(
|
||||
PERMISSIONS_CF,
|
||||
_qp,
|
||||
create_table,
|
||||
@@ -144,13 +144,6 @@ future<> default_authorizer::start_legacy() {
|
||||
});
|
||||
}
|
||||
|
||||
future<> default_authorizer::start() {
|
||||
if (legacy_mode(_qp)) {
|
||||
return start_legacy();
|
||||
}
|
||||
return make_ready_future<>();
|
||||
}
|
||||
|
||||
future<> default_authorizer::stop() {
|
||||
_as.request_abort();
|
||||
return _finished.handle_exception_type([](const sleep_aborted&) {}).handle_exception_type([](const abort_requested_exception&) {});
|
||||
@@ -203,7 +196,7 @@ default_authorizer::modify(
|
||||
cql3::query_processor::cache_internal::no).discard_result();
|
||||
}
|
||||
co_return co_await announce_mutations(_qp, _group0_client, query,
|
||||
{permissions::to_strings(set), sstring(role_name), resource.name()}, _as, ::service::raft_timeout{});
|
||||
{permissions::to_strings(set), sstring(role_name), resource.name()}, &_as, ::service::raft_timeout{});
|
||||
}
|
||||
|
||||
|
||||
@@ -256,7 +249,7 @@ future<> default_authorizer::revoke_all(std::string_view role_name) {
|
||||
{sstring(role_name)},
|
||||
cql3::query_processor::cache_internal::no).discard_result();
|
||||
} else {
|
||||
co_await announce_mutations(_qp, _group0_client, query, {sstring(role_name)}, _as, ::service::raft_timeout{});
|
||||
co_await announce_mutations(_qp, _group0_client, query, {sstring(role_name)}, &_as, ::service::raft_timeout{});
|
||||
}
|
||||
} catch (exceptions::request_execution_exception& e) {
|
||||
alogger.warn("CassandraAuthorizer failed to revoke all permissions of {}: {}", role_name, e);
|
||||
@@ -346,9 +339,9 @@ future<> default_authorizer::revoke_all(const resource& resource) {
|
||||
const auto timeout = ::service::raft_timeout{};
|
||||
co_await announce_mutations_with_batching(
|
||||
_group0_client,
|
||||
[this, timeout](abort_source& as) { return _group0_client.start_operation(as, timeout); },
|
||||
[this, timeout](abort_source* as) { return _group0_client.start_operation(as, timeout); },
|
||||
std::move(gen),
|
||||
_as,
|
||||
&_as,
|
||||
timeout);
|
||||
} catch (exceptions::request_execution_exception& e) {
|
||||
alogger.warn("CassandraAuthorizer failed to revoke all permissions on {}: {}", name, e);
|
||||
|
||||
@@ -60,8 +60,6 @@ public:
|
||||
virtual const resource_set& protected_resources() const override;
|
||||
|
||||
private:
|
||||
future<> start_legacy();
|
||||
|
||||
bool legacy_metadata_exists() const;
|
||||
|
||||
future<> revoke_all_legacy(const resource&);
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
|
||||
#include "auth/resource.hh"
|
||||
#include "auth/role_manager.hh"
|
||||
#include <seastar/core/future.hh>
|
||||
#include "seastar/core/future.hh"
|
||||
|
||||
namespace cql3 {
|
||||
class query_processor;
|
||||
|
||||
@@ -132,48 +132,48 @@ future<> password_authenticator::create_default_if_missing() {
|
||||
db::consistency_level::QUORUM,
|
||||
internal_distributed_query_state(),
|
||||
{salted_pwd, _superuser},
|
||||
cql3::query_processor::cache_internal::no);
|
||||
plogger.info("Created default superuser authentication record.");
|
||||
cql3::query_processor::cache_internal::no).then([](auto&&) {
|
||||
plogger.info("Created default superuser authentication record.");
|
||||
});
|
||||
} else {
|
||||
co_await announce_mutations(_qp, _group0_client, query,
|
||||
{salted_pwd, _superuser}, _as, ::service::raft_timeout{});
|
||||
plogger.info("Created default superuser authentication record.");
|
||||
{salted_pwd, _superuser}, &_as, ::service::raft_timeout{}).then([]() {
|
||||
plogger.info("Created default superuser authentication record.");
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
future<> password_authenticator::start() {
|
||||
return once_among_shards([this] {
|
||||
_stopped = do_after_system_ready(_as, [this] {
|
||||
return async([this] {
|
||||
if (legacy_mode(_qp)) {
|
||||
_migration_manager.wait_for_schema_agreement(_qp.db().real_database(), db::timeout_clock::time_point::max(), &_as).get();
|
||||
return once_among_shards([this] {
|
||||
auto f = create_metadata_table_if_missing(
|
||||
meta::roles_table::name,
|
||||
_qp,
|
||||
meta::roles_table::creation_query(),
|
||||
_migration_manager);
|
||||
|
||||
if (any_nondefault_role_row_satisfies(_qp, &has_salted_hash, _superuser).get()) {
|
||||
if (legacy_metadata_exists()) {
|
||||
plogger.warn("Ignoring legacy authentication metadata since nondefault data already exist.");
|
||||
}
|
||||
_stopped = do_after_system_ready(_as, [this] {
|
||||
return async([this] {
|
||||
_migration_manager.wait_for_schema_agreement(_qp.db().real_database(), db::timeout_clock::time_point::max(), &_as).get();
|
||||
|
||||
return;
|
||||
}
|
||||
if (any_nondefault_role_row_satisfies(_qp, &has_salted_hash, _superuser).get()) {
|
||||
if (legacy_metadata_exists()) {
|
||||
plogger.warn("Ignoring legacy authentication metadata since nondefault data already exist.");
|
||||
}
|
||||
|
||||
if (legacy_metadata_exists()) {
|
||||
migrate_legacy_metadata().get();
|
||||
return;
|
||||
}
|
||||
}
|
||||
create_default_if_missing().get();
|
||||
});
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
if (legacy_mode(_qp)) {
|
||||
return create_legacy_metadata_table_if_missing(
|
||||
meta::roles_table::name,
|
||||
_qp,
|
||||
meta::roles_table::creation_query(),
|
||||
_migration_manager);
|
||||
}
|
||||
return make_ready_future<>();
|
||||
});
|
||||
if (legacy_metadata_exists()) {
|
||||
migrate_legacy_metadata().get();
|
||||
return;
|
||||
}
|
||||
|
||||
create_default_if_missing().get();
|
||||
});
|
||||
});
|
||||
|
||||
return f;
|
||||
});
|
||||
}
|
||||
|
||||
future<> password_authenticator::stop() {
|
||||
@@ -271,7 +271,7 @@ future<> password_authenticator::create(std::string_view role_name, const authen
|
||||
cql3::query_processor::cache_internal::no).discard_result();
|
||||
} else {
|
||||
co_await announce_mutations(_qp, _group0_client, query,
|
||||
{passwords::hash(*options.password, rng_for_salt), sstring(role_name)}, _as, ::service::raft_timeout{});
|
||||
{passwords::hash(*options.password, rng_for_salt), sstring(role_name)}, &_as, ::service::raft_timeout{});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -294,7 +294,7 @@ future<> password_authenticator::alter(std::string_view role_name, const authent
|
||||
cql3::query_processor::cache_internal::no).discard_result();
|
||||
} else {
|
||||
co_await announce_mutations(_qp, _group0_client, query,
|
||||
{passwords::hash(*options.password, rng_for_salt), sstring(role_name)}, _as, ::service::raft_timeout{});
|
||||
{passwords::hash(*options.password, rng_for_salt), sstring(role_name)}, &_as, ::service::raft_timeout{});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -311,7 +311,7 @@ future<> password_authenticator::drop(std::string_view name) {
|
||||
{sstring(name)},
|
||||
cql3::query_processor::cache_internal::no).discard_result();
|
||||
} else {
|
||||
co_await announce_mutations(_qp, _group0_client, query, {sstring(name)}, _as, ::service::raft_timeout{});
|
||||
co_await announce_mutations(_qp, _group0_client, query, {sstring(name)}, &_as, ::service::raft_timeout{});
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -8,7 +8,6 @@
|
||||
|
||||
#include "auth/permissions_cache.hh"
|
||||
|
||||
#include <fmt/ranges.h>
|
||||
#include "auth/authorizer.hh"
|
||||
#include "auth/service.hh"
|
||||
|
||||
|
||||
@@ -41,28 +41,6 @@ enum class resource_kind {
|
||||
data, role, service_level, functions
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
template <>
|
||||
struct fmt::formatter<auth::resource_kind> : fmt::formatter<string_view> {
|
||||
template <typename FormatContext>
|
||||
auto format(const auth::resource_kind kind, FormatContext& ctx) const {
|
||||
using enum auth::resource_kind;
|
||||
switch (kind) {
|
||||
case data:
|
||||
return formatter<string_view>::format("data", ctx);
|
||||
case role:
|
||||
return formatter<string_view>::format("role", ctx);
|
||||
case service_level:
|
||||
return formatter<string_view>::format("service_level", ctx);
|
||||
case functions:
|
||||
return formatter<string_view>::format("functions", ctx);
|
||||
}
|
||||
std::abort();
|
||||
}
|
||||
};
|
||||
|
||||
namespace auth {
|
||||
///
|
||||
/// Type tag for constructing data resources.
|
||||
///
|
||||
@@ -268,6 +246,25 @@ std::pair<sstring, std::vector<data_type>> decode_signature(std::string_view enc
|
||||
|
||||
}
|
||||
|
||||
template <>
|
||||
struct fmt::formatter<auth::resource_kind> : fmt::formatter<std::string_view> {
|
||||
template <typename FormatContext>
|
||||
auto format(const auth::resource_kind kind, FormatContext& ctx) const {
|
||||
using enum auth::resource_kind;
|
||||
switch (kind) {
|
||||
case data:
|
||||
return formatter<std::string_view>::format("data", ctx);
|
||||
case role:
|
||||
return formatter<std::string_view>::format("role", ctx);
|
||||
case service_level:
|
||||
return formatter<std::string_view>::format("service_level", ctx);
|
||||
case functions:
|
||||
return formatter<std::string_view>::format("functions", ctx);
|
||||
}
|
||||
std::abort();
|
||||
}
|
||||
};
|
||||
|
||||
namespace std {
|
||||
|
||||
template <>
|
||||
|
||||
@@ -28,9 +28,10 @@
|
||||
#include "db/config.hh"
|
||||
#include "db/consistency_level_type.hh"
|
||||
#include "db/functions/function_name.hh"
|
||||
#include "db/system_auth_keyspace.hh"
|
||||
#include "log.hh"
|
||||
#include "schema/schema_fwd.hh"
|
||||
#include <seastar/core/future.hh>
|
||||
#include "seastar/core/future.hh"
|
||||
#include "service/migration_manager.hh"
|
||||
#include "timestamp.hh"
|
||||
#include "utils/class_registrator.hh"
|
||||
@@ -171,7 +172,7 @@ service::service(
|
||||
used_by_maintenance_socket) {
|
||||
}
|
||||
|
||||
future<> service::create_legacy_keyspace_if_missing(::service::migration_manager& mm) const {
|
||||
future<> service::create_keyspace_if_missing(::service::migration_manager& mm) const {
|
||||
assert(this_shard_id() == 0); // once_among_shards makes sure a function is executed on shard 0 only
|
||||
auto db = _qp.db();
|
||||
|
||||
@@ -203,12 +204,8 @@ future<> service::start(::service::migration_manager& mm, db::system_keyspace& s
|
||||
// version is set in query processor to be easily available in various places we call auth::legacy_mode check.
|
||||
_qp.auth_version = auth_version;
|
||||
if (!_used_by_maintenance_socket) {
|
||||
// this legacy keyspace is only used by cqlsh
|
||||
// it's needed when executing `list roles` or `list users`
|
||||
// it doesn't affect anything except that cqlsh fails if keyspace
|
||||
// is not found
|
||||
co_await once_among_shards([this, &mm] {
|
||||
return create_legacy_keyspace_if_missing(mm);
|
||||
return create_keyspace_if_missing(mm);
|
||||
});
|
||||
}
|
||||
co_await _role_manager->start();
|
||||
@@ -251,6 +248,51 @@ void service::reset_authorization_cache() {
|
||||
_qp.reset_cache();
|
||||
}
|
||||
|
||||
future<bool> service::has_existing_legacy_users() const {
|
||||
if (!_qp.db().has_schema(meta::legacy::AUTH_KS, meta::legacy::USERS_CF)) {
|
||||
return make_ready_future<bool>(false);
|
||||
}
|
||||
|
||||
static const sstring default_user_query = format("SELECT * FROM {}.{} WHERE {} = ?",
|
||||
meta::legacy::AUTH_KS,
|
||||
meta::legacy::USERS_CF,
|
||||
meta::user_name_col_name);
|
||||
|
||||
static const sstring all_users_query = format("SELECT * FROM {}.{} LIMIT 1",
|
||||
meta::legacy::AUTH_KS,
|
||||
meta::legacy::USERS_CF);
|
||||
|
||||
// This logic is borrowed directly from Apache Cassandra. By first checking for the presence of the default user, we
|
||||
// can potentially avoid doing a range query with a high consistency level.
|
||||
|
||||
return _qp.execute_internal(
|
||||
default_user_query,
|
||||
db::consistency_level::ONE,
|
||||
{meta::DEFAULT_SUPERUSER_NAME},
|
||||
cql3::query_processor::cache_internal::yes).then([this](auto results) {
|
||||
if (!results->empty()) {
|
||||
return make_ready_future<bool>(true);
|
||||
}
|
||||
|
||||
return _qp.execute_internal(
|
||||
default_user_query,
|
||||
db::consistency_level::QUORUM,
|
||||
{meta::DEFAULT_SUPERUSER_NAME},
|
||||
cql3::query_processor::cache_internal::yes).then([this](auto results) {
|
||||
if (!results->empty()) {
|
||||
return make_ready_future<bool>(true);
|
||||
}
|
||||
|
||||
return _qp.execute_internal(
|
||||
all_users_query,
|
||||
db::consistency_level::QUORUM,
|
||||
cql3::query_processor::cache_internal::no).then([](auto results) {
|
||||
return make_ready_future<bool>(!results->empty());
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
future<permission_set>
|
||||
service::get_uncached_permissions(const role_or_anonymous& maybe_role, const resource& r) const {
|
||||
if (is_anonymous(maybe_role)) {
|
||||
@@ -643,7 +685,7 @@ future<> migrate_to_auth_v2(db::system_keyspace& sys_ks, ::service::raft_group0_
|
||||
}
|
||||
auto muts = co_await qp.get_mutations_internal(
|
||||
format("INSERT INTO {}.{} ({}) VALUES ({})",
|
||||
db::system_keyspace::NAME,
|
||||
db::system_auth_keyspace::NAME,
|
||||
cf_name,
|
||||
col_names_str,
|
||||
val_binders_str),
|
||||
@@ -658,12 +700,12 @@ future<> migrate_to_auth_v2(db::system_keyspace& sys_ks, ::service::raft_group0_
|
||||
}
|
||||
}
|
||||
co_yield co_await sys_ks.make_auth_version_mutation(ts,
|
||||
db::system_keyspace::auth_version_t::v2);
|
||||
db::system_auth_keyspace::version_t::v2);
|
||||
};
|
||||
co_await announce_mutations_with_batching(g0,
|
||||
start_operation_func,
|
||||
std::move(gen),
|
||||
as,
|
||||
&as,
|
||||
std::nullopt);
|
||||
}
|
||||
|
||||
|
||||
@@ -175,7 +175,9 @@ public:
|
||||
}
|
||||
|
||||
private:
|
||||
future<> create_legacy_keyspace_if_missing(::service::migration_manager& mm) const;
|
||||
future<bool> has_existing_legacy_users() const;
|
||||
|
||||
future<> create_keyspace_if_missing(::service::migration_manager& mm) const;
|
||||
};
|
||||
|
||||
future<bool> has_superuser(const service&, const authenticated_user&);
|
||||
|
||||
@@ -143,7 +143,7 @@ const resource_set& standard_role_manager::protected_resources() const {
|
||||
return resources;
|
||||
}
|
||||
|
||||
future<> standard_role_manager::create_legacy_metadata_tables_if_missing() const {
|
||||
future<> standard_role_manager::create_metadata_tables_if_missing() const {
|
||||
static const sstring create_role_members_query = fmt::format(
|
||||
"CREATE TABLE {}.{} ("
|
||||
" role text,"
|
||||
@@ -155,17 +155,17 @@ future<> standard_role_manager::create_legacy_metadata_tables_if_missing() const
|
||||
|
||||
|
||||
return when_all_succeed(
|
||||
create_legacy_metadata_table_if_missing(
|
||||
create_metadata_table_if_missing(
|
||||
meta::roles_table::name,
|
||||
_qp,
|
||||
meta::roles_table::creation_query(),
|
||||
_migration_manager),
|
||||
create_legacy_metadata_table_if_missing(
|
||||
create_metadata_table_if_missing(
|
||||
meta::role_members_table::name,
|
||||
_qp,
|
||||
create_role_members_query,
|
||||
_migration_manager),
|
||||
create_legacy_metadata_table_if_missing(
|
||||
create_metadata_table_if_missing(
|
||||
meta::role_attributes_table::name,
|
||||
_qp,
|
||||
meta::role_attributes_table::creation_query(),
|
||||
@@ -190,7 +190,7 @@ future<> standard_role_manager::create_default_role_if_missing() {
|
||||
{_superuser},
|
||||
cql3::query_processor::cache_internal::no).discard_result();
|
||||
} else {
|
||||
co_await announce_mutations(_qp, _group0_client, query, {_superuser}, _as, ::service::raft_timeout{});
|
||||
co_await announce_mutations(_qp, _group0_client, query, {_superuser}, &_as, ::service::raft_timeout{});
|
||||
}
|
||||
log.info("Created default superuser role '{}'.", _superuser);
|
||||
} catch(const exceptions::unavailable_exception& e) {
|
||||
@@ -236,30 +236,24 @@ future<> standard_role_manager::migrate_legacy_metadata() {
|
||||
|
||||
future<> standard_role_manager::start() {
|
||||
return once_among_shards([this] {
|
||||
return futurize_invoke([this] () {
|
||||
if (legacy_mode(_qp)) {
|
||||
return this->create_legacy_metadata_tables_if_missing();
|
||||
}
|
||||
return make_ready_future<>();
|
||||
}).then([this] {
|
||||
return this->create_metadata_tables_if_missing().then([this] {
|
||||
_stopped = auth::do_after_system_ready(_as, [this] {
|
||||
return seastar::async([this] {
|
||||
if (legacy_mode(_qp)) {
|
||||
_migration_manager.wait_for_schema_agreement(_qp.db().real_database(), db::timeout_clock::time_point::max(), &_as).get();
|
||||
|
||||
if (any_nondefault_role_row_satisfies(_qp, &has_can_login).get()) {
|
||||
if (this->legacy_metadata_exists()) {
|
||||
log.warn("Ignoring legacy user metadata since nondefault roles already exist.");
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
_migration_manager.wait_for_schema_agreement(_qp.db().real_database(), db::timeout_clock::time_point::max(), &_as).get();
|
||||
|
||||
if (any_nondefault_role_row_satisfies(_qp, &has_can_login).get()) {
|
||||
if (this->legacy_metadata_exists()) {
|
||||
this->migrate_legacy_metadata().get();
|
||||
return;
|
||||
log.warn("Ignoring legacy user metadata since nondefault roles already exist.");
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
if (this->legacy_metadata_exists()) {
|
||||
this->migrate_legacy_metadata().get();
|
||||
return;
|
||||
}
|
||||
|
||||
create_default_role_if_missing().get();
|
||||
});
|
||||
});
|
||||
@@ -285,7 +279,7 @@ future<> standard_role_manager::create_or_replace(std::string_view role_name, co
|
||||
{sstring(role_name), c.is_superuser, c.can_login},
|
||||
cql3::query_processor::cache_internal::yes).discard_result();
|
||||
} else {
|
||||
co_await announce_mutations(_qp, _group0_client, query, {sstring(role_name), c.is_superuser, c.can_login}, _as, ::service::raft_timeout{});
|
||||
co_await announce_mutations(_qp, _group0_client, query, {sstring(role_name), c.is_superuser, c.can_login}, &_as, ::service::raft_timeout{});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -333,7 +327,7 @@ standard_role_manager::alter(std::string_view role_name, const role_config_updat
|
||||
{sstring(role_name)},
|
||||
cql3::query_processor::cache_internal::no).discard_result();
|
||||
} else {
|
||||
return announce_mutations(_qp, _group0_client, std::move(query), {sstring(role_name)}, _as, ::service::raft_timeout{});
|
||||
return announce_mutations(_qp, _group0_client, std::move(query), {sstring(role_name)}, &_as, ::service::raft_timeout{});
|
||||
}
|
||||
});
|
||||
}
|
||||
@@ -383,7 +377,7 @@ future<> standard_role_manager::drop(std::string_view role_name) {
|
||||
co_await _qp.execute_internal(query, {sstring(role_name)},
|
||||
cql3::query_processor::cache_internal::yes).discard_result();
|
||||
} else {
|
||||
co_await announce_mutations(_qp, _group0_client, query, {sstring(role_name)}, _as, ::service::raft_timeout{});
|
||||
co_await announce_mutations(_qp, _group0_client, query, {sstring(role_name)}, &_as, ::service::raft_timeout{});
|
||||
}
|
||||
};
|
||||
// Finally, delete the role itself.
|
||||
@@ -401,7 +395,7 @@ future<> standard_role_manager::drop(std::string_view role_name) {
|
||||
{sstring(role_name)},
|
||||
cql3::query_processor::cache_internal::no).discard_result();
|
||||
} else {
|
||||
co_await announce_mutations(_qp, _group0_client, query, {sstring(role_name)}, _as, ::service::raft_timeout{});
|
||||
co_await announce_mutations(_qp, _group0_client, query, {sstring(role_name)}, &_as, ::service::raft_timeout{});
|
||||
}
|
||||
};
|
||||
|
||||
@@ -434,7 +428,7 @@ standard_role_manager::modify_membership(
|
||||
cql3::query_processor::cache_internal::no).discard_result();
|
||||
} else {
|
||||
co_await announce_mutations(_qp, _group0_client, std::move(query),
|
||||
{role_set{sstring(role_name)}, sstring(grantee_name)}, _as, ::service::raft_timeout{});
|
||||
{role_set{sstring(role_name)}, sstring(grantee_name)}, &_as, ::service::raft_timeout{});
|
||||
}
|
||||
};
|
||||
|
||||
@@ -453,7 +447,7 @@ standard_role_manager::modify_membership(
|
||||
cql3::query_processor::cache_internal::no).discard_result();
|
||||
} else {
|
||||
co_return co_await announce_mutations(_qp, _group0_client, insert_query,
|
||||
{sstring(role_name), sstring(grantee_name)}, _as, ::service::raft_timeout{});
|
||||
{sstring(role_name), sstring(grantee_name)}, &_as, ::service::raft_timeout{});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -470,7 +464,7 @@ standard_role_manager::modify_membership(
|
||||
cql3::query_processor::cache_internal::no).discard_result();
|
||||
} else {
|
||||
co_return co_await announce_mutations(_qp, _group0_client, delete_query,
|
||||
{sstring(role_name), sstring(grantee_name)}, _as, ::service::raft_timeout{});
|
||||
{sstring(role_name), sstring(grantee_name)}, &_as, ::service::raft_timeout{});
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -644,7 +638,7 @@ future<> standard_role_manager::set_attribute(std::string_view role_name, std::s
|
||||
co_await _qp.execute_internal(query, {sstring(role_name), sstring(attribute_name), sstring(attribute_value)}, cql3::query_processor::cache_internal::yes).discard_result();
|
||||
} else {
|
||||
co_await announce_mutations(_qp, _group0_client, query,
|
||||
{sstring(role_name), sstring(attribute_name), sstring(attribute_value)}, _as, ::service::raft_timeout{});
|
||||
{sstring(role_name), sstring(attribute_name), sstring(attribute_value)}, &_as, ::service::raft_timeout{});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -659,7 +653,7 @@ future<> standard_role_manager::remove_attribute(std::string_view role_name, std
|
||||
co_await _qp.execute_internal(query, {sstring(role_name), sstring(attribute_name)}, cql3::query_processor::cache_internal::yes).discard_result();
|
||||
} else {
|
||||
co_await announce_mutations(_qp, _group0_client, query,
|
||||
{sstring(role_name), sstring(attribute_name)}, _as, ::service::raft_timeout{});
|
||||
{sstring(role_name), sstring(attribute_name)}, &_as, ::service::raft_timeout{});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -79,7 +79,7 @@ public:
|
||||
private:
|
||||
enum class membership_change { add, remove };
|
||||
|
||||
future<> create_legacy_metadata_tables_if_missing() const;
|
||||
future<> create_metadata_tables_if_missing() const;
|
||||
|
||||
bool legacy_metadata_exists();
|
||||
|
||||
|
||||
14
bin/nodetool
14
bin/nodetool
@@ -1,14 +0,0 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright (C) 2024-present ScyllaDB
|
||||
#
|
||||
#
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
#
|
||||
|
||||
SCRIPT_PATH=$(dirname $(realpath "$0"))
|
||||
|
||||
INSTALLED_SCYLLA_PATH="${SCRIPT_PATH}/scylla"
|
||||
|
||||
# Allow plugging scylla path for local testing
|
||||
exec ${SCYLLA:-${INSTALLED_SCYLLA_PATH}} nodetool $@
|
||||
@@ -11,7 +11,6 @@ target_include_directories(cdc
|
||||
${CMAKE_SOURCE_DIR})
|
||||
target_link_libraries(cdc
|
||||
PUBLIC
|
||||
absl::headers
|
||||
Seastar::seastar
|
||||
xxHash::xxhash
|
||||
PRIVATE
|
||||
|
||||
@@ -66,10 +66,10 @@ public:
|
||||
|
||||
} // namespace cdc
|
||||
|
||||
template <> struct fmt::formatter<cdc::image_mode> : fmt::formatter<string_view> {
|
||||
template <> struct fmt::formatter<cdc::image_mode> : fmt::formatter<std::string_view> {
|
||||
auto format(cdc::image_mode, fmt::format_context& ctx) const -> decltype(ctx.out());
|
||||
};
|
||||
|
||||
template <> struct fmt::formatter<cdc::delta_mode> : fmt::formatter<string_view> {
|
||||
template <> struct fmt::formatter<cdc::delta_mode> : fmt::formatter<std::string_view> {
|
||||
auto format(cdc::delta_mode, fmt::format_context& ctx) const -> decltype(ctx.out());
|
||||
};
|
||||
|
||||
@@ -28,7 +28,6 @@
|
||||
#include "gms/feature_service.hh"
|
||||
#include "utils/error_injection.hh"
|
||||
#include "utils/UUID_gen.hh"
|
||||
#include "utils/to_string.hh"
|
||||
|
||||
#include "cdc/generation.hh"
|
||||
#include "cdc/cdc_options.hh"
|
||||
@@ -184,7 +183,7 @@ static std::vector<stream_id> create_stream_ids(
|
||||
size_t index, dht::token start, dht::token end, size_t shard_count, uint8_t ignore_msb) {
|
||||
std::vector<stream_id> result;
|
||||
result.reserve(shard_count);
|
||||
dht::static_sharder sharder(shard_count, ignore_msb);
|
||||
dht::sharder sharder(shard_count, ignore_msb);
|
||||
for (size_t shard_idx = 0; shard_idx < shard_count; ++shard_idx) {
|
||||
auto t = dht::find_first_token_for_shard(sharder, start, end, shard_idx);
|
||||
// compose the id from token and the "index" of the range end owning vnode
|
||||
@@ -473,21 +472,16 @@ static std::optional<cdc::generation_id> get_generation_id_for(const gms::inet_a
|
||||
|
||||
static future<std::optional<cdc::topology_description>> retrieve_generation_data_v2(
|
||||
cdc::generation_id_v2 id,
|
||||
bool raft_experimental_topology,
|
||||
db::system_keyspace& sys_ks,
|
||||
db::system_distributed_keyspace& sys_dist_ks) {
|
||||
auto cdc_gen = co_await sys_dist_ks.read_cdc_generation(id.id);
|
||||
|
||||
if (!cdc_gen && id.id.is_timestamp()) {
|
||||
if (raft_experimental_topology && !cdc_gen) {
|
||||
// If we entered legacy mode due to recovery, we (or some other node)
|
||||
// might gossip about a generation that was previously propagated
|
||||
// through raft. If that's the case, it will sit in
|
||||
// the system.cdc_generations_v3 table.
|
||||
//
|
||||
// If the provided id is not a timeuuid, we don't want to query
|
||||
// the system.cdc_generations_v3 table. This table stores generation
|
||||
// ids as timeuuids. If the provided id is not a timeuuid, the
|
||||
// generation cannot be in system.cdc_generations_v3. Also, the query
|
||||
// would fail with a marshaling error.
|
||||
cdc_gen = co_await sys_ks.read_cdc_generation_opt(id.id);
|
||||
}
|
||||
|
||||
@@ -496,6 +490,7 @@ static future<std::optional<cdc::topology_description>> retrieve_generation_data
|
||||
|
||||
static future<std::optional<cdc::topology_description>> retrieve_generation_data(
|
||||
cdc::generation_id gen_id,
|
||||
bool raft_experimental_topology,
|
||||
db::system_keyspace& sys_ks,
|
||||
db::system_distributed_keyspace& sys_dist_ks,
|
||||
db::system_distributed_keyspace::context ctx) {
|
||||
@@ -504,13 +499,14 @@ static future<std::optional<cdc::topology_description>> retrieve_generation_data
|
||||
return sys_dist_ks.read_cdc_topology_description(id, ctx);
|
||||
},
|
||||
[&] (const cdc::generation_id_v2& id) {
|
||||
return retrieve_generation_data_v2(id, sys_ks, sys_dist_ks);
|
||||
return retrieve_generation_data_v2(id, raft_experimental_topology, sys_ks, sys_dist_ks);
|
||||
}
|
||||
), gen_id);
|
||||
}
|
||||
|
||||
static future<> do_update_streams_description(
|
||||
cdc::generation_id gen_id,
|
||||
bool raft_experimental_topology,
|
||||
db::system_keyspace& sys_ks,
|
||||
db::system_distributed_keyspace& sys_dist_ks,
|
||||
db::system_distributed_keyspace::context ctx) {
|
||||
@@ -521,7 +517,7 @@ static future<> do_update_streams_description(
|
||||
|
||||
// We might race with another node also inserting the description, but that's ok. It's an idempotent operation.
|
||||
|
||||
auto topo = co_await retrieve_generation_data(gen_id, sys_ks, sys_dist_ks, ctx);
|
||||
auto topo = co_await retrieve_generation_data(gen_id, raft_experimental_topology, sys_ks, sys_dist_ks, ctx);
|
||||
if (!topo) {
|
||||
throw no_generation_data_exception(gen_id);
|
||||
}
|
||||
@@ -540,12 +536,13 @@ static future<> do_update_streams_description(
|
||||
*/
|
||||
static future<> update_streams_description(
|
||||
cdc::generation_id gen_id,
|
||||
bool raft_experimental_topology,
|
||||
db::system_keyspace& sys_ks,
|
||||
shared_ptr<db::system_distributed_keyspace> sys_dist_ks,
|
||||
noncopyable_function<unsigned()> get_num_token_owners,
|
||||
abort_source& abort_src) {
|
||||
try {
|
||||
co_await do_update_streams_description(gen_id, sys_ks, *sys_dist_ks, { get_num_token_owners() });
|
||||
co_await do_update_streams_description(gen_id, raft_experimental_topology, sys_ks, *sys_dist_ks, { get_num_token_owners() });
|
||||
} catch (...) {
|
||||
cdc_log.warn(
|
||||
"Could not update CDC description table with generation {}: {}. Will retry in the background.",
|
||||
@@ -553,6 +550,7 @@ static future<> update_streams_description(
|
||||
|
||||
// It is safe to discard this future: we keep system distributed keyspace alive.
|
||||
(void)(([] (cdc::generation_id gen_id,
|
||||
bool raft_experimental_topology,
|
||||
db::system_keyspace& sys_ks,
|
||||
shared_ptr<db::system_distributed_keyspace> sys_dist_ks,
|
||||
noncopyable_function<unsigned()> get_num_token_owners,
|
||||
@@ -565,7 +563,7 @@ static future<> update_streams_description(
|
||||
co_return;
|
||||
}
|
||||
try {
|
||||
co_await do_update_streams_description(gen_id, sys_ks, *sys_dist_ks, { get_num_token_owners() });
|
||||
co_await do_update_streams_description(gen_id, raft_experimental_topology, sys_ks, *sys_dist_ks, { get_num_token_owners() });
|
||||
co_return;
|
||||
} catch (...) {
|
||||
cdc_log.warn(
|
||||
@@ -573,7 +571,7 @@ static future<> update_streams_description(
|
||||
gen_id, std::current_exception());
|
||||
}
|
||||
}
|
||||
})(gen_id, sys_ks, std::move(sys_dist_ks), std::move(get_num_token_owners), abort_src));
|
||||
})(gen_id, raft_experimental_topology, sys_ks, std::move(sys_dist_ks), std::move(get_num_token_owners), abort_src));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -611,6 +609,7 @@ struct time_and_ttl {
|
||||
*/
|
||||
static future<std::optional<cdc::generation_id_v1>> rewrite_streams_descriptions(
|
||||
std::vector<time_and_ttl> times_and_ttls,
|
||||
bool raft_experimental_topology,
|
||||
db::system_keyspace& sys_ks,
|
||||
shared_ptr<db::system_distributed_keyspace> sys_dist_ks,
|
||||
noncopyable_function<unsigned()> get_num_token_owners,
|
||||
@@ -655,7 +654,7 @@ static future<std::optional<cdc::generation_id_v1>> rewrite_streams_descriptions
|
||||
co_await max_concurrent_for_each(first, tss.end(), 10, [&] (db_clock::time_point ts) -> future<> {
|
||||
while (true) {
|
||||
try {
|
||||
co_return co_await do_update_streams_description(cdc::generation_id_v1{ts}, sys_ks, *sys_dist_ks, { get_num_token_owners() });
|
||||
co_return co_await do_update_streams_description(cdc::generation_id_v1{ts}, raft_experimental_topology, sys_ks, *sys_dist_ks, { get_num_token_owners() });
|
||||
} catch (const no_generation_data_exception& e) {
|
||||
cdc_log.error("Failed to rewrite streams for generation {}: {}. Giving up.", ts, e);
|
||||
each_success = false;
|
||||
@@ -731,6 +730,7 @@ future<> generation_service::maybe_rewrite_streams_descriptions() {
|
||||
cdc_log.info("Rewriting stream tables in the background...");
|
||||
auto last_rewritten = co_await rewrite_streams_descriptions(
|
||||
std::move(times_and_ttls),
|
||||
_cfg.raft_experimental_topology,
|
||||
_sys_ks.local(),
|
||||
_sys_dist_ks.local_shared(),
|
||||
std::move(get_num_token_owners),
|
||||
@@ -906,7 +906,7 @@ future<> generation_service::check_and_repair_cdc_streams() {
|
||||
|
||||
std::optional<topology_description> gen;
|
||||
try {
|
||||
gen = co_await retrieve_generation_data(*latest, _sys_ks.local(), *sys_dist_ks, { tmptr->count_normal_token_owners() });
|
||||
gen = co_await retrieve_generation_data(*latest, _cfg.raft_experimental_topology, _sys_ks.local(), *sys_dist_ks, { tmptr->count_normal_token_owners() });
|
||||
} catch (exceptions::request_timeout_exception& e) {
|
||||
cdc_log.error("{}: \"{}\". {}.", timeout_msg, e.what(), exception_translating_msg);
|
||||
throw exceptions::request_execution_exception(exceptions::exception_code::READ_TIMEOUT,
|
||||
@@ -1022,7 +1022,7 @@ future<> generation_service::legacy_handle_cdc_generation(std::optional<cdc::gen
|
||||
|
||||
if (using_this_gen) {
|
||||
cdc_log.info("Starting to use generation {}", *gen_id);
|
||||
co_await update_streams_description(*gen_id, _sys_ks.local(), get_sys_dist_ks(),
|
||||
co_await update_streams_description(*gen_id, _cfg.raft_experimental_topology, _sys_ks.local(), get_sys_dist_ks(),
|
||||
[tmptr = _token_metadata.get()] { return tmptr->count_normal_token_owners(); },
|
||||
_abort_src);
|
||||
}
|
||||
@@ -1039,7 +1039,7 @@ void generation_service::legacy_async_handle_cdc_generation(cdc::generation_id g
|
||||
bool using_this_gen = co_await svc->legacy_do_handle_cdc_generation_intercept_nonfatal_errors(gen_id);
|
||||
if (using_this_gen) {
|
||||
cdc_log.info("Starting to use generation {}", gen_id);
|
||||
co_await update_streams_description(gen_id, svc->_sys_ks.local(), svc->get_sys_dist_ks(),
|
||||
co_await update_streams_description(gen_id, svc->_cfg.raft_experimental_topology, svc->_sys_ks.local(), svc->get_sys_dist_ks(),
|
||||
[tmptr = svc->_token_metadata.get()] { return tmptr->count_normal_token_owners(); },
|
||||
svc->_abort_src);
|
||||
}
|
||||
@@ -1108,7 +1108,7 @@ future<bool> generation_service::legacy_do_handle_cdc_generation(cdc::generation
|
||||
assert_shard_zero(__PRETTY_FUNCTION__);
|
||||
|
||||
auto sys_dist_ks = get_sys_dist_ks();
|
||||
auto gen = co_await retrieve_generation_data(gen_id, _sys_ks.local(), *sys_dist_ks, { _token_metadata.get()->count_normal_token_owners() });
|
||||
auto gen = co_await retrieve_generation_data(gen_id, _cfg.raft_experimental_topology, _sys_ks.local(), *sys_dist_ks, { _token_metadata.get()->count_normal_token_owners() });
|
||||
if (!gen) {
|
||||
throw std::runtime_error(format(
|
||||
"Could not find CDC generation {} in distributed system tables (current time: {}),"
|
||||
|
||||
@@ -189,7 +189,7 @@ future<utils::chunked_vector<mutation>> get_cdc_generation_mutations_v3(
|
||||
#if FMT_VERSION < 100000
|
||||
// fmt v10 introduced formatter for std::exception
|
||||
template <>
|
||||
struct fmt::formatter<cdc::no_generation_data_exception> : fmt::formatter<string_view> {
|
||||
struct fmt::formatter<cdc::no_generation_data_exception> : fmt::formatter<std::string_view> {
|
||||
auto format(const cdc::no_generation_data_exception& e, fmt::format_context& ctx) const {
|
||||
return fmt::format_to(ctx.out(), "{}", e.what());
|
||||
}
|
||||
|
||||
@@ -41,6 +41,7 @@ public:
|
||||
unsigned ignore_msb_bits;
|
||||
std::chrono::milliseconds ring_delay;
|
||||
bool dont_rewrite_streams = false;
|
||||
bool raft_experimental_topology = false;
|
||||
};
|
||||
|
||||
private:
|
||||
|
||||
@@ -155,10 +155,10 @@ public:
|
||||
friend fmt::formatter<bound_view>;
|
||||
};
|
||||
|
||||
template <> struct fmt::formatter<bound_kind> : fmt::formatter<string_view> {
|
||||
template <> struct fmt::formatter<bound_kind> : fmt::formatter<std::string_view> {
|
||||
auto format(bound_kind, fmt::format_context& ctx) const -> decltype(ctx.out());
|
||||
};
|
||||
template <> struct fmt::formatter<bound_view> : fmt::formatter<string_view> {
|
||||
template <> struct fmt::formatter<bound_view> : fmt::formatter<std::string_view> {
|
||||
auto format(const bound_view& b, fmt::format_context& ctx) const {
|
||||
return fmt::format_to(ctx.out(), "{{bound: prefix={},kind={}}}", b._prefix.get(), b._kind);
|
||||
}
|
||||
|
||||
@@ -124,7 +124,7 @@ public:
|
||||
position_range_iterator end() const { return {_set.end()}; }
|
||||
};
|
||||
|
||||
template <> struct fmt::formatter<clustering_interval_set> : fmt::formatter<string_view> {
|
||||
template <> struct fmt::formatter<clustering_interval_set> : fmt::formatter<std::string_view> {
|
||||
auto format(const clustering_interval_set& set, fmt::format_context& ctx) const {
|
||||
return fmt::format_to(ctx.out(), "{{{}}}", fmt::join(set, ",\n "));
|
||||
}
|
||||
|
||||
@@ -69,7 +69,6 @@ function(add_version_library name source)
|
||||
scylla-version-gen)
|
||||
target_compile_definitions(${name}
|
||||
PRIVATE
|
||||
SCYLLA_PRODUCT=\"${Scylla_PRODUCT}\"
|
||||
SCYLLA_VERSION=\"${Scylla_VERSION}\"
|
||||
SCYLLA_RELEASE=\"${Scylla_RELEASE}\")
|
||||
target_link_libraries(${name}
|
||||
|
||||
@@ -41,7 +41,7 @@ macro(dist_submodule name dir pkgs)
|
||||
endif()
|
||||
set(pkg_name "${Scylla_PRODUCT}-${name}-${Scylla_VERSION}-${Scylla_RELEASE}.${arch}.tar.gz")
|
||||
set(reloc_pkg "${CMAKE_SOURCE_DIR}/tools/${dir}/build/${pkg_name}")
|
||||
set(dist_pkg "${CMAKE_BINARY_DIR}/$<CONFIG>/dist/tar/${pkg_name}")
|
||||
set(dist_pkg "${CMAKE_CURRENT_BINARY_DIR}/${pkg_name}")
|
||||
add_custom_command(
|
||||
OUTPUT ${dist_pkg}
|
||||
COMMAND ${CMAKE_COMMAND} -E copy ${reloc_pkg} ${dist_pkg}
|
||||
|
||||
@@ -106,7 +106,6 @@ auto fmt::formatter<collection_mutation_view::printer>::format(const collection_
|
||||
[&] (const collection_type_impl& ctype) {
|
||||
auto&& key_type = ctype.name_comparator();
|
||||
auto&& value_type = ctype.value_comparator();
|
||||
out = fmt::format_to(out, " collection cells {{");
|
||||
for (auto&& [key, value] : cmvd.cells) {
|
||||
if (!first) {
|
||||
out = fmt::format_to(out, ", ");
|
||||
@@ -114,25 +113,20 @@ auto fmt::formatter<collection_mutation_view::printer>::format(const collection_
|
||||
fmt::format_to(out, "{}: {}", key_type->to_string(key), atomic_cell_view::printer(*value_type, value));
|
||||
first = false;
|
||||
}
|
||||
out = fmt::format_to(out, "}}");
|
||||
},
|
||||
[&] (const user_type_impl& utype) {
|
||||
out = fmt::format_to(out, " user-type cells {{");
|
||||
for (auto&& [raw_idx, value] : cmvd.cells) {
|
||||
if (first) {
|
||||
out = fmt::format_to(out, " ");
|
||||
} else {
|
||||
if (!first) {
|
||||
out = fmt::format_to(out, ", ");
|
||||
}
|
||||
auto idx = deserialize_field_index(raw_idx);
|
||||
out = fmt::format_to(out, "{}: {}", utype.field_name_as_string(idx), atomic_cell_view::printer(*utype.type(idx), value));
|
||||
first = false;
|
||||
}
|
||||
out = fmt::format_to(out, "}}");
|
||||
},
|
||||
[&] (const abstract_type& o) {
|
||||
// Not throwing exception in this likely-to-be debug context
|
||||
out = fmt::format_to(out, " attempted to pretty-print collection_mutation_view_description with type {}", o.name());
|
||||
out = fmt::format_to(out, "attempted to pretty-print collection_mutation_view_description with type {}", o.name());
|
||||
}
|
||||
));
|
||||
});
|
||||
|
||||
@@ -132,7 +132,7 @@ collection_mutation difference(const abstract_type&, collection_mutation_view, c
|
||||
bytes_ostream serialize_for_cql(const abstract_type&, collection_mutation_view);
|
||||
|
||||
template <>
|
||||
struct fmt::formatter<collection_mutation_view::printer> : fmt::formatter<string_view> {
|
||||
struct fmt::formatter<collection_mutation_view::printer> : fmt::formatter<std::string_view> {
|
||||
auto format(const collection_mutation_view::printer&, fmt::format_context& ctx) const
|
||||
-> decltype(ctx.out());
|
||||
};
|
||||
|
||||
@@ -171,8 +171,7 @@ static api::timestamp_type get_max_purgeable_timestamp(const table_state& table_
|
||||
}
|
||||
|
||||
static std::vector<shared_sstable> get_uncompacting_sstables(const table_state& table_s, std::vector<shared_sstable> sstables) {
|
||||
auto sstable_set = table_s.sstable_set_for_tombstone_gc();
|
||||
auto all_sstables = boost::copy_range<std::vector<shared_sstable>>(*sstable_set->all());
|
||||
auto all_sstables = boost::copy_range<std::vector<shared_sstable>>(*table_s.main_sstable_set().all());
|
||||
auto& compacted_undeleted = table_s.compacted_undeleted_sstables();
|
||||
all_sstables.insert(all_sstables.end(), compacted_undeleted.begin(), compacted_undeleted.end());
|
||||
boost::sort(all_sstables, [] (const shared_sstable& x, const shared_sstable& y) {
|
||||
@@ -452,7 +451,6 @@ protected:
|
||||
uint64_t _compacting_data_file_size = 0;
|
||||
api::timestamp_type _compacting_max_timestamp = api::min_timestamp;
|
||||
uint64_t _estimated_partitions = 0;
|
||||
double _estimated_droppable_tombstone_ratio = 0;
|
||||
uint64_t _bloom_filter_checks = 0;
|
||||
db::replay_position _rp;
|
||||
encoding_stats_collector _stats_collector;
|
||||
@@ -526,6 +524,9 @@ protected:
|
||||
, _owned_ranges_checker(_owned_ranges ? std::optional<dht::incremental_owned_ranges_checker>(*_owned_ranges) : std::nullopt)
|
||||
, _progress_monitor(progress_monitor)
|
||||
{
|
||||
for (auto& sst : _sstables) {
|
||||
_stats_collector.update(sst->get_encoding_stats_for_compaction());
|
||||
}
|
||||
std::unordered_set<run_id> ssts_run_ids;
|
||||
_contains_multi_fragment_runs = std::any_of(_sstables.begin(), _sstables.end(), [&ssts_run_ids] (shared_sstable& sst) {
|
||||
return !ssts_run_ids.insert(sst->run_identifier()).second;
|
||||
@@ -608,8 +609,7 @@ protected:
|
||||
sstable_writer_config cfg = _table_s.configure_writer("garbage_collection");
|
||||
cfg.run_identifier = gc_run;
|
||||
cfg.monitor = monitor.get();
|
||||
uint64_t estimated_partitions = std::max(1UL, uint64_t(ceil(partitions_per_sstable() * _estimated_droppable_tombstone_ratio)));
|
||||
auto writer = sst->get_writer(*schema(), estimated_partitions, cfg, get_encoding_stats());
|
||||
auto writer = sst->get_writer(*schema(), partitions_per_sstable(), cfg, get_encoding_stats());
|
||||
return compaction_writer(std::move(monitor), std::move(writer), std::move(sst));
|
||||
}
|
||||
|
||||
@@ -727,7 +727,6 @@ private:
|
||||
auto fully_expired = _table_s.fully_expired_sstables(_sstables, gc_clock::now());
|
||||
min_max_tracker<api::timestamp_type> timestamp_tracker;
|
||||
|
||||
double sum_of_estimated_droppable_tombstone_ratio = 0;
|
||||
_input_sstable_generations.reserve(_sstables.size());
|
||||
for (auto& sst : _sstables) {
|
||||
co_await coroutine::maybe_yield();
|
||||
@@ -746,7 +745,6 @@ private:
|
||||
log_debug("Fully expired sstable {} will be dropped on compaction completion", sst->get_filename());
|
||||
continue;
|
||||
}
|
||||
_stats_collector.update(sst->get_encoding_stats_for_compaction());
|
||||
|
||||
_cdata.compaction_size += sst->data_size();
|
||||
// We also capture the sstable, so we keep it alive while the read isn't done
|
||||
@@ -755,7 +753,6 @@ private:
|
||||
// for a better estimate for the number of partitions in the merged
|
||||
// sstable than just adding up the lengths of individual sstables.
|
||||
_estimated_partitions += sst->get_estimated_key_count();
|
||||
sum_of_estimated_droppable_tombstone_ratio += sst->estimate_droppable_tombstone_ratio(gc_clock::now(), _table_s.get_tombstone_gc_state(), _schema);
|
||||
_compacting_data_file_size += sst->ondisk_data_size();
|
||||
_compacting_max_timestamp = std::max(_compacting_max_timestamp, sst->get_stats_metadata().max_timestamp);
|
||||
if (sst->originated_on_this_node().value_or(false) && sst_stats.position.shard_id() == this_shard_id()) {
|
||||
@@ -767,8 +764,6 @@ private:
|
||||
log_debug("{} out of {} input sstables are fully expired sstables that will not be actually compacted",
|
||||
_sstables.size() - ssts->size(), _sstables.size());
|
||||
}
|
||||
// _estimated_droppable_tombstone_ratio could exceed 1.0 in certain cases, so limit it to 1.0.
|
||||
_estimated_droppable_tombstone_ratio = std::min(1.0, sum_of_estimated_droppable_tombstone_ratio / ssts->size());
|
||||
|
||||
_compacting = std::move(ssts);
|
||||
|
||||
@@ -1706,12 +1701,7 @@ public:
|
||||
}
|
||||
|
||||
compaction_writer create_compaction_writer(const dht::decorated_key& dk) override {
|
||||
auto shards = _sharder->shard_for_writes(dk.token());
|
||||
if (shards.size() != 1) {
|
||||
// Resharding is not supposed to run on tablets, so this case does not have to be supported.
|
||||
on_internal_error(clogger, fmt::format("Got {} shards for token {} in table {}.{}", shards.size(), dk.token(), _schema->ks_name(), _schema->cf_name()));
|
||||
}
|
||||
auto shard = shards[0];
|
||||
auto shard = _sharder->shard_of(dk.token());
|
||||
auto sst = _sstable_creator(shard);
|
||||
setup_new_sstable(sst);
|
||||
|
||||
|
||||
@@ -213,14 +213,14 @@ struct compaction_descriptor {
|
||||
}
|
||||
|
||||
template <>
|
||||
struct fmt::formatter<sstables::compaction_type> : fmt::formatter<string_view> {
|
||||
struct fmt::formatter<sstables::compaction_type> : fmt::formatter<std::string_view> {
|
||||
auto format(sstables::compaction_type, fmt::format_context& ctx) const -> decltype(ctx.out());
|
||||
};
|
||||
template <>
|
||||
struct fmt::formatter<sstables::compaction_type_options::scrub::mode> : fmt::formatter<string_view> {
|
||||
struct fmt::formatter<sstables::compaction_type_options::scrub::mode> : fmt::formatter<std::string_view> {
|
||||
auto format(sstables::compaction_type_options::scrub::mode, fmt::format_context& ctx) const -> decltype(ctx.out());
|
||||
};
|
||||
template <>
|
||||
struct fmt::formatter<sstables::compaction_type_options::scrub::quarantine_mode> : fmt::formatter<string_view> {
|
||||
struct fmt::formatter<sstables::compaction_type_options::scrub::quarantine_mode> : fmt::formatter<std::string_view> {
|
||||
auto format(sstables::compaction_type_options::scrub::quarantine_mode, fmt::format_context& ctx) const -> decltype(ctx.out());
|
||||
};
|
||||
|
||||
@@ -14,7 +14,6 @@
|
||||
#include "sstables/sstables.hh"
|
||||
#include "sstables/sstables_manager.hh"
|
||||
#include <memory>
|
||||
#include <fmt/ranges.h>
|
||||
#include <seastar/core/metrics.hh>
|
||||
#include <seastar/core/coroutine.hh>
|
||||
#include <seastar/coroutine/switch_to.hh>
|
||||
@@ -387,26 +386,11 @@ future<sstables::compaction_result> compaction_task_executor::compact_sstables_a
|
||||
|
||||
co_return res;
|
||||
}
|
||||
|
||||
future<sstables::sstable_set> compaction_task_executor::sstable_set_for_tombstone_gc(table_state& t) {
|
||||
auto compound_set = t.sstable_set_for_tombstone_gc();
|
||||
// Compound set will be linearized into a single set, since compaction might add or remove sstables
|
||||
// to it for incremental compaction to work.
|
||||
auto new_set = sstables::make_partitioned_sstable_set(t.schema(), false);
|
||||
co_await compound_set->for_each_sstable_gently([&] (const sstables::shared_sstable& sst) {
|
||||
auto inserted = new_set.insert(sst);
|
||||
if (!inserted) {
|
||||
on_internal_error(cmlog, format("Unable to insert SSTable {} into set used for tombstone GC", sst->get_filename()));
|
||||
}
|
||||
});
|
||||
co_return std::move(new_set);
|
||||
}
|
||||
|
||||
future<sstables::compaction_result> compaction_task_executor::compact_sstables(sstables::compaction_descriptor descriptor, sstables::compaction_data& cdata, on_replacement& on_replace, compaction_manager::can_purge_tombstones can_purge,
|
||||
sstables::offstrategy offstrategy) {
|
||||
table_state& t = *_compacting_table;
|
||||
if (can_purge) {
|
||||
descriptor.enable_garbage_collection(co_await sstable_set_for_tombstone_gc(t));
|
||||
descriptor.enable_garbage_collection(t.main_sstable_set());
|
||||
}
|
||||
descriptor.creator = [&t] (shard_id dummy) {
|
||||
auto sst = t.make_sstable();
|
||||
@@ -504,7 +488,7 @@ public:
|
||||
return compaction_task_impl::get_progress(_compaction_data, _progress_monitor);
|
||||
}
|
||||
|
||||
virtual void abort() noexcept override {
|
||||
virtual future<> abort() noexcept override {
|
||||
return compaction_task_executor::abort(_as);
|
||||
}
|
||||
protected:
|
||||
@@ -529,7 +513,7 @@ public:
|
||||
return compaction_task_impl::get_progress(_compaction_data, _progress_monitor);
|
||||
}
|
||||
|
||||
virtual void abort() noexcept override {
|
||||
virtual future<> abort() noexcept override {
|
||||
return compaction_task_executor::abort(_as);
|
||||
}
|
||||
protected:
|
||||
@@ -566,14 +550,6 @@ protected:
|
||||
// the exclusive lock can be freed to let regular compaction run in parallel to major
|
||||
lock_holder.return_all();
|
||||
|
||||
co_await utils::get_local_injector().inject("major_compaction_wait", [this] (auto& handler) -> future<> {
|
||||
cmlog.info("major_compaction_wait: waiting");
|
||||
while (!handler.poll_for_message() && !_compaction_data.is_stop_requested()) {
|
||||
co_await sleep(std::chrono::milliseconds(5));
|
||||
}
|
||||
cmlog.info("major_compaction_wait: released");
|
||||
});
|
||||
|
||||
co_await compact_sstables_and_update_history(std::move(descriptor), _compaction_data, on_replace);
|
||||
|
||||
finish_compaction();
|
||||
@@ -652,7 +628,7 @@ public:
|
||||
return compaction_task_impl::get_progress(_compaction_data, _progress_monitor);
|
||||
}
|
||||
|
||||
virtual void abort() noexcept override {
|
||||
virtual future<> abort() noexcept override {
|
||||
return compaction_task_executor::abort(_as);
|
||||
}
|
||||
protected:
|
||||
@@ -878,11 +854,12 @@ void compaction_task_executor::finish_compaction(state finish_state) noexcept {
|
||||
_compaction_state.compaction_done.signal();
|
||||
}
|
||||
|
||||
void compaction_task_executor::abort(abort_source& as) noexcept {
|
||||
future<> compaction_task_executor::abort(abort_source& as) noexcept {
|
||||
if (!as.abort_requested()) {
|
||||
as.request_abort();
|
||||
stop_compaction("user requested abort");
|
||||
}
|
||||
return make_ready_future();
|
||||
}
|
||||
|
||||
void compaction_task_executor::stop_compaction(sstring reason) noexcept {
|
||||
@@ -999,11 +976,8 @@ void compaction_manager::enable() {
|
||||
|
||||
std::function<void()> compaction_manager::compaction_submission_callback() {
|
||||
return [this] () mutable {
|
||||
auto now = gc_clock::now();
|
||||
for (auto& [table, state] : _compaction_state) {
|
||||
if (now - state.last_regular_compaction > periodic_compaction_submission_interval()) {
|
||||
postpone_compaction_for_table(table);
|
||||
}
|
||||
for (auto& e: _compaction_state) {
|
||||
postpone_compaction_for_table(e.first);
|
||||
}
|
||||
reevaluate_postponed_compactions();
|
||||
};
|
||||
@@ -1203,7 +1177,7 @@ public:
|
||||
, regular_compaction_task_impl(mgr._task_manager_module, tasks::task_id::create_random_id(), mgr._task_manager_module->new_sequence_number(), t.schema()->ks_name(), t.schema()->cf_name(), "", tasks::task_id::create_null_id())
|
||||
{}
|
||||
|
||||
virtual void abort() noexcept override {
|
||||
virtual future<> abort() noexcept override {
|
||||
return compaction_task_executor::abort(_as);
|
||||
}
|
||||
protected:
|
||||
@@ -1253,7 +1227,6 @@ protected:
|
||||
fmt::ptr(this), descriptor.sstables.size(), weight, t);
|
||||
|
||||
setup_new_compaction(descriptor.run_identifier);
|
||||
_compaction_state.last_regular_compaction = gc_clock::now();
|
||||
std::exception_ptr ex;
|
||||
|
||||
try {
|
||||
@@ -1374,7 +1347,7 @@ public:
|
||||
return compaction_task_impl::get_progress(_compaction_data, _progress_monitor);
|
||||
}
|
||||
|
||||
virtual void abort() noexcept override {
|
||||
virtual future<> abort() noexcept override {
|
||||
return compaction_task_executor::abort(_as);
|
||||
}
|
||||
protected:
|
||||
@@ -1401,20 +1374,13 @@ private:
|
||||
}));
|
||||
};
|
||||
|
||||
auto get_next_job = [&] () -> future<std::optional<sstables::compaction_descriptor>> {
|
||||
auto candidates = get_reshape_candidates();
|
||||
if (candidates.empty()) {
|
||||
co_return std::nullopt;
|
||||
}
|
||||
// all sstables added to maintenance set share the same underlying storage.
|
||||
auto& storage = candidates.front()->get_storage();
|
||||
sstables::reshape_config cfg = co_await sstables::make_reshape_config(storage, sstables::reshape_mode::strict);
|
||||
auto desc = t.get_compaction_strategy().get_reshaping_job(get_reshape_candidates(), t.schema(), cfg);
|
||||
co_return desc.sstables.size() ? std::make_optional(std::move(desc)) : std::nullopt;
|
||||
auto get_next_job = [&] () -> std::optional<sstables::compaction_descriptor> {
|
||||
auto desc = t.get_compaction_strategy().get_reshaping_job(get_reshape_candidates(), t.schema(), sstables::reshape_mode::strict);
|
||||
return desc.sstables.size() ? std::make_optional(std::move(desc)) : std::nullopt;
|
||||
};
|
||||
|
||||
std::exception_ptr err;
|
||||
while (auto desc = co_await get_next_job()) {
|
||||
while (auto desc = get_next_job()) {
|
||||
auto compacting = compacting_sstable_registration(_cm, _cm.get_compaction_state(&t), desc->sstables);
|
||||
auto on_replace = compacting.update_on_sstable_replacement();
|
||||
|
||||
@@ -1546,16 +1512,11 @@ protected:
|
||||
co_return stats;
|
||||
}
|
||||
|
||||
static sstables::compaction_descriptor
|
||||
make_descriptor(const sstables::shared_sstable& sst, const sstables::compaction_type_options& opt, owned_ranges_ptr owned_ranges = {}) {
|
||||
virtual sstables::compaction_descriptor make_descriptor(const sstables::shared_sstable& sst) const {
|
||||
auto sstable_level = sst->get_sstable_level();
|
||||
auto run_identifier = sst->run_identifier();
|
||||
return sstables::compaction_descriptor({ sst },
|
||||
sstable_level, sstables::compaction_descriptor::default_max_sstable_bytes, run_identifier, opt, owned_ranges);
|
||||
}
|
||||
|
||||
virtual sstables::compaction_descriptor make_descriptor(const sstables::shared_sstable& sst) const {
|
||||
return make_descriptor(sst, _options, _owned_ranges_ptr);
|
||||
sstable_level, sstables::compaction_descriptor::default_max_sstable_bytes, run_identifier, _options, _owned_ranges_ptr);
|
||||
}
|
||||
|
||||
virtual future<sstables::compaction_result> rewrite_sstable(const sstables::shared_sstable sst) {
|
||||
@@ -1608,30 +1569,19 @@ public:
|
||||
std::move(sstables), std::move(compacting), compaction_manager::can_purge_tombstones::yes)
|
||||
, _opt(options.as<sstables::compaction_type_options::split>())
|
||||
{
|
||||
if (utils::get_local_injector().is_enabled("split_sstable_rewrite")) {
|
||||
_do_throw_if_stopping = throw_if_stopping::yes;
|
||||
}
|
||||
}
|
||||
|
||||
static bool sstable_needs_split(const sstables::shared_sstable& sst, const sstables::compaction_type_options::split& opt) {
|
||||
return opt.classifier(sst->get_first_decorated_key().token()) != opt.classifier(sst->get_last_decorated_key().token());
|
||||
}
|
||||
|
||||
static sstables::compaction_descriptor
|
||||
make_descriptor(const sstables::shared_sstable& sst, const sstables::compaction_type_options::split& split_opt) {
|
||||
auto opt = sstables::compaction_type_options::make_split(split_opt.classifier);
|
||||
return rewrite_sstables_compaction_task_executor::make_descriptor(sst, std::move(opt));
|
||||
}
|
||||
private:
|
||||
bool sstable_needs_split(const sstables::shared_sstable& sst) const {
|
||||
return sstable_needs_split(sst, _opt);
|
||||
return _opt.classifier(sst->get_first_decorated_key().token()) != _opt.classifier(sst->get_last_decorated_key().token());
|
||||
}
|
||||
protected:
|
||||
sstables::compaction_descriptor make_descriptor(const sstables::shared_sstable& sst) const override {
|
||||
return make_descriptor(sst, _opt);
|
||||
auto desc = rewrite_sstables_compaction_task_executor::make_descriptor(sst);
|
||||
desc.options = sstables::compaction_type_options::make_split(_opt.classifier);
|
||||
return desc;
|
||||
}
|
||||
|
||||
future<sstables::compaction_result> do_rewrite_sstable(const sstables::shared_sstable sst) {
|
||||
future<sstables::compaction_result> rewrite_sstable(const sstables::shared_sstable sst) override {
|
||||
if (sstable_needs_split(sst)) {
|
||||
return rewrite_sstables_compaction_task_executor::rewrite_sstable(std::move(sst));
|
||||
}
|
||||
@@ -1644,20 +1594,6 @@ protected:
|
||||
return sstables::compaction_result{};
|
||||
});
|
||||
}
|
||||
|
||||
future<sstables::compaction_result> rewrite_sstable(const sstables::shared_sstable sst) override {
|
||||
co_await utils::get_local_injector().inject("split_sstable_rewrite", [this] (auto& handler) -> future<> {
|
||||
cmlog.info("split_sstable_rewrite: waiting");
|
||||
while (!handler.poll_for_message() && !_compaction_data.is_stop_requested()) {
|
||||
co_await sleep(std::chrono::milliseconds(5));
|
||||
}
|
||||
cmlog.info("split_sstable_rewrite: released");
|
||||
if (_compaction_data.is_stop_requested()) {
|
||||
throw make_compaction_stopped_exception();
|
||||
}
|
||||
}, false);
|
||||
co_return co_await do_rewrite_sstable(std::move(sst));
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
@@ -1814,7 +1750,7 @@ public:
|
||||
return compaction_task_impl::get_progress(_compaction_data, _progress_monitor);
|
||||
}
|
||||
|
||||
virtual void abort() noexcept override {
|
||||
virtual future<> abort() noexcept override {
|
||||
return compaction_task_executor::abort(_as);
|
||||
}
|
||||
protected:
|
||||
@@ -2079,31 +2015,6 @@ future<compaction_manager::compaction_stats_opt> compaction_manager::perform_spl
|
||||
return perform_task_on_all_files<split_compaction_task_executor>(info, t, std::move(options), std::move(owned_ranges_ptr), std::move(get_sstables));
|
||||
}
|
||||
|
||||
future<std::vector<sstables::shared_sstable>>
|
||||
compaction_manager::maybe_split_sstable(sstables::shared_sstable sst, table_state& t, sstables::compaction_type_options::split opt) {
|
||||
if (!split_compaction_task_executor::sstable_needs_split(sst, opt)) {
|
||||
co_return std::vector<sstables::shared_sstable>{sst};
|
||||
}
|
||||
std::vector<sstables::shared_sstable> ret;
|
||||
|
||||
// FIXME: indentation.
|
||||
auto gate = get_compaction_state(&t).gate.hold();
|
||||
sstables::compaction_progress_monitor monitor;
|
||||
sstables::compaction_data info = create_compaction_data();
|
||||
sstables::compaction_descriptor desc = split_compaction_task_executor::make_descriptor(sst, opt);
|
||||
desc.creator = [&t] (shard_id _) {
|
||||
return t.make_sstable();
|
||||
};
|
||||
desc.replacer = [&] (sstables::compaction_completion_desc d) {
|
||||
std::move(d.new_sstables.begin(), d.new_sstables.end(), std::back_inserter(ret));
|
||||
};
|
||||
|
||||
co_await sstables::compact_sstables(std::move(desc), info, t, monitor);
|
||||
co_await sst->unlink();
|
||||
|
||||
co_return ret;
|
||||
}
|
||||
|
||||
// Submit a table to be scrubbed and wait for its termination.
|
||||
future<compaction_manager::compaction_stats_opt> compaction_manager::perform_sstable_scrub(table_state& t, sstables::compaction_type_options::scrub opts, std::optional<tasks::task_info> info) {
|
||||
auto scrub_mode = opts.operation_mode;
|
||||
|
||||
@@ -350,11 +350,6 @@ public:
|
||||
// or user aborted splitting using stop API.
|
||||
future<compaction_stats_opt> perform_split_compaction(compaction::table_state& t, sstables::compaction_type_options::split opt, std::optional<tasks::task_info> info = std::nullopt);
|
||||
|
||||
// Splits a single SSTable by segregating all its data according to the classifier.
|
||||
// If SSTable doesn't need split, the same input SSTable is returned as output.
|
||||
// If SSTable needs split, then output SSTables are returned and the input SSTable is deleted.
|
||||
future<std::vector<sstables::shared_sstable>> maybe_split_sstable(sstables::shared_sstable sst, table_state& t, sstables::compaction_type_options::split opt);
|
||||
|
||||
// Run a custom job for a given table, defined by a function
|
||||
// it completes when future returned by job is ready or returns immediately
|
||||
// if manager was asked to stop.
|
||||
@@ -594,14 +589,12 @@ private:
|
||||
future<compaction_manager::compaction_stats_opt> compaction_done() noexcept {
|
||||
return _compaction_done.get_future();
|
||||
}
|
||||
|
||||
future<sstables::sstable_set> sstable_set_for_tombstone_gc(::compaction::table_state& t);
|
||||
public:
|
||||
bool stopping() const noexcept {
|
||||
return _compaction_data.abort.abort_requested();
|
||||
}
|
||||
|
||||
void abort(abort_source& as) noexcept;
|
||||
future<> abort(abort_source& as) noexcept;
|
||||
|
||||
void stop_compaction(sstring reason) noexcept;
|
||||
|
||||
|
||||
@@ -15,7 +15,6 @@
|
||||
|
||||
#include "compaction/compaction_fwd.hh"
|
||||
#include "compaction/compaction_backlog_manager.hh"
|
||||
#include "gc_clock.hh"
|
||||
|
||||
namespace compaction {
|
||||
|
||||
@@ -38,8 +37,6 @@ struct compaction_state {
|
||||
std::unordered_set<sstables::shared_sstable> sstables_requiring_cleanup;
|
||||
compaction::owned_ranges_ptr owned_ranges_ptr;
|
||||
|
||||
gc_clock::time_point last_regular_compaction;
|
||||
|
||||
explicit compaction_state(table_state& t);
|
||||
compaction_state(compaction_state&&) = delete;
|
||||
~compaction_state();
|
||||
|
||||
@@ -11,9 +11,8 @@
|
||||
|
||||
#include <vector>
|
||||
#include <chrono>
|
||||
#include <fmt/ranges.h>
|
||||
#include <seastar/core/shared_ptr.hh>
|
||||
#include <seastar/core/on_internal_error.hh>
|
||||
#include "seastar/core/on_internal_error.hh"
|
||||
#include "sstables/shared_sstable.hh"
|
||||
#include "sstables/sstables.hh"
|
||||
#include "compaction_strategy.hh"
|
||||
@@ -32,7 +31,6 @@
|
||||
#include "compaction_backlog_manager.hh"
|
||||
#include "size_tiered_backlog_tracker.hh"
|
||||
#include "leveled_manifest.hh"
|
||||
#include "utils/to_string.hh"
|
||||
|
||||
logging::logger leveled_manifest::logger("LeveledManifest");
|
||||
|
||||
@@ -83,7 +81,7 @@ reader_consumer_v2 compaction_strategy_impl::make_interposer_consumer(const muta
|
||||
}
|
||||
|
||||
compaction_descriptor
|
||||
compaction_strategy_impl::get_reshaping_job(std::vector<shared_sstable> input, schema_ptr schema, reshape_config cfg) const {
|
||||
compaction_strategy_impl::get_reshaping_job(std::vector<shared_sstable> input, schema_ptr schema, reshape_mode mode) const {
|
||||
return compaction_descriptor();
|
||||
}
|
||||
|
||||
@@ -150,7 +148,7 @@ static bool validate_unchecked_tombstone_compaction(const std::map<sstring, sstr
|
||||
auto tmp_value = compaction_strategy_impl::get_value(options, compaction_strategy_impl::UNCHECKED_TOMBSTONE_COMPACTION_OPTION);
|
||||
if (tmp_value.has_value()) {
|
||||
if (tmp_value != "true" && tmp_value != "false") {
|
||||
throw exceptions::configuration_exception(fmt::format("{} value ({}) must be \"true\" or \"false\"", compaction_strategy_impl::UNCHECKED_TOMBSTONE_COMPACTION_OPTION, *tmp_value));
|
||||
throw exceptions::configuration_exception(fmt::format("{} value ({}) must be \"true\" or \"false\"", compaction_strategy_impl::UNCHECKED_TOMBSTONE_COMPACTION_OPTION, tmp_value));
|
||||
}
|
||||
unchecked_tombstone_compaction = tmp_value == "true";
|
||||
}
|
||||
@@ -628,7 +626,7 @@ leveled_compaction_strategy::calculate_max_sstable_size_in_mb(std::optional<sstr
|
||||
max_size);
|
||||
} else if (max_size < 50) {
|
||||
leveled_manifest::logger.warn("Max sstable size of {}MB is configured. Testing done for CASSANDRA-5727 indicates that performance" \
|
||||
" improves up to 160MB", max_size);
|
||||
"improves up to 160MB", max_size);
|
||||
}
|
||||
return max_size;
|
||||
}
|
||||
@@ -728,8 +726,8 @@ compaction_backlog_tracker compaction_strategy::make_backlog_tracker() const {
|
||||
}
|
||||
|
||||
sstables::compaction_descriptor
|
||||
compaction_strategy::get_reshaping_job(std::vector<shared_sstable> input, schema_ptr schema, reshape_config cfg) const {
|
||||
return _compaction_strategy_impl->get_reshaping_job(std::move(input), schema, cfg);
|
||||
compaction_strategy::get_reshaping_job(std::vector<shared_sstable> input, schema_ptr schema, reshape_mode mode) const {
|
||||
return _compaction_strategy_impl->get_reshaping_job(std::move(input), schema, mode);
|
||||
}
|
||||
|
||||
uint64_t compaction_strategy::adjust_partition_estimate(const mutation_source_metadata& ms_meta, uint64_t partition_estimate, schema_ptr schema) const {
|
||||
@@ -767,13 +765,6 @@ compaction_strategy make_compaction_strategy(compaction_strategy_type strategy,
|
||||
return compaction_strategy(std::move(impl));
|
||||
}
|
||||
|
||||
future<reshape_config> make_reshape_config(const sstables::storage& storage, reshape_mode mode) {
|
||||
co_return sstables::reshape_config{
|
||||
.mode = mode,
|
||||
.free_storage_space = co_await storage.free_space() / smp::count,
|
||||
};
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
namespace compaction {
|
||||
|
||||
@@ -30,7 +30,6 @@ class compaction_strategy_impl;
|
||||
class sstable;
|
||||
class sstable_set;
|
||||
struct compaction_descriptor;
|
||||
class storage;
|
||||
|
||||
class compaction_strategy {
|
||||
::shared_ptr<compaction_strategy_impl> _compaction_strategy_impl;
|
||||
@@ -122,13 +121,11 @@ public:
|
||||
//
|
||||
// The caller should also pass a maximum number of SSTables which is the maximum amount of
|
||||
// SSTables that can be added into a single job.
|
||||
compaction_descriptor get_reshaping_job(std::vector<shared_sstable> input, schema_ptr schema, reshape_config cfg) const;
|
||||
compaction_descriptor get_reshaping_job(std::vector<shared_sstable> input, schema_ptr schema, reshape_mode mode) const;
|
||||
|
||||
};
|
||||
|
||||
// Creates a compaction_strategy object from one of the strategies available.
|
||||
compaction_strategy make_compaction_strategy(compaction_strategy_type strategy, const std::map<sstring, sstring>& options);
|
||||
|
||||
future<reshape_config> make_reshape_config(const sstables::storage& storage, reshape_mode mode);
|
||||
|
||||
}
|
||||
|
||||
@@ -76,6 +76,6 @@ public:
|
||||
return false;
|
||||
}
|
||||
|
||||
virtual compaction_descriptor get_reshaping_job(std::vector<shared_sstable> input, schema_ptr schema, reshape_config cfg) const;
|
||||
virtual compaction_descriptor get_reshaping_job(std::vector<shared_sstable> input, schema_ptr schema, reshape_mode mode) const;
|
||||
};
|
||||
}
|
||||
|
||||
@@ -8,8 +8,6 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <cstdint>
|
||||
|
||||
namespace sstables {
|
||||
|
||||
enum class compaction_strategy_type {
|
||||
@@ -20,10 +18,4 @@ enum class compaction_strategy_type {
|
||||
};
|
||||
|
||||
enum class reshape_mode { strict, relaxed };
|
||||
|
||||
struct reshape_config {
|
||||
reshape_mode mode;
|
||||
const uint64_t free_storage_space;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
@@ -146,8 +146,7 @@ int64_t leveled_compaction_strategy::estimated_pending_compactions(table_state&
|
||||
}
|
||||
|
||||
compaction_descriptor
|
||||
leveled_compaction_strategy::get_reshaping_job(std::vector<shared_sstable> input, schema_ptr schema, reshape_config cfg) const {
|
||||
auto mode = cfg.mode;
|
||||
leveled_compaction_strategy::get_reshaping_job(std::vector<shared_sstable> input, schema_ptr schema, reshape_mode mode) const {
|
||||
std::array<std::vector<shared_sstable>, leveled_manifest::MAX_LEVELS> level_info;
|
||||
|
||||
auto is_disjoint = [schema] (const std::vector<shared_sstable>& sstables, unsigned tolerance) -> std::tuple<bool, unsigned> {
|
||||
@@ -204,7 +203,7 @@ leveled_compaction_strategy::get_reshaping_job(std::vector<shared_sstable> input
|
||||
|
||||
if (level_info[0].size() > offstrategy_threshold) {
|
||||
size_tiered_compaction_strategy stcs(_stcs_options);
|
||||
return stcs.get_reshaping_job(std::move(level_info[0]), schema, cfg);
|
||||
return stcs.get_reshaping_job(std::move(level_info[0]), schema, mode);
|
||||
}
|
||||
|
||||
for (unsigned level = leveled_manifest::MAX_LEVELS - 1; level > 0; --level) {
|
||||
|
||||
@@ -74,7 +74,7 @@ public:
|
||||
|
||||
virtual std::unique_ptr<compaction_backlog_tracker::impl> make_backlog_tracker() const override;
|
||||
|
||||
virtual compaction_descriptor get_reshaping_job(std::vector<shared_sstable> input, schema_ptr schema, reshape_config cfg) const override;
|
||||
virtual compaction_descriptor get_reshaping_job(std::vector<shared_sstable> input, schema_ptr schema, reshape_mode mode) const override;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
@@ -298,9 +298,8 @@ size_tiered_compaction_strategy::most_interesting_bucket(const std::vector<sstab
|
||||
}
|
||||
|
||||
compaction_descriptor
|
||||
size_tiered_compaction_strategy::get_reshaping_job(std::vector<shared_sstable> input, schema_ptr schema, reshape_config cfg) const
|
||||
size_tiered_compaction_strategy::get_reshaping_job(std::vector<shared_sstable> input, schema_ptr schema, reshape_mode mode) const
|
||||
{
|
||||
auto mode = cfg.mode;
|
||||
size_t offstrategy_threshold = std::max(schema->min_compaction_threshold(), 4);
|
||||
size_t max_sstables = std::max(schema->max_compaction_threshold(), int(offstrategy_threshold));
|
||||
|
||||
|
||||
@@ -96,7 +96,7 @@ public:
|
||||
|
||||
virtual std::unique_ptr<compaction_backlog_tracker::impl> make_backlog_tracker() const override;
|
||||
|
||||
virtual compaction_descriptor get_reshaping_job(std::vector<shared_sstable> input, schema_ptr schema, reshape_config cfg) const override;
|
||||
virtual compaction_descriptor get_reshaping_job(std::vector<shared_sstable> input, schema_ptr schema, reshape_mode mode) const override;
|
||||
|
||||
friend class ::size_tiered_backlog_tracker;
|
||||
};
|
||||
|
||||
@@ -39,7 +39,6 @@ public:
|
||||
virtual bool compaction_enforce_min_threshold() const noexcept = 0;
|
||||
virtual const sstables::sstable_set& main_sstable_set() const = 0;
|
||||
virtual const sstables::sstable_set& maintenance_sstable_set() const = 0;
|
||||
virtual lw_shared_ptr<const sstables::sstable_set> sstable_set_for_tombstone_gc() const = 0;
|
||||
virtual std::unordered_set<sstables::shared_sstable> fully_expired_sstables(const std::vector<sstables::shared_sstable>& sstables, gc_clock::time_point compaction_time) const = 0;
|
||||
virtual const std::vector<sstables::shared_sstable>& compacted_undeleted_sstables() const noexcept = 0;
|
||||
virtual sstables::compaction_strategy& get_compaction_strategy() const noexcept = 0;
|
||||
@@ -64,7 +63,7 @@ public:
|
||||
namespace fmt {
|
||||
|
||||
template <>
|
||||
struct formatter<compaction::table_state> : formatter<string_view> {
|
||||
struct formatter<compaction::table_state> : formatter<std::string_view> {
|
||||
template <typename FormatContext>
|
||||
auto format(const compaction::table_state& t, FormatContext& ctx) const {
|
||||
auto s = t.schema();
|
||||
|
||||
@@ -159,9 +159,9 @@ future<> reshard(sstables::sstable_directory& dir, sstables::sstable_directory::
|
||||
// There is a semaphore inside the compaction manager in run_resharding_jobs. So we
|
||||
// parallel_for_each so the statistics about pending jobs are updated to reflect all
|
||||
// jobs. But only one will run in parallel at a time
|
||||
auto& t = table.try_get_table_state_with_static_sharding();
|
||||
auto& t = table.as_table_state();
|
||||
co_await coroutine::parallel_for_each(buckets, [&] (std::vector<sstables::shared_sstable>& sstlist) mutable {
|
||||
return table.get_compaction_manager().run_custom_job(t, sstables::compaction_type::Reshard, "Reshard compaction", [&] (sstables::compaction_data& info, sstables::compaction_progress_monitor& progress_monitor) -> future<> {
|
||||
return table.get_compaction_manager().run_custom_job(table.as_table_state(), sstables::compaction_type::Reshard, "Reshard compaction", [&] (sstables::compaction_data& info, sstables::compaction_progress_monitor& progress_monitor) -> future<> {
|
||||
auto erm = table.get_effective_replication_map(); // keep alive around compaction.
|
||||
|
||||
sstables::compaction_descriptor desc(sstlist);
|
||||
@@ -595,35 +595,28 @@ future<> table_reshaping_compaction_task_impl::run() {
|
||||
|
||||
future<> shard_reshaping_compaction_task_impl::run() {
|
||||
auto& table = _db.local().find_column_family(_status.keyspace, _status.table);
|
||||
auto holder = table.async_gate().hold();
|
||||
tasks::task_info info{_status.id, _status.shard};
|
||||
|
||||
std::unordered_map<compaction::table_state*, std::unordered_set<sstables::shared_sstable>> sstables_grouped_by_compaction_group;
|
||||
std::unordered_map<size_t, std::unordered_set<sstables::shared_sstable>> sstables_grouped_by_compaction_group;
|
||||
for (auto& sstable : _dir.get_unshared_local_sstables()) {
|
||||
auto& t = table.table_state_for_sstable(sstable);
|
||||
sstables_grouped_by_compaction_group[&t].insert(sstable);
|
||||
auto compaction_group_id = table.get_compaction_group_id_for_sstable(sstable);
|
||||
sstables_grouped_by_compaction_group[compaction_group_id].insert(sstable);
|
||||
}
|
||||
|
||||
// reshape sstables individually within the compaction groups
|
||||
for (auto& sstables_in_cg : sstables_grouped_by_compaction_group) {
|
||||
co_await reshape_compaction_group(*sstables_in_cg.first, sstables_in_cg.second, table, info);
|
||||
for (auto& sstables_in_cg : sstables_grouped_by_compaction_group | boost::adaptors::map_values) {
|
||||
co_await reshape_compaction_group(sstables_in_cg, table, info);
|
||||
}
|
||||
}
|
||||
|
||||
future<> shard_reshaping_compaction_task_impl::reshape_compaction_group(compaction::table_state& t, std::unordered_set<sstables::shared_sstable>& sstables_in_cg, replica::column_family& table, const tasks::task_info& info) {
|
||||
future<> shard_reshaping_compaction_task_impl::reshape_compaction_group(std::unordered_set<sstables::shared_sstable>& sstables_in_cg, replica::column_family& table, const tasks::task_info& info) {
|
||||
|
||||
while (true) {
|
||||
auto reshape_candidates = boost::copy_range<std::vector<sstables::shared_sstable>>(sstables_in_cg
|
||||
| boost::adaptors::filtered([&filter = _filter] (const auto& sst) {
|
||||
return filter(sst);
|
||||
}));
|
||||
if (reshape_candidates.empty()) {
|
||||
break;
|
||||
}
|
||||
// all sstables were found in the same sstable_directory instance, so they share the same underlying storage.
|
||||
auto& storage = reshape_candidates.front()->get_storage();
|
||||
auto cfg = co_await sstables::make_reshape_config(storage, _mode);
|
||||
auto desc = table.get_compaction_strategy().get_reshaping_job(std::move(reshape_candidates), table.schema(), cfg);
|
||||
auto desc = table.get_compaction_strategy().get_reshaping_job(std::move(reshape_candidates), table.schema(), _mode);
|
||||
if (desc.sstables.empty()) {
|
||||
break;
|
||||
}
|
||||
@@ -642,8 +635,8 @@ future<> shard_reshaping_compaction_task_impl::reshape_compaction_group(compacti
|
||||
desc.creator = _creator;
|
||||
|
||||
try {
|
||||
co_await table.get_compaction_manager().run_custom_job(t, sstables::compaction_type::Reshape, "Reshape compaction", [&dir = _dir, sstlist = std::move(sstlist), desc = std::move(desc), &sstables_in_cg, &t] (sstables::compaction_data& info, sstables::compaction_progress_monitor& progress_monitor) mutable -> future<> {
|
||||
sstables::compaction_result result = co_await sstables::compact_sstables(std::move(desc), info, t, progress_monitor);
|
||||
co_await table.get_compaction_manager().run_custom_job(table.as_table_state(), sstables::compaction_type::Reshape, "Reshape compaction", [&dir = _dir, &table, sstlist = std::move(sstlist), desc = std::move(desc), &sstables_in_cg] (sstables::compaction_data& info, sstables::compaction_progress_monitor& progress_monitor) mutable -> future<> {
|
||||
sstables::compaction_result result = co_await sstables::compact_sstables(std::move(desc), info, table.as_table_state(), progress_monitor);
|
||||
// update the sstables_in_cg set with new sstables and remove the reshaped ones
|
||||
for (auto& sst : sstlist) {
|
||||
sstables_in_cg.erase(sst);
|
||||
|
||||
@@ -606,7 +606,7 @@ private:
|
||||
std::function<bool (const sstables::shared_sstable&)> _filter;
|
||||
uint64_t& _total_shard_size;
|
||||
|
||||
future<> reshape_compaction_group(compaction::table_state& t, std::unordered_set<sstables::shared_sstable>& sstables_in_cg, replica::column_family& table, const tasks::task_info& info);
|
||||
future<> reshape_compaction_group(std::unordered_set<sstables::shared_sstable>& sstables_in_cg, replica::column_family& table, const tasks::task_info& info);
|
||||
public:
|
||||
shard_reshaping_compaction_task_impl(tasks::task_manager::module_ptr module,
|
||||
std::string keyspace,
|
||||
|
||||
@@ -226,14 +226,12 @@ reader_consumer_v2 time_window_compaction_strategy::make_interposer_consumer(con
|
||||
}
|
||||
|
||||
compaction_descriptor
|
||||
time_window_compaction_strategy::get_reshaping_job(std::vector<shared_sstable> input, schema_ptr schema, reshape_config cfg) const {
|
||||
auto mode = cfg.mode;
|
||||
time_window_compaction_strategy::get_reshaping_job(std::vector<shared_sstable> input, schema_ptr schema, reshape_mode mode) const {
|
||||
std::vector<shared_sstable> single_window;
|
||||
std::vector<shared_sstable> multi_window;
|
||||
|
||||
size_t offstrategy_threshold = std::max(schema->min_compaction_threshold(), 4);
|
||||
size_t max_sstables = std::max(schema->max_compaction_threshold(), int(offstrategy_threshold));
|
||||
const uint64_t target_job_size = cfg.free_storage_space * reshape_target_space_overhead;
|
||||
|
||||
if (mode == reshape_mode::relaxed) {
|
||||
offstrategy_threshold = max_sstables;
|
||||
@@ -265,41 +263,22 @@ time_window_compaction_strategy::get_reshaping_job(std::vector<shared_sstable> i
|
||||
multi_window.size(), !multi_window.empty() && sstable_set_overlapping_count(schema, multi_window) == 0,
|
||||
single_window.size(), !single_window.empty() && sstable_set_overlapping_count(schema, single_window) == 0);
|
||||
|
||||
auto get_job_size = [] (const std::vector<shared_sstable>& ssts) {
|
||||
return boost::accumulate(ssts | boost::adaptors::transformed(std::mem_fn(&sstable::bytes_on_disk)), uint64_t(0));
|
||||
};
|
||||
|
||||
// Targets a space overhead of 10%. All disjoint sstables can be compacted together as long as they won't
|
||||
// cause an overhead above target. Otherwise, the job targets a maximum of #max_threshold sstables.
|
||||
auto need_trimming = [&] (const std::vector<shared_sstable>& ssts, const uint64_t job_size, bool is_disjoint) {
|
||||
const size_t min_sstables = 2;
|
||||
auto is_above_target_size = job_size > target_job_size;
|
||||
|
||||
return (ssts.size() > max_sstables && !is_disjoint) ||
|
||||
(ssts.size() > min_sstables && is_above_target_size);
|
||||
};
|
||||
|
||||
auto maybe_trim_job = [&need_trimming] (std::vector<shared_sstable>& ssts, uint64_t job_size, bool is_disjoint) {
|
||||
while (need_trimming(ssts, job_size, is_disjoint)) {
|
||||
auto sst = ssts.back();
|
||||
ssts.pop_back();
|
||||
job_size -= sst->bytes_on_disk();
|
||||
}
|
||||
auto need_trimming = [max_sstables, schema, &is_disjoint] (const std::vector<shared_sstable>& ssts) {
|
||||
// All sstables can be compacted at once if they're disjoint, given that partitioned set
|
||||
// will incrementally open sstables which translates into bounded memory usage.
|
||||
return ssts.size() > max_sstables && !is_disjoint(ssts);
|
||||
};
|
||||
|
||||
if (!multi_window.empty()) {
|
||||
auto disjoint = is_disjoint(multi_window);
|
||||
auto job_size = get_job_size(multi_window);
|
||||
// Everything that spans multiple windows will need reshaping
|
||||
if (need_trimming(multi_window, job_size, disjoint)) {
|
||||
if (need_trimming(multi_window)) {
|
||||
// When trimming, let's keep sstables with overlapping time window, so as to reduce write amplification.
|
||||
// For example, if there are N sstables spanning window W, where N <= 32, then we can produce all data for W
|
||||
// in a single compaction round, removing the need to later compact W to reduce its number of files.
|
||||
auto sort_size = std::min(max_sstables, multi_window.size());
|
||||
boost::partial_sort(multi_window, multi_window.begin() + sort_size, [](const shared_sstable &a, const shared_sstable &b) {
|
||||
boost::partial_sort(multi_window, multi_window.begin() + max_sstables, [](const shared_sstable &a, const shared_sstable &b) {
|
||||
return a->get_stats_metadata().max_timestamp < b->get_stats_metadata().max_timestamp;
|
||||
});
|
||||
maybe_trim_job(multi_window, job_size, disjoint);
|
||||
multi_window.resize(max_sstables);
|
||||
}
|
||||
compaction_descriptor desc(std::move(multi_window));
|
||||
desc.options = compaction_type_options::make_reshape();
|
||||
@@ -318,17 +297,15 @@ time_window_compaction_strategy::get_reshaping_job(std::vector<shared_sstable> i
|
||||
std::copy(ssts.begin(), ssts.end(), std::back_inserter(single_window));
|
||||
continue;
|
||||
}
|
||||
|
||||
// reuse STCS reshape logic which will only compact similar-sized files, to increase overall efficiency
|
||||
// when reshaping time buckets containing a huge amount of files
|
||||
auto desc = size_tiered_compaction_strategy(_stcs_options).get_reshaping_job(std::move(ssts), schema, cfg);
|
||||
auto desc = size_tiered_compaction_strategy(_stcs_options).get_reshaping_job(std::move(ssts), schema, mode);
|
||||
if (!desc.sstables.empty()) {
|
||||
return desc;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!single_window.empty()) {
|
||||
maybe_trim_job(single_window, get_job_size(single_window), all_disjoint);
|
||||
compaction_descriptor desc(std::move(single_window));
|
||||
desc.options = compaction_type_options::make_reshape();
|
||||
return desc;
|
||||
|
||||
@@ -76,7 +76,6 @@ public:
|
||||
// To prevent an explosion in the number of sstables we cap it.
|
||||
// Better co-locate some windows into the same sstables than OOM.
|
||||
static constexpr uint64_t max_data_segregation_window_count = 100;
|
||||
static constexpr float reshape_target_space_overhead = 0.1f;
|
||||
|
||||
using bucket_t = std::vector<shared_sstable>;
|
||||
enum class bucket_compaction_mode { none, size_tiered, major };
|
||||
@@ -169,7 +168,7 @@ public:
|
||||
return true;
|
||||
}
|
||||
|
||||
virtual compaction_descriptor get_reshaping_job(std::vector<shared_sstable> input, schema_ptr schema, reshape_config cfg) const override;
|
||||
virtual compaction_descriptor get_reshaping_job(std::vector<shared_sstable> input, schema_ptr schema, reshape_mode mode) const override;
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
@@ -524,7 +524,7 @@ public:
|
||||
};
|
||||
|
||||
template <typename Component>
|
||||
struct fmt::formatter<std::pair<Component, composite::eoc>> : fmt::formatter<string_view> {
|
||||
struct fmt::formatter<std::pair<Component, composite::eoc>> : fmt::formatter<std::string_view> {
|
||||
template <typename FormatContext>
|
||||
auto format(const std::pair<Component, composite::eoc>& c, FormatContext& ctx) const {
|
||||
if constexpr (std::same_as<Component, bytes_view>) {
|
||||
@@ -636,7 +636,7 @@ public:
|
||||
};
|
||||
|
||||
template <>
|
||||
struct fmt::formatter<composite_view> : fmt::formatter<string_view> {
|
||||
struct fmt::formatter<composite_view> : fmt::formatter<std::string_view> {
|
||||
template <typename FormatContext>
|
||||
auto format(const composite_view& v, FormatContext& ctx) const {
|
||||
return fmt::format_to(ctx.out(), "{{{}, compound={}, static={}}}",
|
||||
@@ -650,7 +650,7 @@ composite::composite(const composite_view& v)
|
||||
{ }
|
||||
|
||||
template <>
|
||||
struct fmt::formatter<composite> : fmt::formatter<string_view> {
|
||||
struct fmt::formatter<composite> : fmt::formatter<std::string_view> {
|
||||
template <typename FormatContext>
|
||||
auto format(const composite& v, FormatContext& ctx) const {
|
||||
return fmt::format_to(ctx.out(), "{}", composite_view(v));
|
||||
|
||||
@@ -278,8 +278,10 @@ batch_size_fail_threshold_in_kb: 1024
|
||||
# experimental_features:
|
||||
# - udf
|
||||
# - alternator-streams
|
||||
# - consistent-topology-changes
|
||||
# - broadcast-tables
|
||||
# - keyspace-storage-options
|
||||
# - tablets
|
||||
|
||||
# The directory where hints files are stored if hinted handoff is enabled.
|
||||
# hints_directory: /var/lib/scylla/hints
|
||||
@@ -558,7 +560,7 @@ murmur3_partitioner_ignore_msb_bits: 12
|
||||
# enable_parallelized_aggregation: true
|
||||
|
||||
# Time for which task manager task is kept in memory after it completes.
|
||||
# task_ttl_in_seconds: 0
|
||||
task_ttl_in_seconds: 10
|
||||
|
||||
# In materialized views, restrictions are allowed only on the view's primary key columns.
|
||||
# In old versions Scylla mistakenly allowed IS NOT NULL restrictions on columns which were not part
|
||||
@@ -617,17 +619,3 @@ maintenance_socket: ignore
|
||||
# replication_strategy_warn_list:
|
||||
# - SimpleStrategy
|
||||
# replication_strategy_fail_list:
|
||||
|
||||
# Enables the tablets feature.
|
||||
# When enabled, newly created keyspaces will have tablets enabled by default.
|
||||
# That can be explicitly disabled in the CREATE KEYSPACE query
|
||||
# by using the `tablets = {'enabled': false}` replication option.
|
||||
#
|
||||
# When the tablets feature is disabled, there is no way to enable tablets
|
||||
# per keyspace.
|
||||
#
|
||||
# Note that creating keyspaces with tablets enabled is irreversible.
|
||||
# Disabling the tablets feature may impact existing keyspaces that were created with tablets.
|
||||
# For example, the tablets map would remain "frozen" and will not respond to topology changes
|
||||
# like adding, removing, or replacing nodes, or to replication factor changes.
|
||||
enable_tablets: true
|
||||
|
||||
281
configure.py
281
configure.py
@@ -20,6 +20,9 @@ import textwrap
|
||||
from shutil import which
|
||||
from typing import NamedTuple
|
||||
|
||||
outdir = 'build'
|
||||
|
||||
tempfile.tempdir = f"{outdir}/tmp"
|
||||
|
||||
configure_args = str.join(' ', [shlex.quote(x) for x in sys.argv[1:] if not x.startswith('--out=')])
|
||||
|
||||
@@ -282,9 +285,9 @@ def generate_compdb(compdb, ninja, buildfile, modes):
|
||||
# build mode-specific compdbs
|
||||
for mode in modes:
|
||||
mode_out = outdir + '/' + mode
|
||||
submodule_compdbs = [mode_out + '/' + submodule + '/' + compdb for submodule in ['seastar', 'abseil']]
|
||||
submodule_compdbs = [mode_out + '/' + submodule + '/' + compdb for submodule in ['seastar']]
|
||||
with open(mode_out + '/' + compdb, 'w+b') as combined_mode_specific_compdb:
|
||||
subprocess.run(['./scripts/merge-compdb.py', outdir + '/' + mode,
|
||||
subprocess.run(['./scripts/merge-compdb.py', 'build/' + mode,
|
||||
ninja_compdb.name] + submodule_compdbs, stdout=combined_mode_specific_compdb)
|
||||
|
||||
# sort modes by supposed indexing speed
|
||||
@@ -456,6 +459,8 @@ modes = {
|
||||
|
||||
scylla_tests = set([
|
||||
'test/boost/UUID_test',
|
||||
'test/boost/pretty_printers_test',
|
||||
'test/boost/cdc_generation_test',
|
||||
'test/boost/aggregate_fcts_test',
|
||||
'test/boost/allocation_strategy_test',
|
||||
'test/boost/alternator_unit_test',
|
||||
@@ -465,9 +470,7 @@ scylla_tests = set([
|
||||
'test/boost/auth_test',
|
||||
'test/boost/batchlog_manager_test',
|
||||
'test/boost/big_decimal_test',
|
||||
'test/boost/bptree_test',
|
||||
'test/boost/broken_sstable_test',
|
||||
'test/boost/btree_test',
|
||||
'test/boost/bytes_ostream_test',
|
||||
'test/boost/cache_algorithm_test',
|
||||
'test/boost/cache_flat_mutation_reader_test',
|
||||
@@ -476,15 +479,13 @@ scylla_tests = set([
|
||||
'test/boost/canonical_mutation_test',
|
||||
'test/boost/cartesian_product_test',
|
||||
'test/boost/castas_fcts_test',
|
||||
'test/boost/cdc_generation_test',
|
||||
'test/boost/cdc_test',
|
||||
'test/boost/cell_locker_test',
|
||||
'test/boost/checksum_utils_test',
|
||||
'test/boost/chunked_managed_vector_test',
|
||||
'test/boost/chunked_vector_test',
|
||||
'test/boost/chunked_managed_vector_test',
|
||||
'test/boost/clustering_ranges_walker_test',
|
||||
'test/boost/column_mapping_test',
|
||||
'test/boost/commitlog_cleanup_test',
|
||||
'test/boost/commitlog_test',
|
||||
'test/boost/compaction_group_test',
|
||||
'test/boost/compound_test',
|
||||
@@ -494,124 +495,102 @@ scylla_tests = set([
|
||||
'test/boost/counter_test',
|
||||
'test/boost/cql_auth_query_test',
|
||||
'test/boost/cql_auth_syntax_test',
|
||||
'test/boost/cql_functions_test',
|
||||
'test/boost/cql_query_group_test',
|
||||
'test/boost/cql_query_test',
|
||||
'test/boost/cql_query_large_test',
|
||||
'test/boost/cql_query_like_test',
|
||||
'test/boost/cql_query_test',
|
||||
'test/boost/cql_query_group_test',
|
||||
'test/boost/cql_functions_test',
|
||||
'test/boost/crc_test',
|
||||
'test/boost/data_listeners_test',
|
||||
'test/boost/database_test',
|
||||
'test/boost/commitlog_cleanup_test',
|
||||
'test/boost/dirty_memory_manager_test',
|
||||
'test/boost/double_decker_test',
|
||||
'test/boost/duration_test',
|
||||
'test/boost/dynamic_bitset_test',
|
||||
'test/boost/enum_option_test',
|
||||
'test/boost/enum_set_test',
|
||||
'test/boost/error_injection_test',
|
||||
'test/boost/estimated_histogram_test',
|
||||
'test/boost/exception_container_test',
|
||||
'test/boost/exceptions_fallback_test',
|
||||
'test/boost/exceptions_optimized_test',
|
||||
'test/boost/expr_test',
|
||||
'test/boost/extensions_test',
|
||||
'test/boost/error_injection_test',
|
||||
'test/boost/filtering_test',
|
||||
'test/boost/flat_mutation_reader_test',
|
||||
'test/boost/flush_queue_test',
|
||||
'test/boost/fragmented_temporary_buffer_test',
|
||||
'test/boost/frozen_mutation_test',
|
||||
'test/boost/generic_server_test',
|
||||
'test/boost/gossiping_property_file_snitch_test',
|
||||
'test/boost/group0_cmd_merge_test',
|
||||
'test/boost/group0_test',
|
||||
'test/boost/hash_test',
|
||||
'test/boost/hashers_test',
|
||||
'test/boost/hint_test',
|
||||
'test/boost/idl_test',
|
||||
'test/boost/index_with_paging_test',
|
||||
'test/boost/input_stream_test',
|
||||
'test/boost/intrusive_array_test',
|
||||
'test/boost/json_cql_query_test',
|
||||
'test/boost/json_test',
|
||||
'test/boost/keys_test',
|
||||
'test/boost/large_paging_state_test',
|
||||
'test/boost/recent_entries_map_test',
|
||||
'test/boost/like_matcher_test',
|
||||
'test/boost/limiting_data_source_test',
|
||||
'test/boost/linearizing_input_stream_test',
|
||||
'test/boost/lister_test',
|
||||
'test/boost/loading_cache_test',
|
||||
'test/boost/locator_topology_test',
|
||||
'test/boost/log_heap_test',
|
||||
'test/boost/logalloc_standard_allocator_segment_pool_backend_test',
|
||||
'test/boost/estimated_histogram_test',
|
||||
'test/boost/summary_test',
|
||||
'test/boost/logalloc_test',
|
||||
'test/boost/managed_bytes_test',
|
||||
'test/boost/logalloc_standard_allocator_segment_pool_backend_test',
|
||||
'test/boost/managed_vector_test',
|
||||
'test/boost/managed_bytes_test',
|
||||
'test/boost/intrusive_array_test',
|
||||
'test/boost/map_difference_test',
|
||||
'test/boost/memtable_test',
|
||||
'test/boost/multishard_combining_reader_as_mutation_source_test',
|
||||
'test/boost/multishard_mutation_query_test',
|
||||
'test/boost/murmur_hash_test',
|
||||
'test/boost/mutation_fragment_test',
|
||||
'test/boost/mutation_query_test',
|
||||
'test/boost/mutation_reader_test',
|
||||
'test/boost/multishard_combining_reader_as_mutation_source_test',
|
||||
'test/boost/mutation_test',
|
||||
'test/boost/mutation_writer_test',
|
||||
'test/boost/mvcc_test',
|
||||
'test/boost/network_topology_strategy_test',
|
||||
'test/boost/token_metadata_test',
|
||||
'test/boost/tablets_test',
|
||||
'test/boost/sessions_test',
|
||||
'test/boost/nonwrapping_interval_test',
|
||||
'test/boost/observable_test',
|
||||
'test/boost/partitioner_test',
|
||||
'test/boost/per_partition_rate_limit_test',
|
||||
'test/boost/pretty_printers_test',
|
||||
'test/boost/querier_cache_test',
|
||||
'test/boost/query_processor_test',
|
||||
'test/boost/radix_tree_test',
|
||||
'test/boost/wrapping_interval_test',
|
||||
'test/boost/range_tombstone_list_test',
|
||||
'test/boost/rate_limiter_test',
|
||||
'test/boost/reader_concurrency_semaphore_test',
|
||||
'test/boost/recent_entries_map_test',
|
||||
'test/boost/repair_test',
|
||||
'test/boost/restrictions_test',
|
||||
'test/boost/result_utils_test',
|
||||
'test/boost/reusable_buffer_test',
|
||||
'test/boost/restrictions_test',
|
||||
'test/boost/repair_test',
|
||||
'test/boost/role_manager_test',
|
||||
'test/boost/row_cache_test',
|
||||
'test/boost/rust_test',
|
||||
'test/boost/s3_test',
|
||||
'test/boost/schema_change_test',
|
||||
'test/boost/schema_changes_test',
|
||||
'test/boost/schema_loader_test',
|
||||
'test/boost/schema_registry_test',
|
||||
'test/boost/secondary_index_test',
|
||||
'test/boost/tracing_test',
|
||||
'test/boost/index_with_paging_test',
|
||||
'test/boost/serialization_test',
|
||||
'test/boost/serialized_action_test',
|
||||
'test/boost/service_level_controller_test',
|
||||
'test/boost/sessions_test',
|
||||
'test/boost/small_vector_test',
|
||||
'test/boost/snitch_reset_test',
|
||||
'test/boost/sorting_test',
|
||||
'test/boost/sstable_3_x_test',
|
||||
'test/boost/sstable_compaction_test',
|
||||
'test/boost/sstable_conforms_to_mutation_source_test',
|
||||
'test/boost/sstable_datafile_test',
|
||||
'test/boost/sstable_directory_test',
|
||||
'test/boost/sstable_generation_test',
|
||||
'test/boost/sstable_move_test',
|
||||
'test/boost/sstable_mutation_test',
|
||||
'test/boost/sstable_partition_index_cache_test',
|
||||
'test/boost/schema_changes_test',
|
||||
'test/boost/sstable_conforms_to_mutation_source_test',
|
||||
'test/boost/sstable_compaction_test',
|
||||
'test/boost/sstable_resharding_test',
|
||||
'test/boost/sstable_set_test',
|
||||
'test/boost/sstable_directory_test',
|
||||
'test/boost/sstable_test',
|
||||
'test/boost/stall_free_test',
|
||||
'test/boost/sstable_move_test',
|
||||
'test/boost/statement_restrictions_test',
|
||||
'test/boost/storage_proxy_test',
|
||||
'test/boost/string_format_test',
|
||||
'test/boost/summary_test',
|
||||
'test/boost/tablets_test',
|
||||
'test/boost/tagged_integer_test',
|
||||
'test/boost/token_metadata_test',
|
||||
'test/boost/top_k_test',
|
||||
'test/boost/tracing_test',
|
||||
'test/boost/transport_test',
|
||||
'test/boost/types_test',
|
||||
'test/boost/user_function_test',
|
||||
@@ -619,16 +598,38 @@ scylla_tests = set([
|
||||
'test/boost/utf8_test',
|
||||
'test/boost/view_build_test',
|
||||
'test/boost/view_complex_test',
|
||||
'test/boost/view_schema_ckey_test',
|
||||
'test/boost/view_schema_pkey_test',
|
||||
'test/boost/view_schema_test',
|
||||
'test/boost/view_schema_pkey_test',
|
||||
'test/boost/view_schema_ckey_test',
|
||||
'test/boost/vint_serialization_test',
|
||||
'test/boost/virtual_reader_test',
|
||||
'test/boost/virtual_table_mutation_source_test',
|
||||
'test/boost/virtual_table_test',
|
||||
'test/boost/wasm_alloc_test',
|
||||
'test/boost/wasm_test',
|
||||
'test/boost/wrapping_interval_test',
|
||||
'test/boost/wasm_alloc_test',
|
||||
'test/boost/bptree_test',
|
||||
'test/boost/btree_test',
|
||||
'test/boost/radix_tree_test',
|
||||
'test/boost/double_decker_test',
|
||||
'test/boost/stall_free_test',
|
||||
'test/boost/sstable_set_test',
|
||||
'test/boost/reader_concurrency_semaphore_test',
|
||||
'test/boost/service_level_controller_test',
|
||||
'test/boost/schema_loader_test',
|
||||
'test/boost/lister_test',
|
||||
'test/boost/group0_test',
|
||||
'test/boost/exception_container_test',
|
||||
'test/boost/result_utils_test',
|
||||
'test/boost/rate_limiter_test',
|
||||
'test/boost/per_partition_rate_limit_test',
|
||||
'test/boost/expr_test',
|
||||
'test/boost/exceptions_optimized_test',
|
||||
'test/boost/exceptions_fallback_test',
|
||||
'test/boost/s3_test',
|
||||
'test/boost/locator_topology_test',
|
||||
'test/boost/string_format_test',
|
||||
'test/boost/tagged_integer_test',
|
||||
'test/boost/group0_cmd_merge_test',
|
||||
'test/manual/ec2_snitch_test',
|
||||
'test/manual/enormous_table_scan_test',
|
||||
'test/manual/gce_snitch_test',
|
||||
@@ -779,16 +780,10 @@ arg_parser.add_argument('--date-stamp', dest='date_stamp', type=str,
|
||||
help='Set datestamp for SCYLLA-VERSION-GEN')
|
||||
arg_parser.add_argument('--use-cmake', action='store_true', help='Use CMake as the build system')
|
||||
arg_parser.add_argument('--coverage', action = 'store_true', help = 'Compile scylla with coverage instrumentation')
|
||||
arg_parser.add_argument('--build-dir', action='store', default='build',
|
||||
help='Build directory path')
|
||||
|
||||
args = arg_parser.parse_args()
|
||||
|
||||
PROFILES_LIST_FILE_NAME = "coverage_sources.list"
|
||||
|
||||
outdir = args.build_dir
|
||||
tempfile.tempdir = f"{outdir}/tmp"
|
||||
|
||||
if args.list_artifacts:
|
||||
for artifact in sorted(all_artifacts):
|
||||
print(artifact)
|
||||
@@ -796,6 +791,7 @@ if args.list_artifacts:
|
||||
|
||||
defines = ['XXH_PRIVATE_API',
|
||||
'SEASTAR_TESTING_MAIN',
|
||||
'FMT_DEPRECATED_OSTREAM',
|
||||
]
|
||||
|
||||
scylla_raft_core = [
|
||||
@@ -828,7 +824,6 @@ scylla_core = (['message/messaging_service.cc',
|
||||
'mutation/partition_version.cc',
|
||||
'mutation/range_tombstone.cc',
|
||||
'mutation/range_tombstone_list.cc',
|
||||
'mutation/async_utils.cc',
|
||||
'absl-flat_hash_map.cc',
|
||||
'collection_mutation.cc',
|
||||
'client_data.cc',
|
||||
@@ -1016,6 +1011,7 @@ scylla_core = (['message/messaging_service.cc',
|
||||
'cql3/result_set.cc',
|
||||
'cql3/prepare_context.cc',
|
||||
'db/consistency_level.cc',
|
||||
'db/system_auth_keyspace.cc',
|
||||
'db/system_keyspace.cc',
|
||||
'db/virtual_table.cc',
|
||||
'db/virtual_tables.cc',
|
||||
@@ -1352,13 +1348,11 @@ scylla_tests_dependencies = scylla_core + alternator + idls + scylla_tests_gener
|
||||
scylla_raft_dependencies = scylla_raft_core + ['utils/uuid.cc', 'utils/error_injection.cc', 'utils/exceptions.cc']
|
||||
|
||||
scylla_tools = ['tools/scylla-types.cc', 'tools/scylla-sstable.cc', 'tools/scylla-nodetool.cc', 'tools/schema_loader.cc', 'tools/utils.cc', 'tools/lua_sstable_consumer.cc']
|
||||
scylla_perfs = ['test/perf/perf_alternator.cc',
|
||||
'test/perf/perf_fast_forward.cc',
|
||||
scylla_perfs = ['test/perf/perf_fast_forward.cc',
|
||||
'test/perf/perf_row_cache_update.cc',
|
||||
'test/perf/perf_simple_query.cc',
|
||||
'test/perf/perf_sstable.cc',
|
||||
'test/perf/perf_tablets.cc',
|
||||
'test/perf/tablet_load_balancing.cc',
|
||||
'test/perf/perf.cc',
|
||||
'test/lib/alternator_test_env.cc',
|
||||
'test/lib/cql_test_env.cc',
|
||||
@@ -1619,7 +1613,7 @@ def generate_version(date_stamp):
|
||||
date_stamp_opt = ''
|
||||
if date_stamp:
|
||||
date_stamp_opt = f'--date-stamp {date_stamp}'
|
||||
status = subprocess.call(f"./SCYLLA-VERSION-GEN --output-dir {outdir} {date_stamp_opt}", shell=True)
|
||||
status = subprocess.call(f"./SCYLLA-VERSION-GEN {date_stamp_opt}", shell=True)
|
||||
if status != 0:
|
||||
print('Version file generation failed')
|
||||
sys.exit(1)
|
||||
@@ -1753,63 +1747,6 @@ def configure_seastar(build_dir, mode, mode_config):
|
||||
subprocess.check_call(seastar_cmd, shell=False, cwd=cmake_dir)
|
||||
|
||||
|
||||
def configure_abseil(build_dir, mode, mode_config):
|
||||
abseil_cflags = mode_config['lib_cflags']
|
||||
cxx_flags = mode_config['cxxflags']
|
||||
if '-DSANITIZE' in cxx_flags:
|
||||
abseil_cflags += ' -fsanitize=address -fsanitize=undefined -fno-sanitize=vptr'
|
||||
|
||||
# We want to "undo" coverage for abseil if we have it enabled, as we are not
|
||||
# interested in the coverage of the abseil library. these flags were previously
|
||||
# added to cxx_ld_flags
|
||||
if args.coverage:
|
||||
for flag in COVERAGE_INST_FLAGS:
|
||||
cxx_flags = cxx_flags.replace(f' {flag}', '')
|
||||
|
||||
cxx_flags += ' ' + abseil_cflags.strip()
|
||||
cmake_mode = mode_config['cmake_build_type']
|
||||
abseil_cmake_args = [
|
||||
'-DCMAKE_BUILD_TYPE={}'.format(cmake_mode),
|
||||
'-DCMAKE_INSTALL_PREFIX={}'.format(build_dir + '/inst'), # just to avoid a warning from absl
|
||||
'-DCMAKE_C_COMPILER={}'.format(args.cc),
|
||||
'-DCMAKE_CXX_COMPILER={}'.format(args.cxx),
|
||||
'-DCMAKE_CXX_FLAGS_{}={}'.format(cmake_mode.upper(), cxx_flags),
|
||||
'-DCMAKE_EXPORT_COMPILE_COMMANDS=ON',
|
||||
'-DCMAKE_CXX_STANDARD=20',
|
||||
'-DABSL_PROPAGATE_CXX_STD=ON',
|
||||
]
|
||||
|
||||
abseil_build_dir = os.path.join(build_dir, mode, 'abseil')
|
||||
abseil_cmd = ['cmake', '-G', 'Ninja', real_relpath('abseil', abseil_build_dir)] + abseil_cmake_args
|
||||
|
||||
os.makedirs(abseil_build_dir, exist_ok=True)
|
||||
subprocess.check_call(abseil_cmd, shell=False, cwd=abseil_build_dir)
|
||||
|
||||
abseil_libs = ['absl/' + lib for lib in [
|
||||
'container/libabsl_hashtablez_sampler.a',
|
||||
'container/libabsl_raw_hash_set.a',
|
||||
'synchronization/libabsl_synchronization.a',
|
||||
'synchronization/libabsl_graphcycles_internal.a',
|
||||
'debugging/libabsl_stacktrace.a',
|
||||
'debugging/libabsl_symbolize.a',
|
||||
'debugging/libabsl_debugging_internal.a',
|
||||
'debugging/libabsl_demangle_internal.a',
|
||||
'time/libabsl_time.a',
|
||||
'time/libabsl_time_zone.a',
|
||||
'numeric/libabsl_int128.a',
|
||||
'hash/libabsl_hash.a',
|
||||
'hash/libabsl_city.a',
|
||||
'hash/libabsl_low_level_hash.a',
|
||||
'base/libabsl_malloc_internal.a',
|
||||
'base/libabsl_spinlock_wait.a',
|
||||
'base/libabsl_base.a',
|
||||
'base/libabsl_raw_logging_internal.a',
|
||||
'profiling/libabsl_exponential_biased.a',
|
||||
'strings/libabsl_strings.a',
|
||||
'strings/libabsl_strings_internal.a',
|
||||
'base/libabsl_throw_delegate.a']]
|
||||
|
||||
|
||||
def query_seastar_flags(pc_file, use_shared_libs, link_static_cxx=False):
|
||||
if use_shared_libs:
|
||||
opt = '--shared'
|
||||
@@ -1829,7 +1766,9 @@ def query_seastar_flags(pc_file, use_shared_libs, link_static_cxx=False):
|
||||
'seastar_testing_libs': testing_libs}
|
||||
|
||||
pkgs = ['libsystemd',
|
||||
'jsoncpp']
|
||||
'jsoncpp',
|
||||
'absl_raw_hash_set',
|
||||
'absl_hash']
|
||||
# Lua can be provided by lua53 package on Debian-like
|
||||
# systems and by Lua on others.
|
||||
pkgs.append('lua53' if have_pkg('lua53') else 'lua')
|
||||
@@ -1892,11 +1831,9 @@ def get_extra_cxxflags(mode, mode_config, cxx, debuginfo):
|
||||
return cxxflags
|
||||
|
||||
|
||||
def get_release_cxxflags(scylla_product,
|
||||
scylla_version,
|
||||
def get_release_cxxflags(scylla_version,
|
||||
scylla_release):
|
||||
definitions = {'SCYLLA_PRODUCT': scylla_product,
|
||||
'SCYLLA_VERSION': scylla_version,
|
||||
definitions = {'SCYLLA_VERSION': scylla_version,
|
||||
'SCYLLA_RELEASE': scylla_release}
|
||||
return [f'-D{name}="\\"{value}\\""' for name, value in definitions.items()]
|
||||
|
||||
@@ -1948,17 +1885,17 @@ def write_build_file(f,
|
||||
rule strip
|
||||
command = scripts/strip.sh $in
|
||||
rule package
|
||||
command = scripts/create-relocatable-package.py --build-dir $builddir/$mode --node-exporter-dir $builddir/node_exporter --debian-dir $builddir/debian/debian $out
|
||||
command = scripts/create-relocatable-package.py --build-dir build/$mode $out
|
||||
rule stripped_package
|
||||
command = scripts/create-relocatable-package.py --stripped --build-dir $builddir/$mode --node-exporter-dir $builddir/node_exporter --debian-dir $builddir/debian/debian $out
|
||||
command = scripts/create-relocatable-package.py --stripped --build-dir build/$mode $out
|
||||
rule debuginfo_package
|
||||
command = dist/debuginfo/scripts/create-relocatable-package.py --build-dir $builddir/$mode --node-exporter-dir $builddir/node_exporter $out
|
||||
command = dist/debuginfo/scripts/create-relocatable-package.py --build-dir build/$mode $out
|
||||
rule rpmbuild
|
||||
command = reloc/build_rpm.sh --reloc-pkg $in --builddir $out
|
||||
rule debbuild
|
||||
command = reloc/build_deb.sh --reloc-pkg $in --builddir $out
|
||||
rule unified
|
||||
command = unified/build_unified.sh --build-dir $builddir/$mode --unified-pkg $out
|
||||
command = unified/build_unified.sh --build-dir build/$mode --unified-pkg $out
|
||||
rule rust_header
|
||||
command = cxxbridge --include rust/cxx.h --header $in > $out
|
||||
description = RUST_HEADER $out
|
||||
@@ -2088,7 +2025,6 @@ def write_build_file(f,
|
||||
seastar_lib_ext = 'so' if modeval['build_seastar_shared_libs'] else 'a'
|
||||
seastar_dep = f'$builddir/{mode}/seastar/libseastar.{seastar_lib_ext}'
|
||||
seastar_testing_dep = f'$builddir/{mode}/seastar/libseastar_testing.{seastar_lib_ext}'
|
||||
abseil_dep = ' '.join(f'$builddir/{mode}/abseil/{lib}' for lib in abseil_libs)
|
||||
for binary in sorted(build_artifacts):
|
||||
if binary in other or binary in wasms:
|
||||
continue
|
||||
@@ -2118,9 +2054,6 @@ def write_build_file(f,
|
||||
if has_thrift:
|
||||
local_libs += ' ' + maybe_static(args.staticthrift, '-lthrift')
|
||||
local_libs += ' ' + maybe_static(args.staticboost, '-lboost_system')
|
||||
objs.extend(['$builddir/' + mode + '/' + artifact for artifact in [
|
||||
'abseil/' + x for x in abseil_libs
|
||||
]])
|
||||
if binary in tests:
|
||||
if binary in pure_boost_tests:
|
||||
local_libs += ' ' + maybe_static(args.staticboost, '-lboost_unit_test_framework')
|
||||
@@ -2134,14 +2067,14 @@ def write_build_file(f,
|
||||
# quickly re-link the test unstripped by adding a "_g"
|
||||
# to the test name, e.g., "ninja build/release/testname_g"
|
||||
link_rule = perf_tests_link_rule if binary.startswith('test/perf/') else tests_link_rule
|
||||
f.write('build $builddir/{}/{}: {}.{} {} | {} {} {}\n'.format(mode, binary, link_rule, mode, str.join(' ', objs), seastar_dep, seastar_testing_dep, abseil_dep))
|
||||
f.write('build $builddir/{}/{}: {}.{} {} | {} {}\n'.format(mode, binary, link_rule, mode, str.join(' ', objs), seastar_dep, seastar_testing_dep))
|
||||
f.write(' libs = {}\n'.format(local_libs))
|
||||
f.write('build $builddir/{}/{}_g: {}.{} {} | {} {} {}\n'.format(mode, binary, regular_link_rule, mode, str.join(' ', objs), seastar_dep, seastar_testing_dep, abseil_dep))
|
||||
f.write('build $builddir/{}/{}_g: {}.{} {} | {} {}\n'.format(mode, binary, regular_link_rule, mode, str.join(' ', objs), seastar_dep, seastar_testing_dep))
|
||||
f.write(' libs = {}\n'.format(local_libs))
|
||||
else:
|
||||
if binary == 'scylla':
|
||||
local_libs += ' ' + "$seastar_testing_libs_{}".format(mode)
|
||||
f.write('build $builddir/{}/{}: {}.{} {} | {} {} {}\n'.format(mode, binary, regular_link_rule, mode, str.join(' ', objs), seastar_dep, seastar_testing_dep, abseil_dep))
|
||||
f.write('build $builddir/{}/{}: {}.{} {} | {} {}\n'.format(mode, binary, regular_link_rule, mode, str.join(' ', objs), seastar_dep, seastar_testing_dep))
|
||||
f.write(' libs = {}\n'.format(local_libs))
|
||||
f.write(f'build $builddir/{mode}/{binary}.stripped: strip $builddir/{mode}/{binary}\n')
|
||||
f.write(f'build $builddir/{mode}/{binary}.debug: phony $builddir/{mode}/{binary}.stripped\n')
|
||||
@@ -2194,17 +2127,6 @@ def write_build_file(f,
|
||||
mode=mode,
|
||||
)
|
||||
)
|
||||
compiler_training_artifacts=[]
|
||||
if mode == 'dev':
|
||||
compiler_training_artifacts.append(f'$builddir/{mode}/scylla')
|
||||
elif mode == 'release' or mode == 'debug':
|
||||
compiler_training_artifacts.append(f'$builddir/{mode}/service/storage_proxy.o')
|
||||
f.write(
|
||||
'build {mode}-compiler-training: phony {artifacts}\n'.format(
|
||||
mode=mode,
|
||||
artifacts=str.join(' ', compiler_training_artifacts)
|
||||
)
|
||||
)
|
||||
|
||||
gen_dir = '$builddir/{}/gen'.format(mode)
|
||||
gen_headers = []
|
||||
@@ -2329,12 +2251,6 @@ def write_build_file(f,
|
||||
f.write(f'build $builddir/{mode}/dist/tar/{scylla_product}-unified-package-{scylla_version}-{scylla_release}.tar.gz: copy $builddir/{mode}/dist/tar/{scylla_product}-unified-{scylla_version}-{scylla_release}.{arch}.tar.gz\n')
|
||||
f.write(f'build $builddir/{mode}/dist/tar/{scylla_product}-unified-{arch}-package-{scylla_version}-{scylla_release}.tar.gz: copy $builddir/{mode}/dist/tar/{scylla_product}-unified-{scylla_version}-{scylla_release}.{arch}.tar.gz\n')
|
||||
|
||||
for lib in abseil_libs:
|
||||
f.write('build $builddir/{mode}/abseil/{lib}: ninja $builddir/{mode}/abseil/build.ninja\n'.format(**locals()))
|
||||
f.write(' pool = submodule_pool\n')
|
||||
f.write(' subdir = $builddir/{mode}/abseil\n'.format(**locals()))
|
||||
f.write(' target = {lib}\n'.format(**locals()))
|
||||
|
||||
checkheaders_mode = 'dev' if 'dev' in modes else modes.keys()[0]
|
||||
f.write('build checkheaders: phony || {}\n'.format(' '.join(['$builddir/{}/{}.o'.format(checkheaders_mode, hh) for hh in headers])))
|
||||
|
||||
@@ -2350,9 +2266,6 @@ def write_build_file(f,
|
||||
f.write(
|
||||
'build wasm: phony {}\n'.format(' '.join([f'$builddir/{binary}' for binary in sorted(wasms)]))
|
||||
)
|
||||
f.write(
|
||||
'build compiler-training: phony {}\n'.format(' '.join(['{mode}-compiler-training'.format(mode=mode) for mode in default_modes]))
|
||||
)
|
||||
|
||||
f.write(textwrap.dedent(f'''\
|
||||
build dist-unified-tar: phony {' '.join([f'$builddir/{mode}/dist/tar/{scylla_product}-unified-{scylla_version}-{scylla_release}.{arch}.tar.gz' for mode in default_modes])}
|
||||
@@ -2365,54 +2278,54 @@ def write_build_file(f,
|
||||
build dist-server: phony dist-server-tar dist-server-debuginfo dist-server-rpm dist-server-deb
|
||||
|
||||
rule build-submodule-reloc
|
||||
command = cd $reloc_dir && ./reloc/build_reloc.sh --version $$(<../../$builddir/SCYLLA-PRODUCT-FILE)-$$(sed 's/-/~/' <../../$builddir/SCYLLA-VERSION-FILE)-$$(<../../$builddir/SCYLLA-RELEASE-FILE) --nodeps $args
|
||||
command = cd $reloc_dir && ./reloc/build_reloc.sh --version $$(<../../build/SCYLLA-PRODUCT-FILE)-$$(sed 's/-/~/' <../../build/SCYLLA-VERSION-FILE)-$$(<../../build/SCYLLA-RELEASE-FILE) --nodeps $args
|
||||
rule build-submodule-rpm
|
||||
command = cd $dir && ./reloc/build_rpm.sh --reloc-pkg $artifact
|
||||
rule build-submodule-deb
|
||||
command = cd $dir && ./reloc/build_deb.sh --reloc-pkg $artifact
|
||||
|
||||
build tools/jmx/build/{scylla_product}-jmx-{scylla_version}-{scylla_release}.noarch.tar.gz: build-submodule-reloc | $builddir/SCYLLA-PRODUCT-FILE $builddir/SCYLLA-VERSION-FILE $builddir/SCYLLA-RELEASE-FILE
|
||||
build tools/jmx/build/{scylla_product}-jmx-{scylla_version}-{scylla_release}.noarch.tar.gz: build-submodule-reloc | build/SCYLLA-PRODUCT-FILE build/SCYLLA-VERSION-FILE build/SCYLLA-RELEASE-FILE
|
||||
reloc_dir = tools/jmx
|
||||
build dist-jmx-rpm: build-submodule-rpm tools/jmx/build/{scylla_product}-jmx-{scylla_version}-{scylla_release}.noarch.tar.gz
|
||||
dir = tools/jmx
|
||||
artifact = build/{scylla_product}-jmx-{scylla_version}-{scylla_release}.noarch.tar.gz
|
||||
artifact = $builddir/{scylla_product}-jmx-{scylla_version}-{scylla_release}.noarch.tar.gz
|
||||
build dist-jmx-deb: build-submodule-deb tools/jmx/build/{scylla_product}-jmx-{scylla_version}-{scylla_release}.noarch.tar.gz
|
||||
dir = tools/jmx
|
||||
artifact = build/{scylla_product}-jmx-{scylla_version}-{scylla_release}.noarch.tar.gz
|
||||
artifact = $builddir/{scylla_product}-jmx-{scylla_version}-{scylla_release}.noarch.tar.gz
|
||||
build dist-jmx-tar: phony {' '.join(['$builddir/{mode}/dist/tar/{scylla_product}-jmx-{scylla_version}-{scylla_release}.noarch.tar.gz'.format(mode=mode, scylla_product=scylla_product, scylla_version=scylla_version, scylla_release=scylla_release) for mode in default_modes])}
|
||||
build dist-jmx: phony dist-jmx-tar dist-jmx-rpm dist-jmx-deb
|
||||
|
||||
build tools/java/build/{scylla_product}-tools-{scylla_version}-{scylla_release}.noarch.tar.gz: build-submodule-reloc | $builddir/SCYLLA-PRODUCT-FILE $builddir/SCYLLA-VERSION-FILE $builddir/SCYLLA-RELEASE-FILE
|
||||
build tools/java/build/{scylla_product}-tools-{scylla_version}-{scylla_release}.noarch.tar.gz: build-submodule-reloc | build/SCYLLA-PRODUCT-FILE build/SCYLLA-VERSION-FILE build/SCYLLA-RELEASE-FILE
|
||||
reloc_dir = tools/java
|
||||
build dist-tools-rpm: build-submodule-rpm tools/java/build/{scylla_product}-tools-{scylla_version}-{scylla_release}.noarch.tar.gz
|
||||
dir = tools/java
|
||||
artifact = build/{scylla_product}-tools-{scylla_version}-{scylla_release}.noarch.tar.gz
|
||||
artifact = $builddir/{scylla_product}-tools-{scylla_version}-{scylla_release}.noarch.tar.gz
|
||||
build dist-tools-deb: build-submodule-deb tools/java/build/{scylla_product}-tools-{scylla_version}-{scylla_release}.noarch.tar.gz
|
||||
dir = tools/java
|
||||
artifact = build/{scylla_product}-tools-{scylla_version}-{scylla_release}.noarch.tar.gz
|
||||
artifact = $builddir/{scylla_product}-tools-{scylla_version}-{scylla_release}.noarch.tar.gz
|
||||
build dist-tools-tar: phony {' '.join(['$builddir/{mode}/dist/tar/{scylla_product}-tools-{scylla_version}-{scylla_release}.noarch.tar.gz'.format(mode=mode, scylla_product=scylla_product, scylla_version=scylla_version, scylla_release=scylla_release) for mode in default_modes])}
|
||||
build dist-tools: phony dist-tools-tar dist-tools-rpm dist-tools-deb
|
||||
|
||||
build tools/cqlsh/build/{scylla_product}-cqlsh-{scylla_version}-{scylla_release}.noarch.tar.gz: build-submodule-reloc | $builddir/SCYLLA-PRODUCT-FILE $builddir/SCYLLA-VERSION-FILE $builddir/SCYLLA-RELEASE-FILE
|
||||
build tools/cqlsh/build/{scylla_product}-cqlsh-{scylla_version}-{scylla_release}.noarch.tar.gz: build-submodule-reloc | build/SCYLLA-PRODUCT-FILE build/SCYLLA-VERSION-FILE build/SCYLLA-RELEASE-FILE
|
||||
reloc_dir = tools/cqlsh
|
||||
build dist-cqlsh-rpm: build-submodule-rpm tools/cqlsh/build/{scylla_product}-cqlsh-{scylla_version}-{scylla_release}.noarch.tar.gz
|
||||
dir = tools/cqlsh
|
||||
artifact = build/{scylla_product}-cqlsh-{scylla_version}-{scylla_release}.noarch.tar.gz
|
||||
artifact = $builddir/{scylla_product}-cqlsh-{scylla_version}-{scylla_release}.noarch.tar.gz
|
||||
build dist-cqlsh-deb: build-submodule-deb tools/cqlsh/build/{scylla_product}-cqlsh-{scylla_version}-{scylla_release}.noarch.tar.gz
|
||||
dir = tools/cqlsh
|
||||
artifact = build/{scylla_product}-cqlsh-{scylla_version}-{scylla_release}.noarch.tar.gz
|
||||
artifact = $builddir/{scylla_product}-cqlsh-{scylla_version}-{scylla_release}.noarch.tar.gz
|
||||
build dist-cqlsh-tar: phony {' '.join(['$builddir/{mode}/dist/tar/{scylla_product}-cqlsh-{scylla_version}-{scylla_release}.noarch.tar.gz'.format(mode=mode, scylla_product=scylla_product, scylla_version=scylla_version, scylla_release=scylla_release) for mode in default_modes])}
|
||||
build dist-cqlsh: phony dist-cqlsh-tar dist-cqlsh-rpm dist-cqlsh-deb
|
||||
|
||||
build tools/python3/build/{scylla_product}-python3-{scylla_version}-{scylla_release}.{arch}.tar.gz: build-submodule-reloc | $builddir/SCYLLA-PRODUCT-FILE $builddir/SCYLLA-VERSION-FILE $builddir/SCYLLA-RELEASE-FILE
|
||||
build tools/python3/build/{scylla_product}-python3-{scylla_version}-{scylla_release}.{arch}.tar.gz: build-submodule-reloc | build/SCYLLA-PRODUCT-FILE build/SCYLLA-VERSION-FILE build/SCYLLA-RELEASE-FILE
|
||||
reloc_dir = tools/python3
|
||||
args = --packages "{python3_dependencies}" --pip-packages "{pip_dependencies}" --pip-symlinks "{pip_symlinks}"
|
||||
build dist-python3-rpm: build-submodule-rpm tools/python3/build/{scylla_product}-python3-{scylla_version}-{scylla_release}.{arch}.tar.gz
|
||||
dir = tools/python3
|
||||
artifact = build/{scylla_product}-python3-{scylla_version}-{scylla_release}.{arch}.tar.gz
|
||||
artifact = $builddir/{scylla_product}-python3-{scylla_version}-{scylla_release}.{arch}.tar.gz
|
||||
build dist-python3-deb: build-submodule-deb tools/python3/build/{scylla_product}-python3-{scylla_version}-{scylla_release}.{arch}.tar.gz
|
||||
dir = tools/python3
|
||||
artifact = build/{scylla_product}-python3-{scylla_version}-{scylla_release}.{arch}.tar.gz
|
||||
artifact = $builddir/{scylla_product}-python3-{scylla_version}-{scylla_release}.{arch}.tar.gz
|
||||
build dist-python3-tar: phony {' '.join(['$builddir/{mode}/dist/tar/{scylla_product}-python3-{scylla_version}-{scylla_release}.{arch}.tar.gz'.format(mode=mode, scylla_product=scylla_product, arch=arch, scylla_version=scylla_version, scylla_release=scylla_release) for mode in default_modes])}
|
||||
build dist-python3: phony dist-python3-tar dist-python3-rpm dist-python3-deb
|
||||
build dist-deb: phony dist-server-deb dist-python3-deb dist-jmx-deb dist-tools-deb dist-cqlsh-deb
|
||||
@@ -2447,10 +2360,10 @@ def write_build_file(f,
|
||||
|
||||
f.write(textwrap.dedent('''\
|
||||
rule configure
|
||||
command = {python} configure.py --out={buildfile}.new $configure_args && mv {buildfile}.new {buildfile}
|
||||
command = {python} configure.py --out=build.ninja.new $configure_args && mv build.ninja.new build.ninja
|
||||
generator = 1
|
||||
description = CONFIGURE $configure_args
|
||||
build {buildfile} {build_ninja_list}: configure | configure.py SCYLLA-VERSION-GEN $builddir/SCYLLA-PRODUCT-FILE $builddir/SCYLLA-VERSION-FILE $builddir/SCYLLA-RELEASE-FILE {args.seastar_path}/CMakeLists.txt
|
||||
build build.ninja {build_ninja_list}: configure | configure.py SCYLLA-VERSION-GEN $builddir/SCYLLA-PRODUCT-FILE $builddir/SCYLLA-VERSION-FILE $builddir/SCYLLA-RELEASE-FILE {args.seastar_path}/CMakeLists.txt
|
||||
rule cscope
|
||||
command = find -name '*.[chS]' -o -name "*.cc" -o -name "*.hh" | cscope -bq -i-
|
||||
description = CSCOPE
|
||||
@@ -2464,7 +2377,7 @@ def write_build_file(f,
|
||||
description = List configured modes
|
||||
build mode_list: mode_list
|
||||
default {modes_list}
|
||||
''').format(modes_list=' '.join(default_modes), build_ninja_list=' '.join([f'{outdir}/{mode}/{dir}/build.ninja' for mode in build_modes for dir in ['seastar', 'abseil']]), **globals()))
|
||||
''').format(modes_list=' '.join(default_modes), build_ninja_list=' '.join([f'build/{mode}/{dir}/build.ninja' for mode in build_modes for dir in ['seastar']]), **globals()))
|
||||
unit_test_list = set(test for test in build_artifacts if test in set(tests))
|
||||
f.write(textwrap.dedent('''\
|
||||
rule unit_test_list
|
||||
@@ -2475,14 +2388,14 @@ def write_build_file(f,
|
||||
f.write(textwrap.dedent('''\
|
||||
build always: phony
|
||||
rule scylla_version_gen
|
||||
command = ./SCYLLA-VERSION-GEN --output-dir $builddir
|
||||
command = ./SCYLLA-VERSION-GEN
|
||||
restat = 1
|
||||
build $builddir/SCYLLA-RELEASE-FILE $builddir/SCYLLA-VERSION-FILE: scylla_version_gen | always
|
||||
rule debian_files_gen
|
||||
command = ./dist/debian/debian_files_gen.py --build-dir $builddir
|
||||
command = ./dist/debian/debian_files_gen.py
|
||||
build $builddir/debian/debian: debian_files_gen | always
|
||||
rule extract_node_exporter
|
||||
command = tar -C $builddir -xvpf {node_exporter_filename} --no-same-owner && rm -rfv $builddir/node_exporter && mv -v $builddir/{node_exporter_dirname} $builddir/node_exporter
|
||||
command = tar -C build -xvpf {node_exporter_filename} --no-same-owner && rm -rfv build/node_exporter && mv -v build/{node_exporter_dirname} build/node_exporter
|
||||
build $builddir/node_exporter/node_exporter: extract_node_exporter | always
|
||||
build $builddir/node_exporter/node_exporter.stripped: strip $builddir/node_exporter/node_exporter
|
||||
build $builddir/node_exporter/node_exporter.debug: phony $builddir/node_exporter/node_exporter.stripped
|
||||
@@ -2505,17 +2418,14 @@ def create_build_system(args):
|
||||
extra_cxxflags = ' '.join(get_extra_cxxflags(mode, mode_config, args.cxx, args.debuginfo))
|
||||
mode_config['cxxflags'] += f' {extra_cxxflags}'
|
||||
|
||||
mode_config['per_src_extra_cxxflags']['release.cc'] = ' '.join(get_release_cxxflags(scylla_product, scylla_version, scylla_release))
|
||||
mode_config['per_src_extra_cxxflags']['release.cc'] = ' '.join(get_release_cxxflags(scylla_version, scylla_release))
|
||||
|
||||
if not args.dist_only:
|
||||
global user_cflags, libs
|
||||
# args.buildfile builds seastar with the rules of
|
||||
# {outdir}/{mode}/seastar/build.ninja, and
|
||||
# {outdir}/{mode}/seastar/seastar.pc is queried for building flags
|
||||
for mode, mode_config in build_modes.items():
|
||||
configure_seastar(outdir, mode, mode_config)
|
||||
configure_abseil(outdir, mode, mode_config)
|
||||
user_cflags += ' -isystem abseil'
|
||||
|
||||
for mode, mode_config in build_modes.items():
|
||||
mode_config.update(query_seastar_flags(f'{outdir}/{mode}/seastar/seastar.pc',
|
||||
@@ -2569,6 +2479,7 @@ def configure_using_cmake(args):
|
||||
if args.staticboost:
|
||||
settings['Boost_USE_STATIC_LIBS'] = 'ON'
|
||||
|
||||
|
||||
source_dir = os.path.realpath(os.path.dirname(__file__))
|
||||
build_dir = os.path.join(source_dir, 'build')
|
||||
|
||||
|
||||
@@ -89,7 +89,7 @@ public:
|
||||
using counter_shard_view = basic_counter_shard_view<mutable_view::no>;
|
||||
|
||||
template <>
|
||||
struct fmt::formatter<counter_shard_view> : fmt::formatter<string_view> {
|
||||
struct fmt::formatter<counter_shard_view> : fmt::formatter<std::string_view> {
|
||||
auto format(const counter_shard_view&, fmt::format_context& ctx) const -> decltype(ctx.out());
|
||||
};
|
||||
|
||||
@@ -352,7 +352,7 @@ struct counter_cell_view : basic_counter_cell_view<mutable_view::no> {
|
||||
};
|
||||
|
||||
template <>
|
||||
struct fmt::formatter<counter_cell_view> : fmt::formatter<string_view> {
|
||||
struct fmt::formatter<counter_cell_view> : fmt::formatter<std::string_view> {
|
||||
auto format(const counter_cell_view&, fmt::format_context& ctx) const -> decltype(ctx.out());
|
||||
};
|
||||
|
||||
|
||||
@@ -131,7 +131,6 @@ target_link_libraries(cql3
|
||||
idl
|
||||
wasmtime_bindings
|
||||
Seastar::seastar
|
||||
absl::headers
|
||||
xxHash::xxhash
|
||||
ANTLR3::antlr3
|
||||
PRIVATE
|
||||
|
||||
22
cql3/Cql.g
22
cql3/Cql.g
@@ -68,7 +68,6 @@ options {
|
||||
#include "cql3/statements/ks_prop_defs.hh"
|
||||
#include "cql3/selection/raw_selector.hh"
|
||||
#include "cql3/selection/selectable-expr.hh"
|
||||
#include "cql3/dialect.hh"
|
||||
#include "cql3/keyspace_element_name.hh"
|
||||
#include "cql3/constants.hh"
|
||||
#include "cql3/operation_impl.hh"
|
||||
@@ -149,8 +148,6 @@ using uexpression = uninitialized<expression>;
|
||||
|
||||
listener_type* listener;
|
||||
|
||||
dialect _dialect;
|
||||
|
||||
// Keeps the names of all bind variables. For bind variables without a name ('?'), the name is nullptr.
|
||||
// Maps bind_index -> name.
|
||||
std::vector<::shared_ptr<cql3::column_identifier>> _bind_variable_names;
|
||||
@@ -174,14 +171,9 @@ using uexpression = uninitialized<expression>;
|
||||
return s;
|
||||
}
|
||||
|
||||
void set_dialect(dialect d) {
|
||||
_dialect = d;
|
||||
}
|
||||
|
||||
bind_variable new_bind_variables(shared_ptr<cql3::column_identifier> name)
|
||||
{
|
||||
if (_dialect.duplicate_bind_variable_names_refer_to_same_variable
|
||||
&& name && _named_bind_variables_indexes.contains(*name)) {
|
||||
if (name && _named_bind_variables_indexes.contains(*name)) {
|
||||
return bind_variable{_named_bind_variables_indexes[*name]};
|
||||
}
|
||||
auto marker = bind_variable{_bind_variable_names.size()};
|
||||
@@ -562,10 +554,6 @@ usingTimeoutClause[std::unique_ptr<cql3::attributes::raw>& attrs]
|
||||
: K_USING K_TIMEOUT to=term { attrs->timeout = std::move(to); }
|
||||
;
|
||||
|
||||
usingTimestampClause[std::unique_ptr<cql3::attributes::raw>& attrs]
|
||||
: K_USING K_TIMESTAMP ts=intValue { attrs->timestamp = std::move(ts); }
|
||||
;
|
||||
|
||||
/**
|
||||
* UPDATE <CF>
|
||||
* USING TIMESTAMP <long>
|
||||
@@ -996,17 +984,16 @@ alterKeyspaceStatement returns [std::unique_ptr<cql3::statements::alter_keyspace
|
||||
/**
|
||||
* ALTER COLUMN FAMILY <CF> ALTER <column> TYPE <newtype>;
|
||||
* ALTER COLUMN FAMILY <CF> ADD <column> <newtype>; | ALTER COLUMN FAMILY <CF> ADD (<column> <newtype>,<column1> <newtype1>..... <column n> <newtype n>)
|
||||
* ALTER COLUMN FAMILY <CF> DROP <column> [USING TIMESTAMP <ts>]; | ALTER COLUMN FAMILY <CF> DROP ( <column>,<column1>.....<column n>) [USING TIMESTAMP <ts>]
|
||||
* ALTER COLUMN FAMILY <CF> DROP <column>; | ALTER COLUMN FAMILY <CF> DROP ( <column>,<column1>.....<column n>)
|
||||
* ALTER COLUMN FAMILY <CF> WITH <property> = <value>;
|
||||
* ALTER COLUMN FAMILY <CF> RENAME <column> TO <column>;
|
||||
*/
|
||||
alterTableStatement returns [std::unique_ptr<alter_table_statement::raw_statement> expr]
|
||||
alterTableStatement returns [std::unique_ptr<alter_table_statement> expr]
|
||||
@init {
|
||||
alter_table_statement::type type;
|
||||
auto props = cql3::statements::cf_prop_defs();
|
||||
std::vector<alter_table_statement::column_change> column_changes;
|
||||
std::vector<std::pair<shared_ptr<cql3::column_identifier::raw>, shared_ptr<cql3::column_identifier::raw>>> renames;
|
||||
auto attrs = std::make_unique<cql3::attributes::raw>();
|
||||
}
|
||||
: K_ALTER K_COLUMNFAMILY cf=columnFamilyName
|
||||
( K_ALTER id=cident K_TYPE v=comparatorType { type = alter_table_statement::type::alter; column_changes.emplace_back(alter_table_statement::column_change{id, v}); }
|
||||
@@ -1020,14 +1007,13 @@ alterTableStatement returns [std::unique_ptr<alter_table_statement::raw_statemen
|
||||
| '(' id1=cident { column_changes.emplace_back(alter_table_statement::column_change{id1}); }
|
||||
(',' idn=cident { column_changes.emplace_back(alter_table_statement::column_change{idn}); } )* ')'
|
||||
)
|
||||
( usingTimestampClause[attrs] )?
|
||||
| K_WITH properties[props] { type = alter_table_statement::type::opts; }
|
||||
| K_RENAME { type = alter_table_statement::type::rename; }
|
||||
id1=cident K_TO toId1=cident { renames.emplace_back(id1, toId1); }
|
||||
( K_AND idn=cident K_TO toIdn=cident { renames.emplace_back(idn, toIdn); } )*
|
||||
)
|
||||
{
|
||||
$expr = std::make_unique<alter_table_statement::raw_statement>(std::move(cf), type, std::move(column_changes), std::move(props), std::move(renames), std::move(attrs));
|
||||
$expr = std::make_unique<alter_table_statement>(std::move(cf), type, std::move(column_changes), std::move(props), std::move(renames));
|
||||
}
|
||||
;
|
||||
|
||||
|
||||
@@ -12,7 +12,6 @@
|
||||
#include "cql3/column_identifier.hh"
|
||||
#include "cql3/expr/evaluate.hh"
|
||||
#include "cql3/expr/expr-utils.hh"
|
||||
#include "exceptions/exceptions.hh"
|
||||
#include <optional>
|
||||
|
||||
namespace cql3 {
|
||||
|
||||
@@ -119,13 +119,13 @@ struct hash<cql3::column_identifier_raw> {
|
||||
|
||||
}
|
||||
|
||||
template <> struct fmt::formatter<cql3::column_identifier> : fmt::formatter<string_view> {
|
||||
template <> struct fmt::formatter<cql3::column_identifier> : fmt::formatter<std::string_view> {
|
||||
auto format(const cql3::column_identifier& i, fmt::format_context& ctx) const {
|
||||
return fmt::format_to(ctx.out(), "{}", i.text());
|
||||
}
|
||||
};
|
||||
|
||||
template <> struct fmt::formatter<cql3::column_identifier_raw> : fmt::formatter<string_view> {
|
||||
template <> struct fmt::formatter<cql3::column_identifier_raw> : fmt::formatter<std::string_view> {
|
||||
auto format(const cql3::column_identifier_raw& id, fmt::format_context& ctx) const {
|
||||
return fmt::format_to(ctx.out(), "{}", id.text());
|
||||
}
|
||||
|
||||
@@ -454,8 +454,7 @@ sstring maybe_quote(const sstring& identifier) {
|
||||
// many keywords but allow keywords listed as "unreserved keywords".
|
||||
// So we can use any of them, for example cident.
|
||||
try {
|
||||
// In general it's not a good idea to use the default dialect, but for parsing an identifier, it's okay.
|
||||
cql3::util::do_with_parser(identifier, dialect{}, std::mem_fn(&cql3_parser::CqlParser::cident));
|
||||
cql3::util::do_with_parser(identifier, std::mem_fn(&cql3_parser::CqlParser::cident));
|
||||
return identifier;
|
||||
} catch(exceptions::syntax_exception&) {
|
||||
// This alphanumeric string is not a valid identifier, so fall
|
||||
|
||||
@@ -365,9 +365,9 @@ inline bool operator==(const cql3_type& a, const cql3_type& b) {
|
||||
}
|
||||
|
||||
template <>
|
||||
struct fmt::formatter<cql3::cql3_type>: fmt::formatter<string_view> {
|
||||
struct fmt::formatter<cql3::cql3_type>: fmt::formatter<std::string_view> {
|
||||
auto format(const cql3::cql3_type& t, fmt::format_context& ctx) const {
|
||||
return formatter<string_view>::format(format_as(t), ctx);
|
||||
return formatter<std::string_view>::format(format_as(t), ctx);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
// Copyright (C) 2024-present ScyllaDB
|
||||
// SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <fmt/core.h>
|
||||
|
||||
namespace cql3 {
|
||||
|
||||
struct dialect {
|
||||
bool duplicate_bind_variable_names_refer_to_same_variable = true; // if :a is found twice in a query, the two references are to the same variable (see #15559)
|
||||
bool operator==(const dialect&) const = default;
|
||||
};
|
||||
|
||||
inline
|
||||
dialect
|
||||
internal_dialect() {
|
||||
return dialect{
|
||||
.duplicate_bind_variable_names_refer_to_same_variable = true,
|
||||
};
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
template <>
|
||||
struct fmt::formatter<cql3::dialect> {
|
||||
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
|
||||
|
||||
template <typename FormatContext>
|
||||
auto format(const cql3::dialect& d, FormatContext& ctx) const {
|
||||
return fmt::format_to(ctx.out(), "cql3::dialect{{duplicate_bind_variable_names_refer_to_same_variable={}}}",
|
||||
d.duplicate_bind_variable_names_refer_to_same_variable);
|
||||
}
|
||||
};
|
||||
@@ -574,7 +574,7 @@ struct fmt::formatter<E> : public fmt::formatter<cql3::expr::expression> {
|
||||
};
|
||||
|
||||
template <>
|
||||
struct fmt::formatter<cql3::expr::column_mutation_attribute::attribute_kind> : fmt::formatter<string_view> {
|
||||
struct fmt::formatter<cql3::expr::column_mutation_attribute::attribute_kind> : fmt::formatter<std::string_view> {
|
||||
template <typename FormatContext>
|
||||
auto format(cql3::expr::column_mutation_attribute::attribute_kind k, FormatContext& ctx) const {
|
||||
switch (k) {
|
||||
|
||||
@@ -578,7 +578,7 @@ tuple_constructor_prepare_nontuple(const tuple_constructor& tc, data_dictionary:
|
||||
|
||||
}
|
||||
|
||||
template <> struct fmt::formatter<cql3::expr::untyped_constant::type_class> : fmt::formatter<string_view> {
|
||||
template <> struct fmt::formatter<cql3::expr::untyped_constant::type_class> : fmt::formatter<std::string_view> {
|
||||
auto format(cql3::expr::untyped_constant::type_class t, fmt::format_context& ctx) const {
|
||||
using enum cql3::expr::untyped_constant::type_class;
|
||||
std::string_view name;
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
#include <fmt/core.h>
|
||||
|
||||
|
||||
template <> struct fmt::formatter<std::vector<data_type>> : fmt::formatter<string_view> {
|
||||
template <> struct fmt::formatter<std::vector<data_type>> : fmt::formatter<std::string_view> {
|
||||
auto format(const std::vector<data_type>& arg_types, fmt::format_context& ctx) const -> decltype(ctx.out());
|
||||
};
|
||||
|
||||
|
||||
@@ -69,16 +69,6 @@ using bytes_opt = std::optional<bytes>;
|
||||
template<typename ToType, typename FromType>
|
||||
static data_value castas_fctn_simple(data_value from) {
|
||||
auto val_from = value_cast<FromType>(from);
|
||||
// Workaround for https://github.com/boostorg/multiprecision/issues/553 (the additional bug discovered post-closing)
|
||||
if constexpr (std::is_floating_point_v<ToType> && std::is_same_v<FromType, utils::multiprecision_int>) {
|
||||
static auto min = utils::multiprecision_int(std::numeric_limits<ToType>::lowest());
|
||||
static auto max = utils::multiprecision_int(std::numeric_limits<ToType>::max());
|
||||
if (val_from < min) {
|
||||
return -std::numeric_limits<ToType>::infinity();
|
||||
} else if (val_from > max) {
|
||||
return std::numeric_limits<ToType>::infinity();
|
||||
}
|
||||
}
|
||||
return static_cast<ToType>(val_from);
|
||||
}
|
||||
|
||||
|
||||
@@ -332,9 +332,6 @@ functions::get(data_dictionary::database db,
|
||||
if (!receiver_cf.has_value()) {
|
||||
throw exceptions::invalid_request_exception("functions::get for token doesn't have a known column family");
|
||||
}
|
||||
if (schema == nullptr) {
|
||||
throw exceptions::invalid_request_exception(format("functions::get for token cannot find {} table", *receiver_cf));
|
||||
}
|
||||
auto fun = ::make_shared<token_fct>(schema);
|
||||
validate_types(db, keyspace, schema.get(), fun, provided_args, receiver_ks, receiver_cf);
|
||||
return fun;
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user