Compare commits
7 Commits
next
...
copilot/fi
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ea26e4b3a5 | ||
|
|
3b793ef09f | ||
|
|
df0a59ba03 | ||
|
|
69024a09b2 | ||
|
|
bb8f28a1ab | ||
|
|
32bc7e3a1c | ||
|
|
fb4e37248d |
8
.github/CODEOWNERS
vendored
8
.github/CODEOWNERS
vendored
@@ -1,5 +1,5 @@
|
|||||||
# AUTH
|
# AUTH
|
||||||
auth/* @nuivall
|
auth/* @nuivall @ptrsmrn
|
||||||
|
|
||||||
# CACHE
|
# CACHE
|
||||||
row_cache* @tgrabiec
|
row_cache* @tgrabiec
|
||||||
@@ -25,11 +25,11 @@ compaction/* @raphaelsc
|
|||||||
transport/*
|
transport/*
|
||||||
|
|
||||||
# CQL QUERY LANGUAGE
|
# CQL QUERY LANGUAGE
|
||||||
cql3/* @tgrabiec @nuivall
|
cql3/* @tgrabiec @nuivall @ptrsmrn
|
||||||
|
|
||||||
# COUNTERS
|
# COUNTERS
|
||||||
counters* @nuivall
|
counters* @nuivall @ptrsmrn
|
||||||
tests/counter_test* @nuivall
|
tests/counter_test* @nuivall @ptrsmrn
|
||||||
|
|
||||||
# DOCS
|
# DOCS
|
||||||
docs/* @annastuchlik @tzach
|
docs/* @annastuchlik @tzach
|
||||||
|
|||||||
29
.github/copilot-instructions.md
vendored
29
.github/copilot-instructions.md
vendored
@@ -55,26 +55,22 @@ ninja build/<mode>/test/boost/<test_name>
|
|||||||
ninja build/<mode>/scylla
|
ninja build/<mode>/scylla
|
||||||
|
|
||||||
# Run all tests in a file
|
# Run all tests in a file
|
||||||
./test.py --mode=<mode> test/<suite>/<test_name>.py
|
./test.py --mode=<mode> <test_path>
|
||||||
|
|
||||||
# Run a single test case from a file
|
# Run a single test case from a file
|
||||||
./test.py --mode=<mode> test/<suite>/<test_name>.py::<test_function_name>
|
./test.py --mode=<mode> <test_path>::<test_function_name>
|
||||||
|
|
||||||
# Run all tests in a directory
|
|
||||||
./test.py --mode=<mode> test/<suite>/
|
|
||||||
|
|
||||||
# Examples
|
# Examples
|
||||||
./test.py --mode=dev test/alternator/
|
./test.py --mode=dev alternator/
|
||||||
./test.py --mode=dev test/cluster/test_raft_voters.py::test_raft_limited_voters_retain_coordinator
|
./test.py --mode=dev cluster/test_raft_voters::test_raft_limited_voters_retain_coordinator
|
||||||
./test.py --mode=dev test/cqlpy/test_json.py
|
|
||||||
|
|
||||||
# Optional flags
|
# Optional flags
|
||||||
./test.py --mode=dev test/cluster/test_raft_no_quorum.py -v # Verbose output
|
./test.py --mode=dev cluster/test_raft_no_quorum -v # Verbose output
|
||||||
./test.py --mode=dev test/cluster/test_raft_no_quorum.py --repeat 5 # Repeat test 5 times
|
./test.py --mode=dev cluster/test_raft_no_quorum --repeat 5 # Repeat test 5 times
|
||||||
```
|
```
|
||||||
|
|
||||||
**Important:**
|
**Important:**
|
||||||
- Use full path with `.py` extension (e.g., `test/cluster/test_raft_no_quorum.py`, not `cluster/test_raft_no_quorum`)
|
- Use path without `.py` extension (e.g., `cluster/test_raft_no_quorum`, not `cluster/test_raft_no_quorum.py`)
|
||||||
- To run a single test case, append `::<test_function_name>` to the file path
|
- To run a single test case, append `::<test_function_name>` to the file path
|
||||||
- Add `-v` for verbose output
|
- Add `-v` for verbose output
|
||||||
- Add `--repeat <num>` to repeat a test multiple times
|
- Add `--repeat <num>` to repeat a test multiple times
|
||||||
@@ -88,14 +84,3 @@ ninja build/<mode>/scylla
|
|||||||
- Strive for simplicity and clarity, add complexity only when clearly justified
|
- Strive for simplicity and clarity, add complexity only when clearly justified
|
||||||
- Question requests: don't blindly implement requests - evaluate trade-offs, identify issues, and suggest better alternatives when appropriate
|
- Question requests: don't blindly implement requests - evaluate trade-offs, identify issues, and suggest better alternatives when appropriate
|
||||||
- Consider different approaches, weigh pros and cons, and recommend the best fit for the specific context
|
- Consider different approaches, weigh pros and cons, and recommend the best fit for the specific context
|
||||||
|
|
||||||
## Test Philosophy
|
|
||||||
- Performance matters. Tests should run as quickly as possible. Sleeps in the code are highly discouraged and should be avoided, to reduce run time and flakiness.
|
|
||||||
- Stability matters. Tests should be stable. New tests should be executed 100 times at least to ensure they pass 100 out of 100 times. (use --repeat 100 --max-failures 1 when running it)
|
|
||||||
- Unit tests should ideally test one thing and one thing only.
|
|
||||||
- Tests for bug fixes should run before the fix - and show the failure and after the fix - and show they now pass.
|
|
||||||
- Tests for bug fixes should have in their comments which bug fixes (GitHub or JIRA issue) they test.
|
|
||||||
- Tests in debug are always slower, so if needed, reduce number of iterations, rows, data used, cycles, etc. in debug mode.
|
|
||||||
- Tests should strive to be repeatable, and not use random input that will make their results unpredictable.
|
|
||||||
- Tests should consume as little resources as possible. Prefer running tests on a single node if it is sufficient, for example.
|
|
||||||
|
|
||||||
|
|||||||
2
.github/dependabot.yml
vendored
2
.github/dependabot.yml
vendored
@@ -1,6 +1,6 @@
|
|||||||
version: 2
|
version: 2
|
||||||
updates:
|
updates:
|
||||||
- package-ecosystem: "uv"
|
- package-ecosystem: "pip"
|
||||||
directory: "/docs"
|
directory: "/docs"
|
||||||
schedule:
|
schedule:
|
||||||
interval: "daily"
|
interval: "daily"
|
||||||
|
|||||||
2
.github/scripts/auto-backport.py
vendored
2
.github/scripts/auto-backport.py
vendored
@@ -62,7 +62,7 @@ def create_pull_request(repo, new_branch_name, base_branch_name, pr, backport_pr
|
|||||||
if is_draft:
|
if is_draft:
|
||||||
labels_to_add.append("conflicts")
|
labels_to_add.append("conflicts")
|
||||||
pr_comment = f"@{pr.user.login} - This PR was marked as draft because it has conflicts\n"
|
pr_comment = f"@{pr.user.login} - This PR was marked as draft because it has conflicts\n"
|
||||||
pr_comment += "Please resolve them and remove the 'conflicts' label. The PR will be made ready for review automatically."
|
pr_comment += "Please resolve them and mark this PR as ready for review"
|
||||||
backport_pr.create_issue_comment(pr_comment)
|
backport_pr.create_issue_comment(pr_comment)
|
||||||
|
|
||||||
# Apply all labels at once if we have any
|
# Apply all labels at once if we have any
|
||||||
|
|||||||
@@ -8,9 +8,6 @@ on:
|
|||||||
jobs:
|
jobs:
|
||||||
check-fixes-prefix:
|
check-fixes-prefix:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
issues: write
|
|
||||||
steps:
|
steps:
|
||||||
- name: Check PR body for "Fixes" prefix patterns
|
- name: Check PR body for "Fixes" prefix patterns
|
||||||
uses: actions/github-script@v7
|
uses: actions/github-script@v7
|
||||||
@@ -21,7 +18,7 @@ jobs:
|
|||||||
|
|
||||||
// Regular expression pattern to check for "Fixes" prefix
|
// Regular expression pattern to check for "Fixes" prefix
|
||||||
// Adjusted to dynamically insert the repository full name
|
// Adjusted to dynamically insert the repository full name
|
||||||
const pattern = `Fixes:? ((?:#|${repo.replace('/', '\\/')}#|https://github\\.com/${repo.replace('/', '\\/')}/issues/)(\\d+)|(?:https://scylladb\\.atlassian\\.net/browse/)?([A-Z]+-\\d+))`;
|
const pattern = `Fixes:? (?:#|${repo.replace('/', '\\/')}#|https://github\\.com/${repo.replace('/', '\\/')}/issues/)(\\d+)`;
|
||||||
const regex = new RegExp(pattern);
|
const regex = new RegExp(pattern);
|
||||||
|
|
||||||
if (!regex.test(body)) {
|
if (!regex.test(body)) {
|
||||||
|
|||||||
53
.github/workflows/call_backport_with_jira.yaml
vendored
53
.github/workflows/call_backport_with_jira.yaml
vendored
@@ -1,53 +0,0 @@
|
|||||||
name: Backport with Jira Integration
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- master
|
|
||||||
- next-*.*
|
|
||||||
- branch-*.*
|
|
||||||
pull_request_target:
|
|
||||||
types: [labeled, closed]
|
|
||||||
branches:
|
|
||||||
- master
|
|
||||||
- next
|
|
||||||
- next-*.*
|
|
||||||
- branch-*.*
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
backport-on-push:
|
|
||||||
if: github.event_name == 'push'
|
|
||||||
uses: scylladb/github-automation/.github/workflows/backport-with-jira.yaml@main
|
|
||||||
with:
|
|
||||||
event_type: 'push'
|
|
||||||
base_branch: ${{ github.ref }}
|
|
||||||
commits: ${{ github.event.before }}..${{ github.sha }}
|
|
||||||
secrets:
|
|
||||||
gh_token: ${{ secrets.AUTO_BACKPORT_TOKEN }}
|
|
||||||
jira_auth: ${{ secrets.USER_AND_KEY_FOR_JIRA_AUTOMATION }}
|
|
||||||
|
|
||||||
backport-on-label:
|
|
||||||
if: github.event_name == 'pull_request_target' && github.event.action == 'labeled'
|
|
||||||
uses: scylladb/github-automation/.github/workflows/backport-with-jira.yaml@main
|
|
||||||
with:
|
|
||||||
event_type: 'labeled'
|
|
||||||
base_branch: refs/heads/${{ github.event.pull_request.base.ref }}
|
|
||||||
pull_request_number: ${{ github.event.pull_request.number }}
|
|
||||||
head_commit: ${{ github.event.pull_request.base.sha }}
|
|
||||||
label_name: ${{ github.event.label.name }}
|
|
||||||
pr_state: ${{ github.event.pull_request.state }}
|
|
||||||
secrets:
|
|
||||||
gh_token: ${{ secrets.AUTO_BACKPORT_TOKEN }}
|
|
||||||
jira_auth: ${{ secrets.USER_AND_KEY_FOR_JIRA_AUTOMATION }}
|
|
||||||
|
|
||||||
backport-chain:
|
|
||||||
if: github.event_name == 'pull_request_target' && github.event.action == 'closed' && github.event.pull_request.merged == true
|
|
||||||
uses: scylladb/github-automation/.github/workflows/backport-with-jira.yaml@main
|
|
||||||
with:
|
|
||||||
event_type: 'chain'
|
|
||||||
base_branch: refs/heads/${{ github.event.pull_request.base.ref }}
|
|
||||||
pull_request_number: ${{ github.event.pull_request.number }}
|
|
||||||
pr_body: ${{ github.event.pull_request.body }}
|
|
||||||
secrets:
|
|
||||||
gh_token: ${{ secrets.AUTO_BACKPORT_TOKEN }}
|
|
||||||
jira_auth: ${{ secrets.USER_AND_KEY_FOR_JIRA_AUTOMATION }}
|
|
||||||
12
.github/workflows/call_jira_status_in_progress.yml
vendored
Normal file
12
.github/workflows/call_jira_status_in_progress.yml
vendored
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
name: Call Jira Status In Progress
|
||||||
|
|
||||||
|
on:
|
||||||
|
pull_request_target:
|
||||||
|
types: [opened]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
call-jira-status-in-progress:
|
||||||
|
uses: scylladb/github-automation/.github/workflows/main_update_jira_status_to_in_progress.yml@main
|
||||||
|
secrets:
|
||||||
|
caller_jira_auth: ${{ secrets.USER_AND_KEY_FOR_JIRA_AUTOMATION }}
|
||||||
|
|
||||||
12
.github/workflows/call_jira_status_in_review.yml
vendored
Normal file
12
.github/workflows/call_jira_status_in_review.yml
vendored
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
name: Call Jira Status In Review
|
||||||
|
|
||||||
|
on:
|
||||||
|
pull_request_target:
|
||||||
|
types: [ready_for_review, review_requested]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
call-jira-status-in-review:
|
||||||
|
uses: scylladb/github-automation/.github/workflows/main_update_jira_status_to_in_review.yml@main
|
||||||
|
secrets:
|
||||||
|
caller_jira_auth: ${{ secrets.USER_AND_KEY_FOR_JIRA_AUTOMATION }}
|
||||||
|
|
||||||
12
.github/workflows/call_jira_status_ready_for_merge.yml
vendored
Normal file
12
.github/workflows/call_jira_status_ready_for_merge.yml
vendored
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
name: Call Jira Status Ready For Merge
|
||||||
|
|
||||||
|
on:
|
||||||
|
pull_request_target:
|
||||||
|
types: [labeled]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
call-jira-status-update:
|
||||||
|
uses: scylladb/github-automation/.github/workflows/main_update_jira_status_to_ready_for_merge.yml@main
|
||||||
|
secrets:
|
||||||
|
caller_jira_auth: ${{ secrets.USER_AND_KEY_FOR_JIRA_AUTOMATION }}
|
||||||
|
|
||||||
18
.github/workflows/call_jira_sync.yml
vendored
18
.github/workflows/call_jira_sync.yml
vendored
@@ -1,18 +0,0 @@
|
|||||||
name: Sync Jira Based on PR Events
|
|
||||||
|
|
||||||
on:
|
|
||||||
pull_request_target:
|
|
||||||
types: [opened, edited, ready_for_review, review_requested, labeled, unlabeled, closed]
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
pull-requests: write
|
|
||||||
issues: write
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
jira-sync:
|
|
||||||
uses: scylladb/github-automation/.github/workflows/main_pr_events_jira_sync.yml@main
|
|
||||||
with:
|
|
||||||
caller_action: ${{ github.event.action }}
|
|
||||||
secrets:
|
|
||||||
caller_jira_auth: ${{ secrets.USER_AND_KEY_FOR_JIRA_AUTOMATION }}
|
|
||||||
@@ -1,22 +0,0 @@
|
|||||||
name: Sync Jira Based on PR Milestone Events
|
|
||||||
|
|
||||||
on:
|
|
||||||
pull_request_target:
|
|
||||||
types: [milestoned, demilestoned]
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
pull-requests: read
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
jira-sync-milestone-set:
|
|
||||||
if: github.event.action == 'milestoned'
|
|
||||||
uses: scylladb/github-automation/.github/workflows/main_jira_sync_pr_milestone_set.yml@main
|
|
||||||
secrets:
|
|
||||||
caller_jira_auth: ${{ secrets.USER_AND_KEY_FOR_JIRA_AUTOMATION }}
|
|
||||||
|
|
||||||
jira-sync-milestone-removed:
|
|
||||||
if: github.event.action == 'demilestoned'
|
|
||||||
uses: scylladb/github-automation/.github/workflows/main_jira_sync_pr_milestone_removed.yml@main
|
|
||||||
secrets:
|
|
||||||
caller_jira_auth: ${{ secrets.USER_AND_KEY_FOR_JIRA_AUTOMATION }}
|
|
||||||
@@ -1,14 +0,0 @@
|
|||||||
name: Call Jira release creation for new milestone
|
|
||||||
|
|
||||||
on:
|
|
||||||
milestone:
|
|
||||||
types: [created, closed]
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
sync-milestone-to-jira:
|
|
||||||
uses: scylladb/github-automation/.github/workflows/main_sync_milestone_to_jira_release.yml@main
|
|
||||||
with:
|
|
||||||
# Comma-separated list of Jira project keys
|
|
||||||
jira_project_keys: "SCYLLADB,CUSTOMER,SMI,RELENG,VECTOR"
|
|
||||||
secrets:
|
|
||||||
caller_jira_auth: ${{ secrets.USER_AND_KEY_FOR_JIRA_AUTOMATION }}
|
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
name: validate_pr_author_email
|
|
||||||
|
|
||||||
on:
|
|
||||||
pull_request_target:
|
|
||||||
types:
|
|
||||||
- opened
|
|
||||||
- synchronize
|
|
||||||
- reopened
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
validate_pr_author_email:
|
|
||||||
uses: scylladb/github-automation/.github/workflows/validate_pr_author_email.yml@main
|
|
||||||
|
|
||||||
@@ -1,62 +0,0 @@
|
|||||||
name: Close issues created by Scylla associates
|
|
||||||
|
|
||||||
on:
|
|
||||||
issues:
|
|
||||||
types: [opened, reopened]
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
issues: write
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
comment-and-close:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: Comment and close if author email is scylladb.com
|
|
||||||
uses: actions/github-script@v7
|
|
||||||
with:
|
|
||||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
script: |
|
|
||||||
const issue = context.payload.issue;
|
|
||||||
const actor = context.actor;
|
|
||||||
|
|
||||||
// Get user data (only public email is available)
|
|
||||||
const { data: user } = await github.rest.users.getByUsername({
|
|
||||||
username: actor,
|
|
||||||
});
|
|
||||||
|
|
||||||
const email = user.email || "";
|
|
||||||
console.log(`Actor: ${actor}, public email: ${email || "<none>"}`);
|
|
||||||
|
|
||||||
// Only continue if email exists and ends with @scylladb.com
|
|
||||||
if (!email || !email.toLowerCase().endsWith("@scylladb.com")) {
|
|
||||||
console.log("User is not a scylladb.com email (or email not public); skipping.");
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
const owner = context.repo.owner;
|
|
||||||
const repo = context.repo.repo;
|
|
||||||
const issue_number = issue.number;
|
|
||||||
|
|
||||||
const body = "Issues in this repository are closed automatically. Scylla associates should use Jira to manage issues.\nPlease move this issue to Jira https://scylladb.atlassian.net/jira/software/c/projects/SCYLLADB/list";
|
|
||||||
|
|
||||||
// Add the comment
|
|
||||||
await github.rest.issues.createComment({
|
|
||||||
owner,
|
|
||||||
repo,
|
|
||||||
issue_number,
|
|
||||||
body,
|
|
||||||
});
|
|
||||||
|
|
||||||
console.log(`Comment added to #${issue_number}`);
|
|
||||||
|
|
||||||
// Close the issue
|
|
||||||
await github.rest.issues.update({
|
|
||||||
owner,
|
|
||||||
repo,
|
|
||||||
issue_number,
|
|
||||||
state: "closed",
|
|
||||||
state_reason: "not_planned"
|
|
||||||
});
|
|
||||||
|
|
||||||
console.log(`Issue #${issue_number} closed.`);
|
|
||||||
2
.github/workflows/codespell.yaml
vendored
2
.github/workflows/codespell.yaml
vendored
@@ -13,5 +13,5 @@ jobs:
|
|||||||
- uses: codespell-project/actions-codespell@master
|
- uses: codespell-project/actions-codespell@master
|
||||||
with:
|
with:
|
||||||
only_warn: 1
|
only_warn: 1
|
||||||
ignore_words_list: "ans,datas,fo,ser,ue,crate,nd,reenable,strat,stap,te,raison,iif,tread"
|
ignore_words_list: "ans,datas,fo,ser,ue,crate,nd,reenable,strat,stap,te,raison"
|
||||||
skip: "./.git,./build,./tools,*.js,*.lock,./test,./licenses,./redis/lolwut.cc,*.svg"
|
skip: "./.git,./build,./tools,*.js,*.lock,./test,./licenses,./redis/lolwut.cc,*.svg"
|
||||||
|
|||||||
8
.github/workflows/docs-pages.yaml
vendored
8
.github/workflows/docs-pages.yaml
vendored
@@ -18,10 +18,6 @@ on:
|
|||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
release:
|
release:
|
||||||
permissions:
|
|
||||||
pages: write
|
|
||||||
id-token: write
|
|
||||||
contents: write
|
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
@@ -33,9 +29,7 @@ jobs:
|
|||||||
- name: Set up Python
|
- name: Set up Python
|
||||||
uses: actions/setup-python@v5
|
uses: actions/setup-python@v5
|
||||||
with:
|
with:
|
||||||
python-version: "3.12"
|
python-version: "3.10"
|
||||||
- name: Install uv
|
|
||||||
uses: astral-sh/setup-uv@v6
|
|
||||||
- name: Set up env
|
- name: Set up env
|
||||||
run: make -C docs FLAG="${{ env.FLAG }}" setupenv
|
run: make -C docs FLAG="${{ env.FLAG }}" setupenv
|
||||||
- name: Build docs
|
- name: Build docs
|
||||||
|
|||||||
7
.github/workflows/docs-pr.yaml
vendored
7
.github/workflows/docs-pr.yaml
vendored
@@ -2,9 +2,6 @@ name: "Docs / Build PR"
|
|||||||
# For more information,
|
# For more information,
|
||||||
# see https://sphinx-theme.scylladb.com/stable/deployment/production.html#available-workflows
|
# see https://sphinx-theme.scylladb.com/stable/deployment/production.html#available-workflows
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
env:
|
env:
|
||||||
FLAG: ${{ github.repository == 'scylladb/scylla-enterprise' && 'enterprise' || 'opensource' }}
|
FLAG: ${{ github.repository == 'scylladb/scylla-enterprise' && 'enterprise' || 'opensource' }}
|
||||||
|
|
||||||
@@ -29,9 +26,7 @@ jobs:
|
|||||||
- name: Set up Python
|
- name: Set up Python
|
||||||
uses: actions/setup-python@v5
|
uses: actions/setup-python@v5
|
||||||
with:
|
with:
|
||||||
python-version: "3.12"
|
python-version: "3.10"
|
||||||
- name: Install uv
|
|
||||||
uses: astral-sh/setup-uv@v6
|
|
||||||
- name: Set up env
|
- name: Set up env
|
||||||
run: make -C docs FLAG="${{ env.FLAG }}" setupenv
|
run: make -C docs FLAG="${{ env.FLAG }}" setupenv
|
||||||
- name: Build docs
|
- name: Build docs
|
||||||
|
|||||||
13
.github/workflows/docs-validate-metrics.yml
vendored
13
.github/workflows/docs-validate-metrics.yml
vendored
@@ -1,8 +1,5 @@
|
|||||||
name: Docs / Validate metrics
|
name: Docs / Validate metrics
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
on:
|
on:
|
||||||
pull_request:
|
pull_request:
|
||||||
branches:
|
branches:
|
||||||
@@ -10,7 +7,7 @@ on:
|
|||||||
- enterprise
|
- enterprise
|
||||||
paths:
|
paths:
|
||||||
- '**/*.cc'
|
- '**/*.cc'
|
||||||
- 'scripts/metrics-config.yml'
|
- 'scripts/metrics-config.yml'
|
||||||
- 'scripts/get_description.py'
|
- 'scripts/get_description.py'
|
||||||
- 'docs/_ext/scylladb_metrics.py'
|
- 'docs/_ext/scylladb_metrics.py'
|
||||||
|
|
||||||
@@ -18,20 +15,20 @@ jobs:
|
|||||||
validate-metrics:
|
validate-metrics:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
name: Check metrics documentation coverage
|
name: Check metrics documentation coverage
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
submodules: true
|
submodules: true
|
||||||
|
|
||||||
- name: Set up Python
|
- name: Set up Python
|
||||||
uses: actions/setup-python@v6
|
uses: actions/setup-python@v6
|
||||||
with:
|
with:
|
||||||
python-version: '3.10'
|
python-version: '3.10'
|
||||||
|
|
||||||
- name: Install dependencies
|
- name: Install dependencies
|
||||||
run: pip install PyYAML
|
run: pip install PyYAML
|
||||||
|
|
||||||
- name: Validate metrics
|
- name: Validate metrics
|
||||||
run: python3 scripts/get_description.py --validate -c scripts/metrics-config.yml
|
run: python3 scripts/get_description.py --validate -c scripts/metrics-config.yml
|
||||||
|
|||||||
5
.github/workflows/iwyu.yaml
vendored
5
.github/workflows/iwyu.yaml
vendored
@@ -14,8 +14,7 @@ env:
|
|||||||
CLEANER_DIRS: test/unit exceptions alternator api auth cdc compaction db dht gms index lang message mutation mutation_writer node_ops raft redis replica service
|
CLEANER_DIRS: test/unit exceptions alternator api auth cdc compaction db dht gms index lang message mutation mutation_writer node_ops raft redis replica service
|
||||||
SEASTAR_BAD_INCLUDE_OUTPUT_PATH: build/seastar-bad-include.log
|
SEASTAR_BAD_INCLUDE_OUTPUT_PATH: build/seastar-bad-include.log
|
||||||
|
|
||||||
permissions:
|
permissions: {}
|
||||||
contents: read
|
|
||||||
|
|
||||||
# cancel the in-progress run upon a repush
|
# cancel the in-progress run upon a repush
|
||||||
concurrency:
|
concurrency:
|
||||||
@@ -35,6 +34,8 @@ jobs:
|
|||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
submodules: true
|
submodules: true
|
||||||
|
- run: |
|
||||||
|
sudo dnf -y install clang-tools-extra
|
||||||
- name: Generate compilation database
|
- name: Generate compilation database
|
||||||
run: |
|
run: |
|
||||||
cmake \
|
cmake \
|
||||||
|
|||||||
2
.github/workflows/read-toolchain.yaml
vendored
2
.github/workflows/read-toolchain.yaml
vendored
@@ -10,8 +10,6 @@ on:
|
|||||||
jobs:
|
jobs:
|
||||||
read-toolchain:
|
read-toolchain:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
outputs:
|
outputs:
|
||||||
image: ${{ steps.read.outputs.image }}
|
image: ${{ steps.read.outputs.image }}
|
||||||
steps:
|
steps:
|
||||||
|
|||||||
53
.github/workflows/trigger-scylla-ci.yaml
vendored
53
.github/workflows/trigger-scylla-ci.yaml
vendored
@@ -1,66 +1,21 @@
|
|||||||
name: Trigger Scylla CI Route
|
name: Trigger Scylla CI Route
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
on:
|
on:
|
||||||
issue_comment:
|
issue_comment:
|
||||||
types: [created]
|
types: [created]
|
||||||
pull_request_target:
|
|
||||||
types:
|
|
||||||
- unlabeled
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
trigger-jenkins:
|
trigger-jenkins:
|
||||||
if: (github.event_name == 'issue_comment' && github.event.comment.user.login != 'scylladbbot') || github.event.label.name == 'conflicts'
|
if: github.event.comment.user.login != 'scylladbbot' && contains(github.event.comment.body, '@scylladbbot') && contains(github.event.comment.body, 'trigger-ci')
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Verify Org Membership
|
|
||||||
id: verify_author
|
|
||||||
env:
|
|
||||||
EVENT_NAME: ${{ github.event_name }}
|
|
||||||
PR_AUTHOR: ${{ github.event.pull_request.user.login }}
|
|
||||||
PR_ASSOCIATION: ${{ github.event.pull_request.author_association }}
|
|
||||||
COMMENT_AUTHOR: ${{ github.event.comment.user.login }}
|
|
||||||
COMMENT_ASSOCIATION: ${{ github.event.comment.author_association }}
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
if [[ "$EVENT_NAME" == "pull_request_target" ]]; then
|
|
||||||
AUTHOR="$PR_AUTHOR"
|
|
||||||
ASSOCIATION="$PR_ASSOCIATION"
|
|
||||||
else
|
|
||||||
AUTHOR="$COMMENT_AUTHOR"
|
|
||||||
ASSOCIATION="$COMMENT_ASSOCIATION"
|
|
||||||
fi
|
|
||||||
if [[ "$ASSOCIATION" == "MEMBER" || "$ASSOCIATION" == "OWNER" ]]; then
|
|
||||||
echo "member=true" >> $GITHUB_OUTPUT
|
|
||||||
else
|
|
||||||
echo "::warning::${AUTHOR} is not a member of scylladb (association: ${ASSOCIATION}); skipping CI trigger."
|
|
||||||
echo "member=false" >> $GITHUB_OUTPUT
|
|
||||||
fi
|
|
||||||
|
|
||||||
- name: Validate Comment Trigger
|
|
||||||
if: github.event_name == 'issue_comment'
|
|
||||||
id: verify_comment
|
|
||||||
env:
|
|
||||||
COMMENT_BODY: ${{ github.event.comment.body }}
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
CLEAN_BODY=$(echo "$COMMENT_BODY" | grep -v '^[[:space:]]*>')
|
|
||||||
|
|
||||||
if echo "$CLEAN_BODY" | grep -qi '@scylladbbot' && echo "$CLEAN_BODY" | grep -qi 'trigger-ci'; then
|
|
||||||
echo "trigger=true" >> $GITHUB_OUTPUT
|
|
||||||
else
|
|
||||||
echo "trigger=false" >> $GITHUB_OUTPUT
|
|
||||||
fi
|
|
||||||
|
|
||||||
- name: Trigger Scylla-CI-Route Jenkins Job
|
- name: Trigger Scylla-CI-Route Jenkins Job
|
||||||
if: steps.verify_author.outputs.member == 'true' && (github.event_name == 'pull_request_target' || steps.verify_comment.outputs.trigger == 'true')
|
|
||||||
env:
|
env:
|
||||||
JENKINS_USER: ${{ secrets.JENKINS_USERNAME }}
|
JENKINS_USER: ${{ secrets.JENKINS_USERNAME }}
|
||||||
JENKINS_API_TOKEN: ${{ secrets.JENKINS_TOKEN }}
|
JENKINS_API_TOKEN: ${{ secrets.JENKINS_TOKEN }}
|
||||||
JENKINS_URL: "https://jenkins.scylladb.com"
|
JENKINS_URL: "https://jenkins.scylladb.com"
|
||||||
PR_NUMBER: "${{ github.event.issue.number || github.event.pull_request.number }}"
|
|
||||||
PR_REPO_NAME: "${{ github.event.repository.full_name }}"
|
|
||||||
run: |
|
run: |
|
||||||
|
PR_NUMBER=${{ github.event.issue.number }}
|
||||||
|
PR_REPO_NAME=${{ github.event.repository.full_name }}
|
||||||
curl -X POST "$JENKINS_URL/job/releng/job/Scylla-CI-Route/buildWithParameters?PR_NUMBER=$PR_NUMBER&PR_REPO_NAME=$PR_REPO_NAME" \
|
curl -X POST "$JENKINS_URL/job/releng/job/Scylla-CI-Route/buildWithParameters?PR_NUMBER=$PR_NUMBER&PR_REPO_NAME=$PR_REPO_NAME" \
|
||||||
--user "$JENKINS_USER:$JENKINS_API_TOKEN" --fail
|
--user "$JENKINS_USER:$JENKINS_API_TOKEN" --fail -i -v
|
||||||
|
|||||||
3
.github/workflows/trigger_jenkins.yaml
vendored
3
.github/workflows/trigger_jenkins.yaml
vendored
@@ -1,8 +1,5 @@
|
|||||||
name: Trigger next gating
|
name: Trigger next gating
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
|
|||||||
@@ -300,6 +300,7 @@ add_subdirectory(locator)
|
|||||||
add_subdirectory(message)
|
add_subdirectory(message)
|
||||||
add_subdirectory(mutation)
|
add_subdirectory(mutation)
|
||||||
add_subdirectory(mutation_writer)
|
add_subdirectory(mutation_writer)
|
||||||
|
add_subdirectory(node_ops)
|
||||||
add_subdirectory(readers)
|
add_subdirectory(readers)
|
||||||
add_subdirectory(replica)
|
add_subdirectory(replica)
|
||||||
add_subdirectory(raft)
|
add_subdirectory(raft)
|
||||||
|
|||||||
@@ -43,7 +43,7 @@ For further information, please see:
|
|||||||
|
|
||||||
[developer documentation]: HACKING.md
|
[developer documentation]: HACKING.md
|
||||||
[build documentation]: docs/dev/building.md
|
[build documentation]: docs/dev/building.md
|
||||||
[docker image build documentation]: dist/docker/redhat/README.md
|
[docker image build documentation]: dist/docker/debian/README.md
|
||||||
|
|
||||||
## Running Scylla
|
## Running Scylla
|
||||||
|
|
||||||
|
|||||||
@@ -78,7 +78,7 @@ fi
|
|||||||
|
|
||||||
# Default scylla product/version tags
|
# Default scylla product/version tags
|
||||||
PRODUCT=scylla
|
PRODUCT=scylla
|
||||||
VERSION=2026.2.0-dev
|
VERSION=2026.1.0-dev
|
||||||
|
|
||||||
if test -f version
|
if test -f version
|
||||||
then
|
then
|
||||||
|
|||||||
@@ -18,7 +18,6 @@ target_sources(alternator
|
|||||||
consumed_capacity.cc
|
consumed_capacity.cc
|
||||||
ttl.cc
|
ttl.cc
|
||||||
parsed_expression_cache.cc
|
parsed_expression_cache.cc
|
||||||
http_compression.cc
|
|
||||||
${cql_grammar_srcs})
|
${cql_grammar_srcs})
|
||||||
target_include_directories(alternator
|
target_include_directories(alternator
|
||||||
PUBLIC
|
PUBLIC
|
||||||
|
|||||||
@@ -13,8 +13,7 @@
|
|||||||
#include <string_view>
|
#include <string_view>
|
||||||
#include "alternator/auth.hh"
|
#include "alternator/auth.hh"
|
||||||
#include <fmt/format.h>
|
#include <fmt/format.h>
|
||||||
#include "db/consistency_level_type.hh"
|
#include "auth/password_authenticator.hh"
|
||||||
#include "db/system_keyspace.hh"
|
|
||||||
#include "service/storage_proxy.hh"
|
#include "service/storage_proxy.hh"
|
||||||
#include "alternator/executor.hh"
|
#include "alternator/executor.hh"
|
||||||
#include "cql3/selection/selection.hh"
|
#include "cql3/selection/selection.hh"
|
||||||
@@ -26,8 +25,8 @@ namespace alternator {
|
|||||||
|
|
||||||
static logging::logger alogger("alternator-auth");
|
static logging::logger alogger("alternator-auth");
|
||||||
|
|
||||||
future<std::string> get_key_from_roles(service::storage_proxy& proxy, std::string username) {
|
future<std::string> get_key_from_roles(service::storage_proxy& proxy, auth::service& as, std::string username) {
|
||||||
schema_ptr schema = proxy.data_dictionary().find_schema(db::system_keyspace::NAME, "roles");
|
schema_ptr schema = proxy.data_dictionary().find_schema(auth::get_auth_ks_name(as.query_processor()), "roles");
|
||||||
partition_key pk = partition_key::from_single_value(*schema, utf8_type->decompose(username));
|
partition_key pk = partition_key::from_single_value(*schema, utf8_type->decompose(username));
|
||||||
dht::partition_range_vector partition_ranges{dht::partition_range(dht::decorate_key(*schema, pk))};
|
dht::partition_range_vector partition_ranges{dht::partition_range(dht::decorate_key(*schema, pk))};
|
||||||
std::vector<query::clustering_range> bounds{query::clustering_range::make_open_ended_both_sides()};
|
std::vector<query::clustering_range> bounds{query::clustering_range::make_open_ended_both_sides()};
|
||||||
@@ -40,7 +39,7 @@ future<std::string> get_key_from_roles(service::storage_proxy& proxy, std::strin
|
|||||||
auto partition_slice = query::partition_slice(std::move(bounds), {}, query::column_id_vector{salted_hash_col->id, can_login_col->id}, selection->get_query_options());
|
auto partition_slice = query::partition_slice(std::move(bounds), {}, query::column_id_vector{salted_hash_col->id, can_login_col->id}, selection->get_query_options());
|
||||||
auto command = ::make_lw_shared<query::read_command>(schema->id(), schema->version(), partition_slice,
|
auto command = ::make_lw_shared<query::read_command>(schema->id(), schema->version(), partition_slice,
|
||||||
proxy.get_max_result_size(partition_slice), query::tombstone_limit(proxy.get_tombstone_limit()));
|
proxy.get_max_result_size(partition_slice), query::tombstone_limit(proxy.get_tombstone_limit()));
|
||||||
auto cl = db::consistency_level::LOCAL_ONE;
|
auto cl = auth::password_authenticator::consistency_for_user(username);
|
||||||
|
|
||||||
service::client_state client_state{service::client_state::internal_tag()};
|
service::client_state client_state{service::client_state::internal_tag()};
|
||||||
service::storage_proxy::coordinator_query_result qr = co_await proxy.query(schema, std::move(command), std::move(partition_ranges), cl,
|
service::storage_proxy::coordinator_query_result qr = co_await proxy.query(schema, std::move(command), std::move(partition_ranges), cl,
|
||||||
|
|||||||
@@ -20,6 +20,6 @@ namespace alternator {
|
|||||||
|
|
||||||
using key_cache = utils::loading_cache<std::string, std::string, 1>;
|
using key_cache = utils::loading_cache<std::string, std::string, 1>;
|
||||||
|
|
||||||
future<std::string> get_key_from_roles(service::storage_proxy& proxy, std::string username);
|
future<std::string> get_key_from_roles(service::storage_proxy& proxy, auth::service& as, std::string username);
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -42,7 +42,7 @@ comparison_operator_type get_comparison_operator(const rjson::value& comparison_
|
|||||||
if (!comparison_operator.IsString()) {
|
if (!comparison_operator.IsString()) {
|
||||||
throw api_error::validation(fmt::format("Invalid comparison operator definition {}", rjson::print(comparison_operator)));
|
throw api_error::validation(fmt::format("Invalid comparison operator definition {}", rjson::print(comparison_operator)));
|
||||||
}
|
}
|
||||||
std::string op = rjson::to_string(comparison_operator);
|
std::string op = comparison_operator.GetString();
|
||||||
auto it = ops.find(op);
|
auto it = ops.find(op);
|
||||||
if (it == ops.end()) {
|
if (it == ops.end()) {
|
||||||
throw api_error::validation(fmt::format("Unsupported comparison operator {}", op));
|
throw api_error::validation(fmt::format("Unsupported comparison operator {}", op));
|
||||||
@@ -377,8 +377,8 @@ bool check_compare(const rjson::value* v1, const rjson::value& v2, const Compara
|
|||||||
return cmp(unwrap_number(*v1, cmp.diagnostic), unwrap_number(v2, cmp.diagnostic));
|
return cmp(unwrap_number(*v1, cmp.diagnostic), unwrap_number(v2, cmp.diagnostic));
|
||||||
}
|
}
|
||||||
if (kv1.name == "S") {
|
if (kv1.name == "S") {
|
||||||
return cmp(rjson::to_string_view(kv1.value),
|
return cmp(std::string_view(kv1.value.GetString(), kv1.value.GetStringLength()),
|
||||||
rjson::to_string_view(kv2.value));
|
std::string_view(kv2.value.GetString(), kv2.value.GetStringLength()));
|
||||||
}
|
}
|
||||||
if (kv1.name == "B") {
|
if (kv1.name == "B") {
|
||||||
auto d_kv1 = unwrap_bytes(kv1.value, v1_from_query);
|
auto d_kv1 = unwrap_bytes(kv1.value, v1_from_query);
|
||||||
@@ -470,9 +470,9 @@ static bool check_BETWEEN(const rjson::value* v, const rjson::value& lb, const r
|
|||||||
return check_BETWEEN(unwrap_number(*v, diag), unwrap_number(lb, diag), unwrap_number(ub, diag), bounds_from_query);
|
return check_BETWEEN(unwrap_number(*v, diag), unwrap_number(lb, diag), unwrap_number(ub, diag), bounds_from_query);
|
||||||
}
|
}
|
||||||
if (kv_v.name == "S") {
|
if (kv_v.name == "S") {
|
||||||
return check_BETWEEN(rjson::to_string_view(kv_v.value),
|
return check_BETWEEN(std::string_view(kv_v.value.GetString(), kv_v.value.GetStringLength()),
|
||||||
rjson::to_string_view(kv_lb.value),
|
std::string_view(kv_lb.value.GetString(), kv_lb.value.GetStringLength()),
|
||||||
rjson::to_string_view(kv_ub.value),
|
std::string_view(kv_ub.value.GetString(), kv_ub.value.GetStringLength()),
|
||||||
bounds_from_query);
|
bounds_from_query);
|
||||||
}
|
}
|
||||||
if (kv_v.name == "B") {
|
if (kv_v.name == "B") {
|
||||||
@@ -618,7 +618,7 @@ conditional_operator_type get_conditional_operator(const rjson::value& req) {
|
|||||||
// Check if the existing values of the item (previous_item) match the
|
// Check if the existing values of the item (previous_item) match the
|
||||||
// conditions given by the Expected and ConditionalOperator parameters
|
// conditions given by the Expected and ConditionalOperator parameters
|
||||||
// (if they exist) in the request (an UpdateItem, PutItem or DeleteItem).
|
// (if they exist) in the request (an UpdateItem, PutItem or DeleteItem).
|
||||||
// This function can throw a ValidationException API error if there
|
// This function can throw an ValidationException API error if there
|
||||||
// are errors in the format of the condition itself.
|
// are errors in the format of the condition itself.
|
||||||
bool verify_expected(const rjson::value& req, const rjson::value* previous_item) {
|
bool verify_expected(const rjson::value& req, const rjson::value* previous_item) {
|
||||||
const rjson::value* expected = rjson::find(req, "Expected");
|
const rjson::value* expected = rjson::find(req, "Expected");
|
||||||
|
|||||||
@@ -8,8 +8,6 @@
|
|||||||
|
|
||||||
#include "consumed_capacity.hh"
|
#include "consumed_capacity.hh"
|
||||||
#include "error.hh"
|
#include "error.hh"
|
||||||
#include "utils/rjson.hh"
|
|
||||||
#include <fmt/format.h>
|
|
||||||
|
|
||||||
namespace alternator {
|
namespace alternator {
|
||||||
|
|
||||||
@@ -34,18 +32,18 @@ bool consumed_capacity_counter::should_add_capacity(const rjson::value& request)
|
|||||||
if (!return_consumed->IsString()) {
|
if (!return_consumed->IsString()) {
|
||||||
throw api_error::validation("Non-string ReturnConsumedCapacity field in request");
|
throw api_error::validation("Non-string ReturnConsumedCapacity field in request");
|
||||||
}
|
}
|
||||||
std::string_view consumed = rjson::to_string_view(*return_consumed);
|
std::string consumed = return_consumed->GetString();
|
||||||
if (consumed == "INDEXES") {
|
if (consumed == "INDEXES") {
|
||||||
throw api_error::validation("INDEXES consumed capacity is not supported");
|
throw api_error::validation("INDEXES consumed capacity is not supported");
|
||||||
}
|
}
|
||||||
if (consumed != "TOTAL") {
|
if (consumed != "TOTAL") {
|
||||||
throw api_error::validation(fmt::format("Unknown consumed capacity {}", consumed));
|
throw api_error::validation("Unknown consumed capacity "+ consumed);
|
||||||
}
|
}
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
void consumed_capacity_counter::add_consumed_capacity_to_response_if_needed(rjson::value& response) const noexcept {
|
void consumed_capacity_counter::add_consumed_capacity_to_response_if_needed(rjson::value& response) const noexcept {
|
||||||
if (_should_add_to_response) {
|
if (_should_add_to_reponse) {
|
||||||
auto consumption = rjson::empty_object();
|
auto consumption = rjson::empty_object();
|
||||||
rjson::add(consumption, "CapacityUnits", get_consumed_capacity_units());
|
rjson::add(consumption, "CapacityUnits", get_consumed_capacity_units());
|
||||||
rjson::add(response, "ConsumedCapacity", std::move(consumption));
|
rjson::add(response, "ConsumedCapacity", std::move(consumption));
|
||||||
|
|||||||
@@ -28,9 +28,9 @@ namespace alternator {
|
|||||||
class consumed_capacity_counter {
|
class consumed_capacity_counter {
|
||||||
public:
|
public:
|
||||||
consumed_capacity_counter() = default;
|
consumed_capacity_counter() = default;
|
||||||
consumed_capacity_counter(bool should_add_to_response) : _should_add_to_response(should_add_to_response){}
|
consumed_capacity_counter(bool should_add_to_reponse) : _should_add_to_reponse(should_add_to_reponse){}
|
||||||
bool operator()() const noexcept {
|
bool operator()() const noexcept {
|
||||||
return _should_add_to_response;
|
return _should_add_to_reponse;
|
||||||
}
|
}
|
||||||
|
|
||||||
consumed_capacity_counter& operator +=(uint64_t bytes);
|
consumed_capacity_counter& operator +=(uint64_t bytes);
|
||||||
@@ -44,7 +44,7 @@ public:
|
|||||||
uint64_t _total_bytes = 0;
|
uint64_t _total_bytes = 0;
|
||||||
static bool should_add_capacity(const rjson::value& request);
|
static bool should_add_capacity(const rjson::value& request);
|
||||||
protected:
|
protected:
|
||||||
bool _should_add_to_response = false;
|
bool _should_add_to_reponse = false;
|
||||||
};
|
};
|
||||||
|
|
||||||
class rcu_consumed_capacity_counter : public consumed_capacity_counter {
|
class rcu_consumed_capacity_counter : public consumed_capacity_counter {
|
||||||
|
|||||||
@@ -28,7 +28,6 @@ static logging::logger logger("alternator_controller");
|
|||||||
controller::controller(
|
controller::controller(
|
||||||
sharded<gms::gossiper>& gossiper,
|
sharded<gms::gossiper>& gossiper,
|
||||||
sharded<service::storage_proxy>& proxy,
|
sharded<service::storage_proxy>& proxy,
|
||||||
sharded<service::storage_service>& ss,
|
|
||||||
sharded<service::migration_manager>& mm,
|
sharded<service::migration_manager>& mm,
|
||||||
sharded<db::system_distributed_keyspace>& sys_dist_ks,
|
sharded<db::system_distributed_keyspace>& sys_dist_ks,
|
||||||
sharded<cdc::generation_service>& cdc_gen_svc,
|
sharded<cdc::generation_service>& cdc_gen_svc,
|
||||||
@@ -40,7 +39,6 @@ controller::controller(
|
|||||||
: protocol_server(sg)
|
: protocol_server(sg)
|
||||||
, _gossiper(gossiper)
|
, _gossiper(gossiper)
|
||||||
, _proxy(proxy)
|
, _proxy(proxy)
|
||||||
, _ss(ss)
|
|
||||||
, _mm(mm)
|
, _mm(mm)
|
||||||
, _sys_dist_ks(sys_dist_ks)
|
, _sys_dist_ks(sys_dist_ks)
|
||||||
, _cdc_gen_svc(cdc_gen_svc)
|
, _cdc_gen_svc(cdc_gen_svc)
|
||||||
@@ -91,7 +89,7 @@ future<> controller::start_server() {
|
|||||||
auto get_timeout_in_ms = [] (const db::config& cfg) -> utils::updateable_value<uint32_t> {
|
auto get_timeout_in_ms = [] (const db::config& cfg) -> utils::updateable_value<uint32_t> {
|
||||||
return cfg.alternator_timeout_in_ms;
|
return cfg.alternator_timeout_in_ms;
|
||||||
};
|
};
|
||||||
_executor.start(std::ref(_gossiper), std::ref(_proxy), std::ref(_ss), std::ref(_mm), std::ref(_sys_dist_ks),
|
_executor.start(std::ref(_gossiper), std::ref(_proxy), std::ref(_mm), std::ref(_sys_dist_ks),
|
||||||
sharded_parameter(get_cdc_metadata, std::ref(_cdc_gen_svc)), _ssg.value(),
|
sharded_parameter(get_cdc_metadata, std::ref(_cdc_gen_svc)), _ssg.value(),
|
||||||
sharded_parameter(get_timeout_in_ms, std::ref(_config))).get();
|
sharded_parameter(get_timeout_in_ms, std::ref(_config))).get();
|
||||||
_server.start(std::ref(_executor), std::ref(_proxy), std::ref(_gossiper), std::ref(_auth_service), std::ref(_sl_controller)).get();
|
_server.start(std::ref(_executor), std::ref(_proxy), std::ref(_gossiper), std::ref(_auth_service), std::ref(_sl_controller)).get();
|
||||||
@@ -105,23 +103,11 @@ future<> controller::start_server() {
|
|||||||
alternator_port = _config.alternator_port();
|
alternator_port = _config.alternator_port();
|
||||||
_listen_addresses.push_back({addr, *alternator_port});
|
_listen_addresses.push_back({addr, *alternator_port});
|
||||||
}
|
}
|
||||||
std::optional<uint16_t> alternator_port_proxy_protocol;
|
|
||||||
if (_config.alternator_port_proxy_protocol()) {
|
|
||||||
alternator_port_proxy_protocol = _config.alternator_port_proxy_protocol();
|
|
||||||
_listen_addresses.push_back({addr, *alternator_port_proxy_protocol});
|
|
||||||
}
|
|
||||||
std::optional<uint16_t> alternator_https_port;
|
std::optional<uint16_t> alternator_https_port;
|
||||||
std::optional<uint16_t> alternator_https_port_proxy_protocol;
|
|
||||||
std::optional<tls::credentials_builder> creds;
|
std::optional<tls::credentials_builder> creds;
|
||||||
if (_config.alternator_https_port() || _config.alternator_https_port_proxy_protocol()) {
|
if (_config.alternator_https_port()) {
|
||||||
if (_config.alternator_https_port()) {
|
alternator_https_port = _config.alternator_https_port();
|
||||||
alternator_https_port = _config.alternator_https_port();
|
_listen_addresses.push_back({addr, *alternator_https_port});
|
||||||
_listen_addresses.push_back({addr, *alternator_https_port});
|
|
||||||
}
|
|
||||||
if (_config.alternator_https_port_proxy_protocol()) {
|
|
||||||
alternator_https_port_proxy_protocol = _config.alternator_https_port_proxy_protocol();
|
|
||||||
_listen_addresses.push_back({addr, *alternator_https_port_proxy_protocol});
|
|
||||||
}
|
|
||||||
creds.emplace();
|
creds.emplace();
|
||||||
auto opts = _config.alternator_encryption_options();
|
auto opts = _config.alternator_encryption_options();
|
||||||
if (opts.empty()) {
|
if (opts.empty()) {
|
||||||
@@ -147,29 +133,20 @@ future<> controller::start_server() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
_server.invoke_on_all(
|
_server.invoke_on_all(
|
||||||
[this, addr, alternator_port, alternator_https_port, alternator_port_proxy_protocol, alternator_https_port_proxy_protocol, creds = std::move(creds)] (server& server) mutable {
|
[this, addr, alternator_port, alternator_https_port, creds = std::move(creds)] (server& server) mutable {
|
||||||
return server.init(addr, alternator_port, alternator_https_port, alternator_port_proxy_protocol, alternator_https_port_proxy_protocol, creds,
|
return server.init(addr, alternator_port, alternator_https_port, creds,
|
||||||
_config.alternator_enforce_authorization,
|
_config.alternator_enforce_authorization,
|
||||||
_config.alternator_warn_authorization,
|
_config.alternator_warn_authorization,
|
||||||
_config.alternator_max_users_query_size_in_trace_output,
|
_config.alternator_max_users_query_size_in_trace_output,
|
||||||
&_memory_limiter.local().get_semaphore(),
|
&_memory_limiter.local().get_semaphore(),
|
||||||
_config.max_concurrent_requests_per_shard);
|
_config.max_concurrent_requests_per_shard);
|
||||||
}).handle_exception([this, addr, alternator_port, alternator_https_port, alternator_port_proxy_protocol, alternator_https_port_proxy_protocol] (std::exception_ptr ep) {
|
}).handle_exception([this, addr, alternator_port, alternator_https_port] (std::exception_ptr ep) {
|
||||||
logger.error("Failed to set up Alternator HTTP server on {} port {}, TLS port {}, proxy-protocol port {}, TLS proxy-protocol port {}: {}",
|
logger.error("Failed to set up Alternator HTTP server on {} port {}, TLS port {}: {}",
|
||||||
addr,
|
addr, alternator_port ? std::to_string(*alternator_port) : "OFF", alternator_https_port ? std::to_string(*alternator_https_port) : "OFF", ep);
|
||||||
alternator_port ? std::to_string(*alternator_port) : "OFF",
|
|
||||||
alternator_https_port ? std::to_string(*alternator_https_port) : "OFF",
|
|
||||||
alternator_port_proxy_protocol ? std::to_string(*alternator_port_proxy_protocol) : "OFF",
|
|
||||||
alternator_https_port_proxy_protocol ? std::to_string(*alternator_https_port_proxy_protocol) : "OFF",
|
|
||||||
ep);
|
|
||||||
return stop_server().then([ep = std::move(ep)] { return make_exception_future<>(ep); });
|
return stop_server().then([ep = std::move(ep)] { return make_exception_future<>(ep); });
|
||||||
}).then([addr, alternator_port, alternator_https_port, alternator_port_proxy_protocol, alternator_https_port_proxy_protocol] {
|
}).then([addr, alternator_port, alternator_https_port] {
|
||||||
logger.info("Alternator server listening on {}, HTTP port {}, HTTPS port {}, proxy-protocol port {}, TLS proxy-protocol port {}",
|
logger.info("Alternator server listening on {}, HTTP port {}, HTTPS port {}",
|
||||||
addr,
|
addr, alternator_port ? std::to_string(*alternator_port) : "OFF", alternator_https_port ? std::to_string(*alternator_https_port) : "OFF");
|
||||||
alternator_port ? std::to_string(*alternator_port) : "OFF",
|
|
||||||
alternator_https_port ? std::to_string(*alternator_https_port) : "OFF",
|
|
||||||
alternator_port_proxy_protocol ? std::to_string(*alternator_port_proxy_protocol) : "OFF",
|
|
||||||
alternator_https_port_proxy_protocol ? std::to_string(*alternator_https_port_proxy_protocol) : "OFF");
|
|
||||||
}).get();
|
}).get();
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
@@ -192,7 +169,7 @@ future<> controller::request_stop_server() {
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
future<utils::chunked_vector<foreign_ptr<std::unique_ptr<client_data>>>> controller::get_client_data() {
|
future<utils::chunked_vector<client_data>> controller::get_client_data() {
|
||||||
return _server.local().get_client_data();
|
return _server.local().get_client_data();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -15,7 +15,6 @@
|
|||||||
|
|
||||||
namespace service {
|
namespace service {
|
||||||
class storage_proxy;
|
class storage_proxy;
|
||||||
class storage_service;
|
|
||||||
class migration_manager;
|
class migration_manager;
|
||||||
class memory_limiter;
|
class memory_limiter;
|
||||||
}
|
}
|
||||||
@@ -58,7 +57,6 @@ class server;
|
|||||||
class controller : public protocol_server {
|
class controller : public protocol_server {
|
||||||
sharded<gms::gossiper>& _gossiper;
|
sharded<gms::gossiper>& _gossiper;
|
||||||
sharded<service::storage_proxy>& _proxy;
|
sharded<service::storage_proxy>& _proxy;
|
||||||
sharded<service::storage_service>& _ss;
|
|
||||||
sharded<service::migration_manager>& _mm;
|
sharded<service::migration_manager>& _mm;
|
||||||
sharded<db::system_distributed_keyspace>& _sys_dist_ks;
|
sharded<db::system_distributed_keyspace>& _sys_dist_ks;
|
||||||
sharded<cdc::generation_service>& _cdc_gen_svc;
|
sharded<cdc::generation_service>& _cdc_gen_svc;
|
||||||
@@ -76,7 +74,6 @@ public:
|
|||||||
controller(
|
controller(
|
||||||
sharded<gms::gossiper>& gossiper,
|
sharded<gms::gossiper>& gossiper,
|
||||||
sharded<service::storage_proxy>& proxy,
|
sharded<service::storage_proxy>& proxy,
|
||||||
sharded<service::storage_service>& ss,
|
|
||||||
sharded<service::migration_manager>& mm,
|
sharded<service::migration_manager>& mm,
|
||||||
sharded<db::system_distributed_keyspace>& sys_dist_ks,
|
sharded<db::system_distributed_keyspace>& sys_dist_ks,
|
||||||
sharded<cdc::generation_service>& cdc_gen_svc,
|
sharded<cdc::generation_service>& cdc_gen_svc,
|
||||||
@@ -96,7 +93,7 @@ public:
|
|||||||
// This virtual function is called (on each shard separately) when the
|
// This virtual function is called (on each shard separately) when the
|
||||||
// virtual table "system.clients" is read. It is expected to generate a
|
// virtual table "system.clients" is read. It is expected to generate a
|
||||||
// list of clients connected to this server (on this shard).
|
// list of clients connected to this server (on this shard).
|
||||||
virtual future<utils::chunked_vector<foreign_ptr<std::unique_ptr<client_data>>>> get_client_data() override;
|
virtual future<utils::chunked_vector<client_data>> get_client_data() override;
|
||||||
};
|
};
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -17,7 +17,6 @@
|
|||||||
#include "auth/service.hh"
|
#include "auth/service.hh"
|
||||||
#include "db/config.hh"
|
#include "db/config.hh"
|
||||||
#include "db/view/view_build_status.hh"
|
#include "db/view/view_build_status.hh"
|
||||||
#include "locator/tablets.hh"
|
|
||||||
#include "mutation/tombstone.hh"
|
#include "mutation/tombstone.hh"
|
||||||
#include "locator/abstract_replication_strategy.hh"
|
#include "locator/abstract_replication_strategy.hh"
|
||||||
#include "utils/log.hh"
|
#include "utils/log.hh"
|
||||||
@@ -63,20 +62,11 @@
|
|||||||
#include "types/types.hh"
|
#include "types/types.hh"
|
||||||
#include "db/system_keyspace.hh"
|
#include "db/system_keyspace.hh"
|
||||||
#include "cql3/statements/ks_prop_defs.hh"
|
#include "cql3/statements/ks_prop_defs.hh"
|
||||||
#include "alternator/ttl_tag.hh"
|
|
||||||
|
|
||||||
using namespace std::chrono_literals;
|
using namespace std::chrono_literals;
|
||||||
|
|
||||||
logging::logger elogger("alternator-executor");
|
logging::logger elogger("alternator-executor");
|
||||||
|
|
||||||
namespace std {
|
|
||||||
template <> struct hash<std::pair<sstring, sstring>> {
|
|
||||||
size_t operator () (const std::pair<sstring, sstring>& p) const {
|
|
||||||
return std::hash<sstring>()(p.first) * 1009 + std::hash<sstring>()(p.second) * 3;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
namespace alternator {
|
namespace alternator {
|
||||||
|
|
||||||
// Alternator-specific table properties stored as hidden table tags:
|
// Alternator-specific table properties stored as hidden table tags:
|
||||||
@@ -165,7 +155,7 @@ static map_type attrs_type() {
|
|||||||
|
|
||||||
static const column_definition& attrs_column(const schema& schema) {
|
static const column_definition& attrs_column(const schema& schema) {
|
||||||
const column_definition* cdef = schema.get_column_definition(bytes(executor::ATTRS_COLUMN_NAME));
|
const column_definition* cdef = schema.get_column_definition(bytes(executor::ATTRS_COLUMN_NAME));
|
||||||
throwing_assert(cdef);
|
SCYLLA_ASSERT(cdef);
|
||||||
return *cdef;
|
return *cdef;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -238,7 +228,7 @@ static void validate_is_object(const rjson::value& value, const char* caller) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// This function assumes the given value is an object and returns requested member value.
|
// This function assumes the given value is an object and returns requested member value.
|
||||||
// If it is not possible, an api_error::validation is thrown.
|
// If it is not possible an api_error::validation is thrown.
|
||||||
static const rjson::value& get_member(const rjson::value& obj, const char* member_name, const char* caller) {
|
static const rjson::value& get_member(const rjson::value& obj, const char* member_name, const char* caller) {
|
||||||
validate_is_object(obj, caller);
|
validate_is_object(obj, caller);
|
||||||
const rjson::value* ret = rjson::find(obj, member_name);
|
const rjson::value* ret = rjson::find(obj, member_name);
|
||||||
@@ -250,7 +240,7 @@ static const rjson::value& get_member(const rjson::value& obj, const char* membe
|
|||||||
|
|
||||||
|
|
||||||
// This function assumes the given value is an object with a single member, and returns this member.
|
// This function assumes the given value is an object with a single member, and returns this member.
|
||||||
// In case the requirements are not met, an api_error::validation is thrown.
|
// In case the requirements are not met an api_error::validation is thrown.
|
||||||
static const rjson::value::Member& get_single_member(const rjson::value& v, const char* caller) {
|
static const rjson::value::Member& get_single_member(const rjson::value& v, const char* caller) {
|
||||||
if (!v.IsObject() || v.MemberCount() != 1) {
|
if (!v.IsObject() || v.MemberCount() != 1) {
|
||||||
throw api_error::validation(format("{}: expected an object with a single member.", caller));
|
throw api_error::validation(format("{}: expected an object with a single member.", caller));
|
||||||
@@ -258,66 +248,14 @@ static const rjson::value::Member& get_single_member(const rjson::value& v, cons
|
|||||||
return *(v.MemberBegin());
|
return *(v.MemberBegin());
|
||||||
}
|
}
|
||||||
|
|
||||||
class executor::describe_table_info_manager : public service::migration_listener::empty_listener {
|
|
||||||
executor &_executor;
|
|
||||||
|
|
||||||
struct table_info {
|
|
||||||
utils::simple_value_with_expiry<std::uint64_t> size_in_bytes;
|
|
||||||
};
|
|
||||||
std::unordered_map<std::pair<sstring, sstring>, table_info> info_for_tables;
|
|
||||||
bool active = false;
|
|
||||||
|
|
||||||
public:
|
|
||||||
describe_table_info_manager(executor& executor) : _executor(executor) {
|
|
||||||
_executor._proxy.data_dictionary().real_database_ptr()->get_notifier().register_listener(this);
|
|
||||||
active = true;
|
|
||||||
}
|
|
||||||
describe_table_info_manager(const describe_table_info_manager &) = delete;
|
|
||||||
describe_table_info_manager(describe_table_info_manager&&) = delete;
|
|
||||||
~describe_table_info_manager() {
|
|
||||||
if (active) {
|
|
||||||
on_fatal_internal_error(elogger, "describe_table_info_manager was not stopped before destruction");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
describe_table_info_manager &operator = (const describe_table_info_manager &) = delete;
|
|
||||||
describe_table_info_manager &operator = (describe_table_info_manager&&) = delete;
|
|
||||||
|
|
||||||
static std::chrono::high_resolution_clock::time_point now() {
|
|
||||||
return std::chrono::high_resolution_clock::now();
|
|
||||||
}
|
|
||||||
|
|
||||||
std::optional<std::uint64_t> get_cached_size_in_bytes(const sstring &ks_name, const sstring &cf_name) const {
|
|
||||||
auto it = info_for_tables.find({ks_name, cf_name});
|
|
||||||
if (it != info_for_tables.end()) {
|
|
||||||
return it->second.size_in_bytes.get();
|
|
||||||
}
|
|
||||||
return std::nullopt;
|
|
||||||
}
|
|
||||||
void cache_size_in_bytes(sstring ks_name, sstring cf_name, std::uint64_t size_in_bytes, std::chrono::high_resolution_clock::time_point expiry) {
|
|
||||||
info_for_tables[{std::move(ks_name), std::move(cf_name)}].size_in_bytes.set_if_longer_expiry(size_in_bytes, expiry);
|
|
||||||
}
|
|
||||||
future<> stop() {
|
|
||||||
co_await _executor._proxy.data_dictionary().real_database_ptr()->get_notifier().unregister_listener(this);
|
|
||||||
active = false;
|
|
||||||
co_return;
|
|
||||||
}
|
|
||||||
void on_drop_column_family(const sstring& ks_name, const sstring& cf_name) override {
|
|
||||||
if (!ks_name.starts_with(executor::KEYSPACE_NAME_PREFIX)) return;
|
|
||||||
info_for_tables.erase({ks_name, cf_name});
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
executor::executor(gms::gossiper& gossiper,
|
executor::executor(gms::gossiper& gossiper,
|
||||||
service::storage_proxy& proxy,
|
service::storage_proxy& proxy,
|
||||||
service::storage_service& ss,
|
|
||||||
service::migration_manager& mm,
|
service::migration_manager& mm,
|
||||||
db::system_distributed_keyspace& sdks,
|
db::system_distributed_keyspace& sdks,
|
||||||
cdc::metadata& cdc_metadata,
|
cdc::metadata& cdc_metadata,
|
||||||
smp_service_group ssg,
|
smp_service_group ssg,
|
||||||
utils::updateable_value<uint32_t> default_timeout_in_ms)
|
utils::updateable_value<uint32_t> default_timeout_in_ms)
|
||||||
: _gossiper(gossiper),
|
: _gossiper(gossiper),
|
||||||
_ss(ss),
|
|
||||||
_proxy(proxy),
|
_proxy(proxy),
|
||||||
_mm(mm),
|
_mm(mm),
|
||||||
_sdks(sdks),
|
_sdks(sdks),
|
||||||
@@ -330,7 +268,6 @@ executor::executor(gms::gossiper& gossiper,
|
|||||||
_stats))
|
_stats))
|
||||||
{
|
{
|
||||||
s_default_timeout_in_ms = std::move(default_timeout_in_ms);
|
s_default_timeout_in_ms = std::move(default_timeout_in_ms);
|
||||||
_describe_table_info_manager = std::make_unique<describe_table_info_manager>(*this);
|
|
||||||
register_metrics(_metrics, _stats);
|
register_metrics(_metrics, _stats);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -482,7 +419,7 @@ static std::optional<std::string> find_table_name(const rjson::value& request) {
|
|||||||
if (!table_name_value->IsString()) {
|
if (!table_name_value->IsString()) {
|
||||||
throw api_error::validation("Non-string TableName field in request");
|
throw api_error::validation("Non-string TableName field in request");
|
||||||
}
|
}
|
||||||
std::string table_name = rjson::to_string(*table_name_value);
|
std::string table_name = table_name_value->GetString();
|
||||||
return table_name;
|
return table_name;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -609,7 +546,7 @@ get_table_or_view(service::storage_proxy& proxy, const rjson::value& request) {
|
|||||||
// does exist but the index does not (ValidationException).
|
// does exist but the index does not (ValidationException).
|
||||||
if (proxy.data_dictionary().has_schema(keyspace_name, orig_table_name)) {
|
if (proxy.data_dictionary().has_schema(keyspace_name, orig_table_name)) {
|
||||||
throw api_error::validation(
|
throw api_error::validation(
|
||||||
fmt::format("Requested resource not found: Index '{}' for table '{}'", rjson::to_string_view(*index_name), orig_table_name));
|
fmt::format("Requested resource not found: Index '{}' for table '{}'", index_name->GetString(), orig_table_name));
|
||||||
} else {
|
} else {
|
||||||
throw api_error::resource_not_found(
|
throw api_error::resource_not_found(
|
||||||
fmt::format("Requested resource not found: Table: {} not found", orig_table_name));
|
fmt::format("Requested resource not found: Table: {} not found", orig_table_name));
|
||||||
@@ -650,7 +587,7 @@ static std::string get_string_attribute(const rjson::value& value, std::string_v
|
|||||||
throw api_error::validation(fmt::format("Expected string value for attribute {}, got: {}",
|
throw api_error::validation(fmt::format("Expected string value for attribute {}, got: {}",
|
||||||
attribute_name, value));
|
attribute_name, value));
|
||||||
}
|
}
|
||||||
return rjson::to_string(*attribute_value);
|
return std::string(attribute_value->GetString(), attribute_value->GetStringLength());
|
||||||
}
|
}
|
||||||
|
|
||||||
// Convenience function for getting the value of a boolean attribute, or a
|
// Convenience function for getting the value of a boolean attribute, or a
|
||||||
@@ -683,7 +620,7 @@ static std::optional<int> get_int_attribute(const rjson::value& value, std::stri
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Sets a KeySchema object inside the given JSON parent describing the key
|
// Sets a KeySchema object inside the given JSON parent describing the key
|
||||||
// attributes of the given schema as being either HASH or RANGE keys.
|
// attributes of the the given schema as being either HASH or RANGE keys.
|
||||||
// Additionally, adds to a given map mappings between the key attribute
|
// Additionally, adds to a given map mappings between the key attribute
|
||||||
// names and their type (as a DynamoDB type string).
|
// names and their type (as a DynamoDB type string).
|
||||||
void executor::describe_key_schema(rjson::value& parent, const schema& schema, std::unordered_map<std::string,std::string>* attribute_types, const std::map<sstring, sstring> *tags) {
|
void executor::describe_key_schema(rjson::value& parent, const schema& schema, std::unordered_map<std::string,std::string>* attribute_types, const std::map<sstring, sstring> *tags) {
|
||||||
@@ -815,44 +752,12 @@ static future<bool> is_view_built(
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
future<> executor::cache_newly_calculated_size_on_all_shards(schema_ptr schema, std::uint64_t size_in_bytes, std::chrono::nanoseconds ttl) {
|
static future<rjson::value> fill_table_description(schema_ptr schema, table_status tbl_status, service::storage_proxy& proxy, service::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit)
|
||||||
auto expiry = describe_table_info_manager::now() + ttl;
|
|
||||||
return container().invoke_on_all(
|
|
||||||
[schema, size_in_bytes, expiry] (executor& exec) {
|
|
||||||
exec._describe_table_info_manager->cache_size_in_bytes(schema->ks_name(), schema->cf_name(), size_in_bytes, expiry);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
future<> executor::fill_table_size(rjson::value &table_description, schema_ptr schema, bool deleting) {
|
|
||||||
auto cached_size = _describe_table_info_manager->get_cached_size_in_bytes(schema->ks_name(), schema->cf_name());
|
|
||||||
std::uint64_t total_size = 0;
|
|
||||||
if (cached_size) {
|
|
||||||
total_size = *cached_size;
|
|
||||||
} else {
|
|
||||||
// there's no point in trying to estimate value of table that is being deleted, as other nodes more often than not might
|
|
||||||
// move forward with deletion faster than we calculate the size
|
|
||||||
if (!deleting) {
|
|
||||||
total_size = co_await _ss.estimate_total_sstable_volume(schema->id(), service::storage_service::ignore_errors::yes);
|
|
||||||
const auto expiry = std::chrono::seconds{ _proxy.data_dictionary().get_config().alternator_describe_table_info_cache_validity_in_seconds() };
|
|
||||||
// Note: we don't care when the notification of other shards will finish, as long as it will be done
|
|
||||||
// it's possible to get into race condition (next DescribeTable comes to other shard, that new shard doesn't have
|
|
||||||
// the size yet, so it will calculate it again) - this is not a problem, because it will call cache_newly_calculated_size_on_all_shards
|
|
||||||
// with expiry, which is extremely unlikely to be exactly the same as the previous one, all shards will keep the size coming with expiry that is further into the future.
|
|
||||||
// In case of the same expiry, some shards will have different size, which means DescribeTable will return different values depending on the shard
|
|
||||||
// which is also fine, as the specification doesn't give precision guarantees of any kind.
|
|
||||||
co_await cache_newly_calculated_size_on_all_shards(schema, total_size, expiry);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
rjson::add(table_description, "TableSizeBytes", total_size);
|
|
||||||
}
|
|
||||||
|
|
||||||
future<rjson::value> executor::fill_table_description(schema_ptr schema, table_status tbl_status, service::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit)
|
|
||||||
{
|
{
|
||||||
rjson::value table_description = rjson::empty_object();
|
rjson::value table_description = rjson::empty_object();
|
||||||
auto tags_ptr = db::get_tags_of_table(schema);
|
auto tags_ptr = db::get_tags_of_table(schema);
|
||||||
|
|
||||||
rjson::add(table_description, "TableName", rjson::from_string(schema->cf_name()));
|
rjson::add(table_description, "TableName", rjson::from_string(schema->cf_name()));
|
||||||
co_await fill_table_size(table_description, schema, tbl_status == table_status::deleting);
|
|
||||||
|
|
||||||
auto creation_timestamp = get_table_creation_time(*schema);
|
auto creation_timestamp = get_table_creation_time(*schema);
|
||||||
|
|
||||||
@@ -896,7 +801,9 @@ future<rjson::value> executor::fill_table_description(schema_ptr schema, table_s
|
|||||||
rjson::add(table_description["ProvisionedThroughput"], "WriteCapacityUnits", wcu);
|
rjson::add(table_description["ProvisionedThroughput"], "WriteCapacityUnits", wcu);
|
||||||
rjson::add(table_description["ProvisionedThroughput"], "NumberOfDecreasesToday", 0);
|
rjson::add(table_description["ProvisionedThroughput"], "NumberOfDecreasesToday", 0);
|
||||||
|
|
||||||
data_dictionary::table t = _proxy.data_dictionary().find_column_family(schema);
|
|
||||||
|
|
||||||
|
data_dictionary::table t = proxy.data_dictionary().find_column_family(schema);
|
||||||
|
|
||||||
if (tbl_status != table_status::deleting) {
|
if (tbl_status != table_status::deleting) {
|
||||||
rjson::add(table_description, "CreationDateTime", rjson::value(creation_timestamp));
|
rjson::add(table_description, "CreationDateTime", rjson::value(creation_timestamp));
|
||||||
@@ -917,7 +824,7 @@ future<rjson::value> executor::fill_table_description(schema_ptr schema, table_s
|
|||||||
sstring index_name = cf_name.substr(delim_it + 1);
|
sstring index_name = cf_name.substr(delim_it + 1);
|
||||||
rjson::add(view_entry, "IndexName", rjson::from_string(index_name));
|
rjson::add(view_entry, "IndexName", rjson::from_string(index_name));
|
||||||
rjson::add(view_entry, "IndexArn", generate_arn_for_index(*schema, index_name));
|
rjson::add(view_entry, "IndexArn", generate_arn_for_index(*schema, index_name));
|
||||||
// Add index's KeySchema and collect types for AttributeDefinitions:
|
// Add indexes's KeySchema and collect types for AttributeDefinitions:
|
||||||
executor::describe_key_schema(view_entry, *vptr, key_attribute_types, db::get_tags_of_table(vptr));
|
executor::describe_key_schema(view_entry, *vptr, key_attribute_types, db::get_tags_of_table(vptr));
|
||||||
// Add projection type
|
// Add projection type
|
||||||
rjson::value projection = rjson::empty_object();
|
rjson::value projection = rjson::empty_object();
|
||||||
@@ -933,7 +840,7 @@ future<rjson::value> executor::fill_table_description(schema_ptr schema, table_s
|
|||||||
// (for a built view) or CREATING+Backfilling (if view building
|
// (for a built view) or CREATING+Backfilling (if view building
|
||||||
// is in progress).
|
// is in progress).
|
||||||
if (!is_lsi) {
|
if (!is_lsi) {
|
||||||
if (co_await is_view_built(vptr, _proxy, client_state, trace_state, permit)) {
|
if (co_await is_view_built(vptr, proxy, client_state, trace_state, permit)) {
|
||||||
rjson::add(view_entry, "IndexStatus", "ACTIVE");
|
rjson::add(view_entry, "IndexStatus", "ACTIVE");
|
||||||
} else {
|
} else {
|
||||||
rjson::add(view_entry, "IndexStatus", "CREATING");
|
rjson::add(view_entry, "IndexStatus", "CREATING");
|
||||||
@@ -961,8 +868,9 @@ future<rjson::value> executor::fill_table_description(schema_ptr schema, table_s
|
|||||||
}
|
}
|
||||||
rjson::add(table_description, "AttributeDefinitions", std::move(attribute_definitions));
|
rjson::add(table_description, "AttributeDefinitions", std::move(attribute_definitions));
|
||||||
}
|
}
|
||||||
executor::supplement_table_stream_info(table_description, *schema, _proxy);
|
executor::supplement_table_stream_info(table_description, *schema, proxy);
|
||||||
|
|
||||||
|
// FIXME: still missing some response fields (issue #5026)
|
||||||
co_return table_description;
|
co_return table_description;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -982,7 +890,7 @@ future<executor::request_return_type> executor::describe_table(client_state& cli
|
|||||||
get_stats_from_schema(_proxy, *schema)->api_operations.describe_table++;
|
get_stats_from_schema(_proxy, *schema)->api_operations.describe_table++;
|
||||||
tracing::add_alternator_table_name(trace_state, schema->cf_name());
|
tracing::add_alternator_table_name(trace_state, schema->cf_name());
|
||||||
|
|
||||||
rjson::value table_description = co_await fill_table_description(schema, table_status::active, client_state, trace_state, permit);
|
rjson::value table_description = co_await fill_table_description(schema, table_status::active, _proxy, client_state, trace_state, permit);
|
||||||
rjson::value response = rjson::empty_object();
|
rjson::value response = rjson::empty_object();
|
||||||
rjson::add(response, "Table", std::move(table_description));
|
rjson::add(response, "Table", std::move(table_description));
|
||||||
elogger.trace("returning {}", response);
|
elogger.trace("returning {}", response);
|
||||||
@@ -1085,7 +993,7 @@ future<executor::request_return_type> executor::delete_table(client_state& clien
|
|||||||
auto& p = _proxy.container();
|
auto& p = _proxy.container();
|
||||||
|
|
||||||
schema_ptr schema = get_table(_proxy, request);
|
schema_ptr schema = get_table(_proxy, request);
|
||||||
rjson::value table_description = co_await fill_table_description(schema, table_status::deleting, client_state, trace_state, permit);
|
rjson::value table_description = co_await fill_table_description(schema, table_status::deleting, _proxy, client_state, trace_state, permit);
|
||||||
co_await verify_permission(_enforce_authorization, _warn_authorization, client_state, schema, auth::permission::DROP, _stats);
|
co_await verify_permission(_enforce_authorization, _warn_authorization, client_state, schema, auth::permission::DROP, _stats);
|
||||||
co_await _mm.container().invoke_on(0, [&, cs = client_state.move_to_other_shard()] (service::migration_manager& mm) -> future<> {
|
co_await _mm.container().invoke_on(0, [&, cs = client_state.move_to_other_shard()] (service::migration_manager& mm) -> future<> {
|
||||||
size_t retries = mm.get_concurrent_ddl_retries();
|
size_t retries = mm.get_concurrent_ddl_retries();
|
||||||
@@ -1172,8 +1080,8 @@ static void add_column(schema_builder& builder, const std::string& name, const r
|
|||||||
}
|
}
|
||||||
for (auto it = attribute_definitions.Begin(); it != attribute_definitions.End(); ++it) {
|
for (auto it = attribute_definitions.Begin(); it != attribute_definitions.End(); ++it) {
|
||||||
const rjson::value& attribute_info = *it;
|
const rjson::value& attribute_info = *it;
|
||||||
if (rjson::to_string_view(attribute_info["AttributeName"]) == name) {
|
if (attribute_info["AttributeName"].GetString() == name) {
|
||||||
std::string_view type = rjson::to_string_view(attribute_info["AttributeType"]);
|
auto type = attribute_info["AttributeType"].GetString();
|
||||||
data_type dt = parse_key_type(type);
|
data_type dt = parse_key_type(type);
|
||||||
if (computed_column) {
|
if (computed_column) {
|
||||||
// Computed column for GSI (doesn't choose a real column as-is
|
// Computed column for GSI (doesn't choose a real column as-is
|
||||||
@@ -1208,7 +1116,7 @@ static std::pair<std::string, std::string> parse_key_schema(const rjson::value&
|
|||||||
throw api_error::validation("First element of KeySchema must be an object");
|
throw api_error::validation("First element of KeySchema must be an object");
|
||||||
}
|
}
|
||||||
const rjson::value *v = rjson::find((*key_schema)[0], "KeyType");
|
const rjson::value *v = rjson::find((*key_schema)[0], "KeyType");
|
||||||
if (!v || !v->IsString() || rjson::to_string_view(*v) != "HASH") {
|
if (!v || !v->IsString() || v->GetString() != std::string("HASH")) {
|
||||||
throw api_error::validation("First key in KeySchema must be a HASH key");
|
throw api_error::validation("First key in KeySchema must be a HASH key");
|
||||||
}
|
}
|
||||||
v = rjson::find((*key_schema)[0], "AttributeName");
|
v = rjson::find((*key_schema)[0], "AttributeName");
|
||||||
@@ -1216,14 +1124,14 @@ static std::pair<std::string, std::string> parse_key_schema(const rjson::value&
|
|||||||
throw api_error::validation("First key in KeySchema must have string AttributeName");
|
throw api_error::validation("First key in KeySchema must have string AttributeName");
|
||||||
}
|
}
|
||||||
validate_attr_name_length(supplementary_context, v->GetStringLength(), true, "HASH key in KeySchema - ");
|
validate_attr_name_length(supplementary_context, v->GetStringLength(), true, "HASH key in KeySchema - ");
|
||||||
std::string hash_key = rjson::to_string(*v);
|
std::string hash_key = v->GetString();
|
||||||
std::string range_key;
|
std::string range_key;
|
||||||
if (key_schema->Size() == 2) {
|
if (key_schema->Size() == 2) {
|
||||||
if (!(*key_schema)[1].IsObject()) {
|
if (!(*key_schema)[1].IsObject()) {
|
||||||
throw api_error::validation("Second element of KeySchema must be an object");
|
throw api_error::validation("Second element of KeySchema must be an object");
|
||||||
}
|
}
|
||||||
v = rjson::find((*key_schema)[1], "KeyType");
|
v = rjson::find((*key_schema)[1], "KeyType");
|
||||||
if (!v || !v->IsString() || rjson::to_string_view(*v) != "RANGE") {
|
if (!v || !v->IsString() || v->GetString() != std::string("RANGE")) {
|
||||||
throw api_error::validation("Second key in KeySchema must be a RANGE key");
|
throw api_error::validation("Second key in KeySchema must be a RANGE key");
|
||||||
}
|
}
|
||||||
v = rjson::find((*key_schema)[1], "AttributeName");
|
v = rjson::find((*key_schema)[1], "AttributeName");
|
||||||
@@ -1649,8 +1557,9 @@ static future<> mark_view_schemas_as_built(utils::chunked_vector<mutation>& out,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
future<executor::request_return_type> executor::create_table_on_shard0(service::client_state&& client_state, tracing::trace_state_ptr trace_state, rjson::value request, bool enforce_authorization, bool warn_authorization, const db::tablets_mode_t::mode tablets_mode) {
|
static future<executor::request_return_type> create_table_on_shard0(service::client_state&& client_state, tracing::trace_state_ptr trace_state, rjson::value request,
|
||||||
throwing_assert(this_shard_id() == 0);
|
service::storage_proxy& sp, service::migration_manager& mm, gms::gossiper& gossiper, bool enforce_authorization, bool warn_authorization, stats& stats, const db::tablets_mode_t::mode tablets_mode) {
|
||||||
|
SCYLLA_ASSERT(this_shard_id() == 0);
|
||||||
|
|
||||||
// We begin by parsing and validating the content of the CreateTable
|
// We begin by parsing and validating the content of the CreateTable
|
||||||
// command. We can't inspect the current database schema at this point
|
// command. We can't inspect the current database schema at this point
|
||||||
@@ -1836,7 +1745,7 @@ future<executor::request_return_type> executor::create_table_on_shard0(service::
|
|||||||
|
|
||||||
rjson::value* stream_specification = rjson::find(request, "StreamSpecification");
|
rjson::value* stream_specification = rjson::find(request, "StreamSpecification");
|
||||||
if (stream_specification && stream_specification->IsObject()) {
|
if (stream_specification && stream_specification->IsObject()) {
|
||||||
if (executor::add_stream_options(*stream_specification, builder, _proxy)) {
|
if (executor::add_stream_options(*stream_specification, builder, sp)) {
|
||||||
validate_cdc_log_name_length(builder.cf_name());
|
validate_cdc_log_name_length(builder.cf_name());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1855,7 +1764,7 @@ future<executor::request_return_type> executor::create_table_on_shard0(service::
|
|||||||
set_table_creation_time(tags_map, db_clock::now());
|
set_table_creation_time(tags_map, db_clock::now());
|
||||||
builder.add_extension(db::tags_extension::NAME, ::make_shared<db::tags_extension>(tags_map));
|
builder.add_extension(db::tags_extension::NAME, ::make_shared<db::tags_extension>(tags_map));
|
||||||
|
|
||||||
co_await verify_create_permission(enforce_authorization, warn_authorization, client_state, _stats);
|
co_await verify_create_permission(enforce_authorization, warn_authorization, client_state, stats);
|
||||||
|
|
||||||
schema_ptr schema = builder.build();
|
schema_ptr schema = builder.build();
|
||||||
for (auto& view_builder : view_builders) {
|
for (auto& view_builder : view_builders) {
|
||||||
@@ -1871,49 +1780,33 @@ future<executor::request_return_type> executor::create_table_on_shard0(service::
|
|||||||
view_builder.with_view_info(schema, include_all_columns, ""/*where clause*/);
|
view_builder.with_view_info(schema, include_all_columns, ""/*where clause*/);
|
||||||
}
|
}
|
||||||
|
|
||||||
size_t retries = _mm.get_concurrent_ddl_retries();
|
size_t retries = mm.get_concurrent_ddl_retries();
|
||||||
for (;;) {
|
for (;;) {
|
||||||
auto group0_guard = co_await _mm.start_group0_operation();
|
auto group0_guard = co_await mm.start_group0_operation();
|
||||||
auto ts = group0_guard.write_timestamp();
|
auto ts = group0_guard.write_timestamp();
|
||||||
utils::chunked_vector<mutation> schema_mutations;
|
utils::chunked_vector<mutation> schema_mutations;
|
||||||
auto ksm = create_keyspace_metadata(keyspace_name, _proxy, _gossiper, ts, tags_map, _proxy.features(), tablets_mode);
|
auto ksm = create_keyspace_metadata(keyspace_name, sp, gossiper, ts, tags_map, sp.features(), tablets_mode);
|
||||||
locator::replication_strategy_params params(ksm->strategy_options(), ksm->initial_tablets(), ksm->consistency_option());
|
|
||||||
const auto& topo = _proxy.local_db().get_token_metadata().get_topology();
|
|
||||||
auto rs = locator::abstract_replication_strategy::create_replication_strategy(ksm->strategy_name(), params, topo);
|
|
||||||
// Alternator Streams doesn't yet work when the table uses tablets (#23838)
|
// Alternator Streams doesn't yet work when the table uses tablets (#23838)
|
||||||
if (stream_specification && stream_specification->IsObject()) {
|
if (stream_specification && stream_specification->IsObject()) {
|
||||||
auto stream_enabled = rjson::find(*stream_specification, "StreamEnabled");
|
auto stream_enabled = rjson::find(*stream_specification, "StreamEnabled");
|
||||||
if (stream_enabled && stream_enabled->IsBool() && stream_enabled->GetBool()) {
|
if (stream_enabled && stream_enabled->IsBool() && stream_enabled->GetBool()) {
|
||||||
|
locator::replication_strategy_params params(ksm->strategy_options(), ksm->initial_tablets(), ksm->consistency_option());
|
||||||
|
const auto& topo = sp.local_db().get_token_metadata().get_topology();
|
||||||
|
auto rs = locator::abstract_replication_strategy::create_replication_strategy(ksm->strategy_name(), params, topo);
|
||||||
if (rs->uses_tablets()) {
|
if (rs->uses_tablets()) {
|
||||||
co_return api_error::validation("Streams not yet supported on a table using tablets (issue #23838). "
|
co_return api_error::validation("Streams not yet supported on a table using tablets (issue #23838). "
|
||||||
"If you want to use streams, create a table with vnodes by setting the tag 'system:initial_tablets' set to 'none'.");
|
"If you want to use streams, create a table with vnodes by setting the tag 'system:initial_tablets' set to 'none'.");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Creating an index in tablets mode requires the keyspace to be RF-rack-valid.
|
|
||||||
// GSI and LSI indexes are based on materialized views which require RF-rack-validity to avoid consistency issues.
|
|
||||||
if (!view_builders.empty() || _proxy.data_dictionary().get_config().rf_rack_valid_keyspaces()) {
|
|
||||||
try {
|
|
||||||
locator::assert_rf_rack_valid_keyspace(keyspace_name, _proxy.local_db().get_token_metadata_ptr(), *rs);
|
|
||||||
} catch (const std::invalid_argument& ex) {
|
|
||||||
if (!view_builders.empty()) {
|
|
||||||
co_return api_error::validation(fmt::format("GlobalSecondaryIndexes and LocalSecondaryIndexes on a table "
|
|
||||||
"using tablets require the number of racks in the cluster to be either 1 or 3"));
|
|
||||||
} else {
|
|
||||||
co_return api_error::validation(fmt::format("Cannot create table '{}' with tablets: the configuration "
|
|
||||||
"option 'rf_rack_valid_keyspaces' is enabled, which enforces that tables using tablets can only be created in clusters "
|
|
||||||
"that have either 1 or 3 racks", table_name));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
try {
|
try {
|
||||||
schema_mutations = service::prepare_new_keyspace_announcement(_proxy.local_db(), ksm, ts);
|
schema_mutations = service::prepare_new_keyspace_announcement(sp.local_db(), ksm, ts);
|
||||||
} catch (exceptions::already_exists_exception&) {
|
} catch (exceptions::already_exists_exception&) {
|
||||||
if (_proxy.data_dictionary().has_schema(keyspace_name, table_name)) {
|
if (sp.data_dictionary().has_schema(keyspace_name, table_name)) {
|
||||||
co_return api_error::resource_in_use(fmt::format("Table {} already exists", table_name));
|
co_return api_error::resource_in_use(fmt::format("Table {} already exists", table_name));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (_proxy.data_dictionary().try_find_table(schema->id())) {
|
if (sp.data_dictionary().try_find_table(schema->id())) {
|
||||||
// This should never happen, the ID is supposed to be unique
|
// This should never happen, the ID is supposed to be unique
|
||||||
co_return api_error::internal(format("Table with ID {} already exists", schema->id()));
|
co_return api_error::internal(format("Table with ID {} already exists", schema->id()));
|
||||||
}
|
}
|
||||||
@@ -1922,9 +1815,9 @@ future<executor::request_return_type> executor::create_table_on_shard0(service::
|
|||||||
for (schema_builder& view_builder : view_builders) {
|
for (schema_builder& view_builder : view_builders) {
|
||||||
schemas.push_back(view_builder.build());
|
schemas.push_back(view_builder.build());
|
||||||
}
|
}
|
||||||
co_await service::prepare_new_column_families_announcement(schema_mutations, _proxy, *ksm, schemas, ts);
|
co_await service::prepare_new_column_families_announcement(schema_mutations, sp, *ksm, schemas, ts);
|
||||||
if (ksm->uses_tablets()) {
|
if (ksm->uses_tablets()) {
|
||||||
co_await mark_view_schemas_as_built(schema_mutations, schemas, ts, _proxy);
|
co_await mark_view_schemas_as_built(schema_mutations, schemas, ts, sp);
|
||||||
}
|
}
|
||||||
|
|
||||||
// If a role is allowed to create a table, we must give it permissions to
|
// If a role is allowed to create a table, we must give it permissions to
|
||||||
@@ -1949,7 +1842,7 @@ future<executor::request_return_type> executor::create_table_on_shard0(service::
|
|||||||
}
|
}
|
||||||
std::tie(schema_mutations, group0_guard) = co_await std::move(mc).extract();
|
std::tie(schema_mutations, group0_guard) = co_await std::move(mc).extract();
|
||||||
try {
|
try {
|
||||||
co_await _mm.announce(std::move(schema_mutations), std::move(group0_guard), fmt::format("alternator-executor: create {} table", table_name));
|
co_await mm.announce(std::move(schema_mutations), std::move(group0_guard), fmt::format("alternator-executor: create {} table", table_name));
|
||||||
break;
|
break;
|
||||||
} catch (const service::group0_concurrent_modification& ex) {
|
} catch (const service::group0_concurrent_modification& ex) {
|
||||||
elogger.info("Failed to execute CreateTable {} due to concurrent schema modifications. {}.",
|
elogger.info("Failed to execute CreateTable {} due to concurrent schema modifications. {}.",
|
||||||
@@ -1961,9 +1854,9 @@ future<executor::request_return_type> executor::create_table_on_shard0(service::
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
co_await _mm.wait_for_schema_agreement(_proxy.local_db(), db::timeout_clock::now() + 10s, nullptr);
|
co_await mm.wait_for_schema_agreement(sp.local_db(), db::timeout_clock::now() + 10s, nullptr);
|
||||||
rjson::value status = rjson::empty_object();
|
rjson::value status = rjson::empty_object();
|
||||||
executor::supplement_table_info(request, *schema, _proxy);
|
executor::supplement_table_info(request, *schema, sp);
|
||||||
rjson::add(status, "TableDescription", std::move(request));
|
rjson::add(status, "TableDescription", std::move(request));
|
||||||
co_return rjson::print(std::move(status));
|
co_return rjson::print(std::move(status));
|
||||||
}
|
}
|
||||||
@@ -1972,11 +1865,10 @@ future<executor::request_return_type> executor::create_table(client_state& clien
|
|||||||
_stats.api_operations.create_table++;
|
_stats.api_operations.create_table++;
|
||||||
elogger.trace("Creating table {}", request);
|
elogger.trace("Creating table {}", request);
|
||||||
|
|
||||||
co_return co_await _mm.container().invoke_on(0, [&, tr = tracing::global_trace_state_ptr(trace_state), request = std::move(request), &e = this->container(), client_state_other_shard = client_state.move_to_other_shard(), enforce_authorization = bool(_enforce_authorization), warn_authorization = bool(_warn_authorization)]
|
co_return co_await _mm.container().invoke_on(0, [&, tr = tracing::global_trace_state_ptr(trace_state), request = std::move(request), &sp = _proxy.container(), &g = _gossiper.container(), &e = this->container(), client_state_other_shard = client_state.move_to_other_shard(), enforce_authorization = bool(_enforce_authorization), warn_authorization = bool(_warn_authorization)]
|
||||||
(service::migration_manager& mm) mutable -> future<executor::request_return_type> {
|
(service::migration_manager& mm) mutable -> future<executor::request_return_type> {
|
||||||
const db::tablets_mode_t::mode tablets_mode = _proxy.data_dictionary().get_config().tablets_mode_for_new_keyspaces(); // type cast
|
const db::tablets_mode_t::mode tablets_mode = _proxy.data_dictionary().get_config().tablets_mode_for_new_keyspaces(); // type cast
|
||||||
// `invoke_on` hopped us to shard 0, but `this` points to `executor` is from 'old' shard, we need to hop it too.
|
co_return co_await create_table_on_shard0(client_state_other_shard.get(), tr, std::move(request), sp.local(), mm, g.local(), enforce_authorization, warn_authorization, e.local()._stats, std::move(tablets_mode));
|
||||||
co_return co_await e.local().create_table_on_shard0(client_state_other_shard.get(), tr, std::move(request), enforce_authorization, warn_authorization, std::move(tablets_mode));
|
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1995,8 +1887,8 @@ future<executor::request_return_type> executor::create_table(client_state& clien
|
|||||||
std::string def_type = type_to_string(def.type);
|
std::string def_type = type_to_string(def.type);
|
||||||
for (auto it = attribute_definitions.Begin(); it != attribute_definitions.End(); ++it) {
|
for (auto it = attribute_definitions.Begin(); it != attribute_definitions.End(); ++it) {
|
||||||
const rjson::value& attribute_info = *it;
|
const rjson::value& attribute_info = *it;
|
||||||
if (rjson::to_string_view(attribute_info["AttributeName"]) == def.name_as_text()) {
|
if (attribute_info["AttributeName"].GetString() == def.name_as_text()) {
|
||||||
std::string_view type = rjson::to_string_view(attribute_info["AttributeType"]);
|
auto type = attribute_info["AttributeType"].GetString();
|
||||||
if (type != def_type) {
|
if (type != def_type) {
|
||||||
throw api_error::validation(fmt::format("AttributeDefinitions redefined {} to {} already a key attribute of type {} in this table", def.name_as_text(), type, def_type));
|
throw api_error::validation(fmt::format("AttributeDefinitions redefined {} to {} already a key attribute of type {} in this table", def.name_as_text(), type, def_type));
|
||||||
}
|
}
|
||||||
@@ -2127,13 +2019,6 @@ future<executor::request_return_type> executor::update_table(client_state& clien
|
|||||||
co_return api_error::validation(fmt::format(
|
co_return api_error::validation(fmt::format(
|
||||||
"LSI {} already exists in table {}, can't use same name for GSI", index_name, table_name));
|
"LSI {} already exists in table {}, can't use same name for GSI", index_name, table_name));
|
||||||
}
|
}
|
||||||
try {
|
|
||||||
locator::assert_rf_rack_valid_keyspace(keyspace_name, p.local().local_db().get_token_metadata_ptr(),
|
|
||||||
p.local().local_db().find_keyspace(keyspace_name).get_replication_strategy());
|
|
||||||
} catch (const std::invalid_argument& ex) {
|
|
||||||
co_return api_error::validation(fmt::format("GlobalSecondaryIndexes on a table "
|
|
||||||
"using tablets require the number of racks in the cluster to be either 1 or 3"));
|
|
||||||
}
|
|
||||||
|
|
||||||
elogger.trace("Adding GSI {}", index_name);
|
elogger.trace("Adding GSI {}", index_name);
|
||||||
// FIXME: read and handle "Projection" parameter. This will
|
// FIXME: read and handle "Projection" parameter. This will
|
||||||
@@ -2436,7 +2321,7 @@ std::unordered_map<bytes, std::string> si_key_attributes(data_dictionary::table
|
|||||||
// case, this function simply won't be called for this attribute.)
|
// case, this function simply won't be called for this attribute.)
|
||||||
//
|
//
|
||||||
// This function checks if the given attribute update is an update to some
|
// This function checks if the given attribute update is an update to some
|
||||||
// GSI's key, and if the value is unsuitable, an api_error::validation is
|
// GSI's key, and if the value is unsuitable, a api_error::validation is
|
||||||
// thrown. The checking here is similar to the checking done in
|
// thrown. The checking here is similar to the checking done in
|
||||||
// get_key_from_typed_value() for the base table's key columns.
|
// get_key_from_typed_value() for the base table's key columns.
|
||||||
//
|
//
|
||||||
@@ -2477,7 +2362,7 @@ put_or_delete_item::put_or_delete_item(const rjson::value& item, schema_ptr sche
|
|||||||
_cells = std::vector<cell>();
|
_cells = std::vector<cell>();
|
||||||
_cells->reserve(item.MemberCount());
|
_cells->reserve(item.MemberCount());
|
||||||
for (auto it = item.MemberBegin(); it != item.MemberEnd(); ++it) {
|
for (auto it = item.MemberBegin(); it != item.MemberEnd(); ++it) {
|
||||||
bytes column_name = to_bytes(rjson::to_string_view(it->name));
|
bytes column_name = to_bytes(it->name.GetString());
|
||||||
validate_value(it->value, "PutItem");
|
validate_value(it->value, "PutItem");
|
||||||
const column_definition* cdef = find_attribute(*schema, column_name);
|
const column_definition* cdef = find_attribute(*schema, column_name);
|
||||||
validate_attr_name_length("", column_name.size(), cdef && cdef->is_primary_key());
|
validate_attr_name_length("", column_name.size(), cdef && cdef->is_primary_key());
|
||||||
@@ -2838,12 +2723,14 @@ future<executor::request_return_type> rmw_operation::execute(service::storage_pr
|
|||||||
}
|
}
|
||||||
} else if (_write_isolation != write_isolation::LWT_ALWAYS) {
|
} else if (_write_isolation != write_isolation::LWT_ALWAYS) {
|
||||||
std::optional<mutation> m = apply(nullptr, api::new_timestamp(), cdc_opts);
|
std::optional<mutation> m = apply(nullptr, api::new_timestamp(), cdc_opts);
|
||||||
throwing_assert(m); // !needs_read_before_write, so apply() did not check a condition
|
SCYLLA_ASSERT(m); // !needs_read_before_write, so apply() did not check a condition
|
||||||
return proxy.mutate(utils::chunked_vector<mutation>{std::move(*m)}, db::consistency_level::LOCAL_QUORUM, executor::default_timeout(), trace_state, std::move(permit), db::allow_per_partition_rate_limit::yes, false, std::move(cdc_opts)).then([this, &wcu_total] () mutable {
|
return proxy.mutate(utils::chunked_vector<mutation>{std::move(*m)}, db::consistency_level::LOCAL_QUORUM, executor::default_timeout(), trace_state, std::move(permit), db::allow_per_partition_rate_limit::yes, false, std::move(cdc_opts)).then([this, &wcu_total] () mutable {
|
||||||
return rmw_operation_return(std::move(_return_attributes), _consumed_capacity, wcu_total);
|
return rmw_operation_return(std::move(_return_attributes), _consumed_capacity, wcu_total);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
throwing_assert(cas_shard);
|
if (!cas_shard) {
|
||||||
|
on_internal_error(elogger, "cas_shard is not set");
|
||||||
|
}
|
||||||
// If we're still here, we need to do this write using LWT:
|
// If we're still here, we need to do this write using LWT:
|
||||||
global_stats.write_using_lwt++;
|
global_stats.write_using_lwt++;
|
||||||
per_table_stats.write_using_lwt++;
|
per_table_stats.write_using_lwt++;
|
||||||
@@ -2896,10 +2783,10 @@ static void verify_all_are_used(const rjson::value* field,
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
for (auto it = field->MemberBegin(); it != field->MemberEnd(); ++it) {
|
for (auto it = field->MemberBegin(); it != field->MemberEnd(); ++it) {
|
||||||
if (!used.contains(rjson::to_string(it->name))) {
|
if (!used.contains(it->name.GetString())) {
|
||||||
throw api_error::validation(
|
throw api_error::validation(
|
||||||
format("{} has spurious '{}', not used in {}",
|
format("{} has spurious '{}', not used in {}",
|
||||||
field_name, rjson::to_string_view(it->name), operation));
|
field_name, it->name.GetString(), operation));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -3113,7 +3000,7 @@ future<executor::request_return_type> executor::delete_item(client_state& client
|
|||||||
}
|
}
|
||||||
|
|
||||||
static schema_ptr get_table_from_batch_request(const service::storage_proxy& proxy, const rjson::value::ConstMemberIterator& batch_request) {
|
static schema_ptr get_table_from_batch_request(const service::storage_proxy& proxy, const rjson::value::ConstMemberIterator& batch_request) {
|
||||||
sstring table_name = rjson::to_sstring(batch_request->name); // JSON keys are always strings
|
sstring table_name = batch_request->name.GetString(); // JSON keys are always strings
|
||||||
try {
|
try {
|
||||||
return proxy.data_dictionary().find_schema(sstring(executor::KEYSPACE_NAME_PREFIX) + table_name, table_name);
|
return proxy.data_dictionary().find_schema(sstring(executor::KEYSPACE_NAME_PREFIX) + table_name, table_name);
|
||||||
} catch(data_dictionary::no_such_column_family&) {
|
} catch(data_dictionary::no_such_column_family&) {
|
||||||
@@ -3168,44 +3055,17 @@ public:
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
future<> executor::cas_write(schema_ptr schema, service::cas_shard cas_shard, const dht::decorated_key& dk,
|
static future<> cas_write(service::storage_proxy& proxy, schema_ptr schema, service::cas_shard cas_shard, const dht::decorated_key& dk, const std::vector<put_or_delete_item>& mutation_builders,
|
||||||
const std::vector<put_or_delete_item>& mutation_builders, service::client_state& client_state,
|
service::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit) {
|
||||||
tracing::trace_state_ptr trace_state, service_permit permit)
|
|
||||||
{
|
|
||||||
if (!cas_shard.this_shard()) {
|
|
||||||
_stats.shard_bounce_for_lwt++;
|
|
||||||
return container().invoke_on(cas_shard.shard(), _ssg,
|
|
||||||
[cs = client_state.move_to_other_shard(),
|
|
||||||
&mb = mutation_builders,
|
|
||||||
&dk,
|
|
||||||
ks = schema->ks_name(),
|
|
||||||
cf = schema->cf_name(),
|
|
||||||
gt = tracing::global_trace_state_ptr(trace_state),
|
|
||||||
permit = std::move(permit)]
|
|
||||||
(executor& self) mutable {
|
|
||||||
return do_with(cs.get(), [&mb, &dk, ks = std::move(ks), cf = std::move(cf),
|
|
||||||
trace_state = tracing::trace_state_ptr(gt), &self]
|
|
||||||
(service::client_state& client_state) mutable {
|
|
||||||
auto schema = self._proxy.data_dictionary().find_schema(ks, cf);
|
|
||||||
service::cas_shard cas_shard(*schema, dk.token());
|
|
||||||
|
|
||||||
//FIXME: Instead of passing empty_service_permit() to the background operation,
|
|
||||||
// the current permit's lifetime should be prolonged, so that it's destructed
|
|
||||||
// only after all background operations are finished as well.
|
|
||||||
return self.cas_write(schema, std::move(cas_shard), dk, mb, client_state, std::move(trace_state), empty_service_permit());
|
|
||||||
});
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
auto timeout = executor::default_timeout();
|
auto timeout = executor::default_timeout();
|
||||||
auto op = std::make_unique<put_or_delete_item_cas_request>(schema, mutation_builders);
|
auto op = std::make_unique<put_or_delete_item_cas_request>(schema, mutation_builders);
|
||||||
auto* op_ptr = op.get();
|
auto* op_ptr = op.get();
|
||||||
auto cdc_opts = cdc::per_request_options{
|
auto cdc_opts = cdc::per_request_options{
|
||||||
.alternator = true,
|
.alternator = true,
|
||||||
.alternator_streams_increased_compatibility =
|
.alternator_streams_increased_compatibility =
|
||||||
schema->cdc_options().enabled() && _proxy.data_dictionary().get_config().alternator_streams_increased_compatibility(),
|
schema->cdc_options().enabled() && proxy.data_dictionary().get_config().alternator_streams_increased_compatibility(),
|
||||||
};
|
};
|
||||||
return _proxy.cas(schema, std::move(cas_shard), *op_ptr, nullptr, to_partition_ranges(dk),
|
return proxy.cas(schema, std::move(cas_shard), *op_ptr, nullptr, to_partition_ranges(dk),
|
||||||
{timeout, std::move(permit), client_state, trace_state},
|
{timeout, std::move(permit), client_state, trace_state},
|
||||||
db::consistency_level::LOCAL_SERIAL, db::consistency_level::LOCAL_QUORUM,
|
db::consistency_level::LOCAL_SERIAL, db::consistency_level::LOCAL_QUORUM,
|
||||||
timeout, timeout, true, std::move(cdc_opts)).finally([op = std::move(op)]{}).discard_result();
|
timeout, timeout, true, std::move(cdc_opts)).finally([op = std::move(op)]{}).discard_result();
|
||||||
@@ -3231,11 +3091,13 @@ struct schema_decorated_key_equal {
|
|||||||
|
|
||||||
// FIXME: if we failed writing some of the mutations, need to return a list
|
// FIXME: if we failed writing some of the mutations, need to return a list
|
||||||
// of these failed mutations rather than fail the whole write (issue #5650).
|
// of these failed mutations rather than fail the whole write (issue #5650).
|
||||||
future<> executor::do_batch_write(
|
static future<> do_batch_write(service::storage_proxy& proxy,
|
||||||
|
smp_service_group ssg,
|
||||||
std::vector<std::pair<schema_ptr, put_or_delete_item>> mutation_builders,
|
std::vector<std::pair<schema_ptr, put_or_delete_item>> mutation_builders,
|
||||||
service::client_state& client_state,
|
service::client_state& client_state,
|
||||||
tracing::trace_state_ptr trace_state,
|
tracing::trace_state_ptr trace_state,
|
||||||
service_permit permit) {
|
service_permit permit,
|
||||||
|
stats& stats) {
|
||||||
if (mutation_builders.empty()) {
|
if (mutation_builders.empty()) {
|
||||||
return make_ready_future<>();
|
return make_ready_future<>();
|
||||||
}
|
}
|
||||||
@@ -3257,7 +3119,7 @@ future<> executor::do_batch_write(
|
|||||||
mutations.push_back(b.second.build(b.first, now));
|
mutations.push_back(b.second.build(b.first, now));
|
||||||
any_cdc_enabled |= b.first->cdc_options().enabled();
|
any_cdc_enabled |= b.first->cdc_options().enabled();
|
||||||
}
|
}
|
||||||
return _proxy.mutate(std::move(mutations),
|
return proxy.mutate(std::move(mutations),
|
||||||
db::consistency_level::LOCAL_QUORUM,
|
db::consistency_level::LOCAL_QUORUM,
|
||||||
executor::default_timeout(),
|
executor::default_timeout(),
|
||||||
trace_state,
|
trace_state,
|
||||||
@@ -3266,7 +3128,7 @@ future<> executor::do_batch_write(
|
|||||||
false,
|
false,
|
||||||
cdc::per_request_options{
|
cdc::per_request_options{
|
||||||
.alternator = true,
|
.alternator = true,
|
||||||
.alternator_streams_increased_compatibility = any_cdc_enabled && _proxy.data_dictionary().get_config().alternator_streams_increased_compatibility(),
|
.alternator_streams_increased_compatibility = any_cdc_enabled && proxy.data_dictionary().get_config().alternator_streams_increased_compatibility(),
|
||||||
});
|
});
|
||||||
} else {
|
} else {
|
||||||
// Do the write via LWT:
|
// Do the write via LWT:
|
||||||
@@ -3278,35 +3140,46 @@ future<> executor::do_batch_write(
|
|||||||
schema_decorated_key_hash,
|
schema_decorated_key_hash,
|
||||||
schema_decorated_key_equal>;
|
schema_decorated_key_equal>;
|
||||||
auto key_builders = std::make_unique<map_type>(1, schema_decorated_key_hash{}, schema_decorated_key_equal{});
|
auto key_builders = std::make_unique<map_type>(1, schema_decorated_key_hash{}, schema_decorated_key_equal{});
|
||||||
for (auto&& b : std::move(mutation_builders)) {
|
for (auto& b : mutation_builders) {
|
||||||
auto [it, added] = key_builders->try_emplace(schema_decorated_key {
|
auto dk = dht::decorate_key(*b.first, b.second.pk());
|
||||||
.schema = b.first,
|
auto [it, added] = key_builders->try_emplace(schema_decorated_key{b.first, dk});
|
||||||
.dk = dht::decorate_key(*b.first, b.second.pk())
|
|
||||||
});
|
|
||||||
it->second.push_back(std::move(b.second));
|
it->second.push_back(std::move(b.second));
|
||||||
}
|
}
|
||||||
auto* key_builders_ptr = key_builders.get();
|
auto* key_builders_ptr = key_builders.get();
|
||||||
return parallel_for_each(*key_builders_ptr, [this, &client_state, trace_state, permit = std::move(permit)] (const auto& e) {
|
return parallel_for_each(*key_builders_ptr, [&proxy, &client_state, &stats, trace_state, ssg, permit = std::move(permit)] (const auto& e) {
|
||||||
_stats.write_using_lwt++;
|
stats.write_using_lwt++;
|
||||||
auto desired_shard = service::cas_shard(*e.first.schema, e.first.dk.token());
|
auto desired_shard = service::cas_shard(*e.first.schema, e.first.dk.token());
|
||||||
auto s = e.first.schema;
|
if (desired_shard.this_shard()) {
|
||||||
|
return cas_write(proxy, e.first.schema, std::move(desired_shard), e.first.dk, e.second, client_state, trace_state, permit);
|
||||||
|
} else {
|
||||||
|
stats.shard_bounce_for_lwt++;
|
||||||
|
return proxy.container().invoke_on(desired_shard.shard(), ssg,
|
||||||
|
[cs = client_state.move_to_other_shard(),
|
||||||
|
&mb = e.second,
|
||||||
|
&dk = e.first.dk,
|
||||||
|
ks = e.first.schema->ks_name(),
|
||||||
|
cf = e.first.schema->cf_name(),
|
||||||
|
gt = tracing::global_trace_state_ptr(trace_state),
|
||||||
|
permit = std::move(permit)]
|
||||||
|
(service::storage_proxy& proxy) mutable {
|
||||||
|
return do_with(cs.get(), [&proxy, &mb, &dk, ks = std::move(ks), cf = std::move(cf),
|
||||||
|
trace_state = tracing::trace_state_ptr(gt)]
|
||||||
|
(service::client_state& client_state) mutable {
|
||||||
|
auto schema = proxy.data_dictionary().find_schema(ks, cf);
|
||||||
|
|
||||||
static const auto* injection_name = "alternator_executor_batch_write_wait";
|
// The desired_shard on the original shard remains alive for the duration
|
||||||
return utils::get_local_injector().inject(injection_name, [s = std::move(s)] (auto& handler) -> future<> {
|
// of cas_write on this shard and prevents any tablet operations.
|
||||||
const auto ks = handler.get("keyspace");
|
// However, we need a local instance of cas_shard on this shard
|
||||||
const auto cf = handler.get("table");
|
// to pass it to sp::cas, so we just create a new one.
|
||||||
const auto shard = std::atoll(handler.get("shard")->data());
|
service::cas_shard cas_shard(*schema, dk.token());
|
||||||
if (ks == s->ks_name() && cf == s->cf_name() && shard == this_shard_id()) {
|
|
||||||
elogger.info("{}: hit", injection_name);
|
//FIXME: Instead of passing empty_service_permit() to the background operation,
|
||||||
co_await handler.wait_for_message(std::chrono::steady_clock::now() + std::chrono::minutes{5});
|
// the current permit's lifetime should be prolonged, so that it's destructed
|
||||||
elogger.info("{}: continue", injection_name);
|
// only after all background operations are finished as well.
|
||||||
}
|
return cas_write(proxy, schema, std::move(cas_shard), dk, mb, client_state, std::move(trace_state), empty_service_permit());
|
||||||
}).then([&e, desired_shard = std::move(desired_shard),
|
});
|
||||||
&client_state, trace_state = std::move(trace_state), permit = std::move(permit), this]() mutable
|
}).finally([desired_shard = std::move(desired_shard)]{});
|
||||||
{
|
}
|
||||||
return cas_write(e.first.schema, std::move(desired_shard), e.first.dk,
|
|
||||||
std::move(e.second), client_state, std::move(trace_state), std::move(permit));
|
|
||||||
});
|
|
||||||
}).finally([key_builders = std::move(key_builders)]{});
|
}).finally([key_builders = std::move(key_builders)]{});
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -3454,7 +3327,7 @@ future<executor::request_return_type> executor::batch_write_item(client_state& c
|
|||||||
_stats.wcu_total[stats::DELETE_ITEM] += wcu_delete_units;
|
_stats.wcu_total[stats::DELETE_ITEM] += wcu_delete_units;
|
||||||
_stats.api_operations.batch_write_item_batch_total += total_items;
|
_stats.api_operations.batch_write_item_batch_total += total_items;
|
||||||
_stats.api_operations.batch_write_item_histogram.add(total_items);
|
_stats.api_operations.batch_write_item_histogram.add(total_items);
|
||||||
co_await do_batch_write(std::move(mutation_builders), client_state, trace_state, std::move(permit));
|
co_await do_batch_write(_proxy, _ssg, std::move(mutation_builders), client_state, trace_state, std::move(permit), _stats);
|
||||||
// FIXME: Issue #5650: If we failed writing some of the updates,
|
// FIXME: Issue #5650: If we failed writing some of the updates,
|
||||||
// need to return a list of these failed updates in UnprocessedItems
|
// need to return a list of these failed updates in UnprocessedItems
|
||||||
// rather than fail the whole write (issue #5650).
|
// rather than fail the whole write (issue #5650).
|
||||||
@@ -3463,11 +3336,7 @@ future<executor::request_return_type> executor::batch_write_item(client_state& c
|
|||||||
if (should_add_wcu) {
|
if (should_add_wcu) {
|
||||||
rjson::add(ret, "ConsumedCapacity", std::move(consumed_capacity));
|
rjson::add(ret, "ConsumedCapacity", std::move(consumed_capacity));
|
||||||
}
|
}
|
||||||
auto duration = std::chrono::steady_clock::now() - start_time;
|
_stats.api_operations.batch_write_item_latency.mark(std::chrono::steady_clock::now() - start_time);
|
||||||
_stats.api_operations.batch_write_item_latency.mark(duration);
|
|
||||||
for (const auto& w : per_table_wcu) {
|
|
||||||
w.first->api_operations.batch_write_item_latency.mark(duration);
|
|
||||||
}
|
|
||||||
co_return rjson::print(std::move(ret));
|
co_return rjson::print(std::move(ret));
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -3503,7 +3372,7 @@ static bool hierarchy_filter(rjson::value& val, const attribute_path_map_node<T>
|
|||||||
}
|
}
|
||||||
rjson::value newv = rjson::empty_object();
|
rjson::value newv = rjson::empty_object();
|
||||||
for (auto it = v.MemberBegin(); it != v.MemberEnd(); ++it) {
|
for (auto it = v.MemberBegin(); it != v.MemberEnd(); ++it) {
|
||||||
std::string attr = rjson::to_string(it->name);
|
std::string attr = it->name.GetString();
|
||||||
auto x = members.find(attr);
|
auto x = members.find(attr);
|
||||||
if (x != members.end()) {
|
if (x != members.end()) {
|
||||||
if (x->second) {
|
if (x->second) {
|
||||||
@@ -3551,7 +3420,7 @@ static bool hierarchy_filter(rjson::value& val, const attribute_path_map_node<T>
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add a path to an attribute_path_map. Throws a validation error if the path
|
// Add a path to a attribute_path_map. Throws a validation error if the path
|
||||||
// "overlaps" with one already in the filter (one is a sub-path of the other)
|
// "overlaps" with one already in the filter (one is a sub-path of the other)
|
||||||
// or "conflicts" with it (both a member and index is requested).
|
// or "conflicts" with it (both a member and index is requested).
|
||||||
template<typename T>
|
template<typename T>
|
||||||
@@ -3723,7 +3592,7 @@ static std::optional<attrs_to_get> calculate_attrs_to_get(const rjson::value& re
|
|||||||
const rjson::value& attributes_to_get = req["AttributesToGet"];
|
const rjson::value& attributes_to_get = req["AttributesToGet"];
|
||||||
attrs_to_get ret;
|
attrs_to_get ret;
|
||||||
for (auto it = attributes_to_get.Begin(); it != attributes_to_get.End(); ++it) {
|
for (auto it = attributes_to_get.Begin(); it != attributes_to_get.End(); ++it) {
|
||||||
attribute_path_map_add("AttributesToGet", ret, rjson::to_string(*it));
|
attribute_path_map_add("AttributesToGet", ret, it->GetString());
|
||||||
validate_attr_name_length("AttributesToGet", it->GetStringLength(), false);
|
validate_attr_name_length("AttributesToGet", it->GetStringLength(), false);
|
||||||
}
|
}
|
||||||
if (ret.empty()) {
|
if (ret.empty()) {
|
||||||
@@ -4389,12 +4258,12 @@ inline void update_item_operation::apply_attribute_updates(const std::unique_ptr
|
|||||||
attribute_collector& modified_attrs, bool& any_updates, bool& any_deletes) const {
|
attribute_collector& modified_attrs, bool& any_updates, bool& any_deletes) const {
|
||||||
for (auto it = _attribute_updates->MemberBegin(); it != _attribute_updates->MemberEnd(); ++it) {
|
for (auto it = _attribute_updates->MemberBegin(); it != _attribute_updates->MemberEnd(); ++it) {
|
||||||
// Note that it.key() is the name of the column, *it is the operation
|
// Note that it.key() is the name of the column, *it is the operation
|
||||||
bytes column_name = to_bytes(rjson::to_string_view(it->name));
|
bytes column_name = to_bytes(it->name.GetString());
|
||||||
const column_definition* cdef = _schema->get_column_definition(column_name);
|
const column_definition* cdef = _schema->get_column_definition(column_name);
|
||||||
if (cdef && cdef->is_primary_key()) {
|
if (cdef && cdef->is_primary_key()) {
|
||||||
throw api_error::validation(format("UpdateItem cannot update key column {}", rjson::to_string_view(it->name)));
|
throw api_error::validation(format("UpdateItem cannot update key column {}", it->name.GetString()));
|
||||||
}
|
}
|
||||||
std::string action = rjson::to_string((it->value)["Action"]);
|
std::string action = (it->value)["Action"].GetString();
|
||||||
if (action == "DELETE") {
|
if (action == "DELETE") {
|
||||||
// The DELETE operation can do two unrelated tasks. Without a
|
// The DELETE operation can do two unrelated tasks. Without a
|
||||||
// "Value" option, it is used to delete an attribute. With a
|
// "Value" option, it is used to delete an attribute. With a
|
||||||
@@ -4978,12 +4847,7 @@ future<executor::request_return_type> executor::batch_get_item(client_state& cli
|
|||||||
if (!some_succeeded && eptr) {
|
if (!some_succeeded && eptr) {
|
||||||
co_await coroutine::return_exception_ptr(std::move(eptr));
|
co_await coroutine::return_exception_ptr(std::move(eptr));
|
||||||
}
|
}
|
||||||
auto duration = std::chrono::steady_clock::now() - start_time;
|
_stats.api_operations.batch_get_item_latency.mark(std::chrono::steady_clock::now() - start_time);
|
||||||
_stats.api_operations.batch_get_item_latency.mark(duration);
|
|
||||||
for (const table_requests& rs : requests) {
|
|
||||||
lw_shared_ptr<stats> per_table_stats = get_stats_from_schema(_proxy, *rs.schema);
|
|
||||||
per_table_stats->api_operations.batch_get_item_latency.mark(duration);
|
|
||||||
}
|
|
||||||
if (is_big(response)) {
|
if (is_big(response)) {
|
||||||
co_return make_streamed(std::move(response));
|
co_return make_streamed(std::move(response));
|
||||||
} else {
|
} else {
|
||||||
@@ -5421,7 +5285,7 @@ static future<executor::request_return_type> do_query(service::storage_proxy& pr
|
|||||||
}
|
}
|
||||||
|
|
||||||
static dht::token token_for_segment(int segment, int total_segments) {
|
static dht::token token_for_segment(int segment, int total_segments) {
|
||||||
throwing_assert(total_segments > 1 && segment >= 0 && segment < total_segments);
|
SCYLLA_ASSERT(total_segments > 1 && segment >= 0 && segment < total_segments);
|
||||||
uint64_t delta = std::numeric_limits<uint64_t>::max() / total_segments;
|
uint64_t delta = std::numeric_limits<uint64_t>::max() / total_segments;
|
||||||
return dht::token::from_int64(std::numeric_limits<int64_t>::min() + delta * segment);
|
return dht::token::from_int64(std::numeric_limits<int64_t>::min() + delta * segment);
|
||||||
}
|
}
|
||||||
@@ -5596,7 +5460,7 @@ calculate_bounds_conditions(schema_ptr schema, const rjson::value& conditions) {
|
|||||||
std::vector<query::clustering_range> ck_bounds;
|
std::vector<query::clustering_range> ck_bounds;
|
||||||
|
|
||||||
for (auto it = conditions.MemberBegin(); it != conditions.MemberEnd(); ++it) {
|
for (auto it = conditions.MemberBegin(); it != conditions.MemberEnd(); ++it) {
|
||||||
sstring key = rjson::to_sstring(it->name);
|
std::string key = it->name.GetString();
|
||||||
const rjson::value& condition = it->value;
|
const rjson::value& condition = it->value;
|
||||||
|
|
||||||
const rjson::value& comp_definition = rjson::get(condition, "ComparisonOperator");
|
const rjson::value& comp_definition = rjson::get(condition, "ComparisonOperator");
|
||||||
@@ -5604,13 +5468,13 @@ calculate_bounds_conditions(schema_ptr schema, const rjson::value& conditions) {
|
|||||||
|
|
||||||
const column_definition& pk_cdef = schema->partition_key_columns().front();
|
const column_definition& pk_cdef = schema->partition_key_columns().front();
|
||||||
const column_definition* ck_cdef = schema->clustering_key_size() > 0 ? &schema->clustering_key_columns().front() : nullptr;
|
const column_definition* ck_cdef = schema->clustering_key_size() > 0 ? &schema->clustering_key_columns().front() : nullptr;
|
||||||
if (key == pk_cdef.name_as_text()) {
|
if (sstring(key) == pk_cdef.name_as_text()) {
|
||||||
if (!partition_ranges.empty()) {
|
if (!partition_ranges.empty()) {
|
||||||
throw api_error::validation("Currently only a single restriction per key is allowed");
|
throw api_error::validation("Currently only a single restriction per key is allowed");
|
||||||
}
|
}
|
||||||
partition_ranges.push_back(calculate_pk_bound(schema, pk_cdef, comp_definition, attr_list));
|
partition_ranges.push_back(calculate_pk_bound(schema, pk_cdef, comp_definition, attr_list));
|
||||||
}
|
}
|
||||||
if (ck_cdef && key == ck_cdef->name_as_text()) {
|
if (ck_cdef && sstring(key) == ck_cdef->name_as_text()) {
|
||||||
if (!ck_bounds.empty()) {
|
if (!ck_bounds.empty()) {
|
||||||
throw api_error::validation("Currently only a single restriction per key is allowed");
|
throw api_error::validation("Currently only a single restriction per key is allowed");
|
||||||
}
|
}
|
||||||
@@ -6009,14 +5873,9 @@ future<executor::request_return_type> executor::list_tables(client_state& client
|
|||||||
_stats.api_operations.list_tables++;
|
_stats.api_operations.list_tables++;
|
||||||
elogger.trace("Listing tables {}", request);
|
elogger.trace("Listing tables {}", request);
|
||||||
|
|
||||||
co_await utils::get_local_injector().inject("alternator_list_tables", [] (auto& handler) -> future<> {
|
|
||||||
handler.set("waiting", true);
|
|
||||||
co_await handler.wait_for_message(std::chrono::steady_clock::now() + std::chrono::minutes{5});
|
|
||||||
});
|
|
||||||
|
|
||||||
rjson::value* exclusive_start_json = rjson::find(request, "ExclusiveStartTableName");
|
rjson::value* exclusive_start_json = rjson::find(request, "ExclusiveStartTableName");
|
||||||
rjson::value* limit_json = rjson::find(request, "Limit");
|
rjson::value* limit_json = rjson::find(request, "Limit");
|
||||||
std::string exclusive_start = exclusive_start_json ? rjson::to_string(*exclusive_start_json) : "";
|
std::string exclusive_start = exclusive_start_json ? exclusive_start_json->GetString() : "";
|
||||||
int limit = limit_json ? limit_json->GetInt() : 100;
|
int limit = limit_json ? limit_json->GetInt() : 100;
|
||||||
if (limit < 1 || limit > 100) {
|
if (limit < 1 || limit > 100) {
|
||||||
co_return api_error::validation("Limit must be greater than 0 and no greater than 100");
|
co_return api_error::validation("Limit must be greater than 0 and no greater than 100");
|
||||||
@@ -6205,10 +6064,9 @@ future<> executor::start() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
future<> executor::stop() {
|
future<> executor::stop() {
|
||||||
co_await _describe_table_info_manager->stop();
|
|
||||||
// disconnect from the value source, but keep the value unchanged.
|
// disconnect from the value source, but keep the value unchanged.
|
||||||
s_default_timeout_in_ms = utils::updateable_value<uint32_t>{s_default_timeout_in_ms()};
|
s_default_timeout_in_ms = utils::updateable_value<uint32_t>{s_default_timeout_in_ms()};
|
||||||
co_await _parsed_expression_cache->stop();
|
return _parsed_expression_cache->stop();
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace alternator
|
} // namespace alternator
|
||||||
|
|||||||
@@ -17,13 +17,11 @@
|
|||||||
#include "service/client_state.hh"
|
#include "service/client_state.hh"
|
||||||
#include "service_permit.hh"
|
#include "service_permit.hh"
|
||||||
#include "db/timeout_clock.hh"
|
#include "db/timeout_clock.hh"
|
||||||
#include "db/config.hh"
|
|
||||||
|
|
||||||
#include "alternator/error.hh"
|
#include "alternator/error.hh"
|
||||||
#include "stats.hh"
|
#include "stats.hh"
|
||||||
#include "utils/rjson.hh"
|
#include "utils/rjson.hh"
|
||||||
#include "utils/updateable_value.hh"
|
#include "utils/updateable_value.hh"
|
||||||
#include "utils/simple_value_with_expiry.hh"
|
|
||||||
|
|
||||||
#include "tracing/trace_state.hh"
|
#include "tracing/trace_state.hh"
|
||||||
|
|
||||||
@@ -42,8 +40,6 @@ namespace cql3::selection {
|
|||||||
|
|
||||||
namespace service {
|
namespace service {
|
||||||
class storage_proxy;
|
class storage_proxy;
|
||||||
class cas_shard;
|
|
||||||
class storage_service;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
namespace cdc {
|
namespace cdc {
|
||||||
@@ -60,9 +56,7 @@ class schema_builder;
|
|||||||
|
|
||||||
namespace alternator {
|
namespace alternator {
|
||||||
|
|
||||||
enum class table_status;
|
|
||||||
class rmw_operation;
|
class rmw_operation;
|
||||||
class put_or_delete_item;
|
|
||||||
|
|
||||||
schema_ptr get_table(service::storage_proxy& proxy, const rjson::value& request);
|
schema_ptr get_table(service::storage_proxy& proxy, const rjson::value& request);
|
||||||
bool is_alternator_keyspace(const sstring& ks_name);
|
bool is_alternator_keyspace(const sstring& ks_name);
|
||||||
@@ -140,7 +134,6 @@ class expression_cache;
|
|||||||
|
|
||||||
class executor : public peering_sharded_service<executor> {
|
class executor : public peering_sharded_service<executor> {
|
||||||
gms::gossiper& _gossiper;
|
gms::gossiper& _gossiper;
|
||||||
service::storage_service& _ss;
|
|
||||||
service::storage_proxy& _proxy;
|
service::storage_proxy& _proxy;
|
||||||
service::migration_manager& _mm;
|
service::migration_manager& _mm;
|
||||||
db::system_distributed_keyspace& _sdks;
|
db::system_distributed_keyspace& _sdks;
|
||||||
@@ -153,11 +146,6 @@ class executor : public peering_sharded_service<executor> {
|
|||||||
|
|
||||||
std::unique_ptr<parsed::expression_cache> _parsed_expression_cache;
|
std::unique_ptr<parsed::expression_cache> _parsed_expression_cache;
|
||||||
|
|
||||||
struct describe_table_info_manager;
|
|
||||||
std::unique_ptr<describe_table_info_manager> _describe_table_info_manager;
|
|
||||||
|
|
||||||
future<> cache_newly_calculated_size_on_all_shards(schema_ptr schema, std::uint64_t size_in_bytes, std::chrono::nanoseconds ttl);
|
|
||||||
future<> fill_table_size(rjson::value &table_description, schema_ptr schema, bool deleting);
|
|
||||||
public:
|
public:
|
||||||
using client_state = service::client_state;
|
using client_state = service::client_state;
|
||||||
// request_return_type is the return type of the executor methods, which
|
// request_return_type is the return type of the executor methods, which
|
||||||
@@ -183,7 +171,6 @@ public:
|
|||||||
|
|
||||||
executor(gms::gossiper& gossiper,
|
executor(gms::gossiper& gossiper,
|
||||||
service::storage_proxy& proxy,
|
service::storage_proxy& proxy,
|
||||||
service::storage_service& ss,
|
|
||||||
service::migration_manager& mm,
|
service::migration_manager& mm,
|
||||||
db::system_distributed_keyspace& sdks,
|
db::system_distributed_keyspace& sdks,
|
||||||
cdc::metadata& cdc_metadata,
|
cdc::metadata& cdc_metadata,
|
||||||
@@ -231,18 +218,6 @@ private:
|
|||||||
friend class rmw_operation;
|
friend class rmw_operation;
|
||||||
|
|
||||||
static void describe_key_schema(rjson::value& parent, const schema&, std::unordered_map<std::string,std::string> * = nullptr, const std::map<sstring, sstring> *tags = nullptr);
|
static void describe_key_schema(rjson::value& parent, const schema&, std::unordered_map<std::string,std::string> * = nullptr, const std::map<sstring, sstring> *tags = nullptr);
|
||||||
future<rjson::value> fill_table_description(schema_ptr schema, table_status tbl_status, service::client_state& client_state, tracing::trace_state_ptr trace_state, service_permit permit);
|
|
||||||
future<executor::request_return_type> create_table_on_shard0(service::client_state&& client_state, tracing::trace_state_ptr trace_state, rjson::value request, bool enforce_authorization, bool warn_authorization, const db::tablets_mode_t::mode tablets_mode);
|
|
||||||
|
|
||||||
future<> do_batch_write(
|
|
||||||
std::vector<std::pair<schema_ptr, put_or_delete_item>> mutation_builders,
|
|
||||||
service::client_state& client_state,
|
|
||||||
tracing::trace_state_ptr trace_state,
|
|
||||||
service_permit permit);
|
|
||||||
|
|
||||||
future<> cas_write(schema_ptr schema, service::cas_shard cas_shard, const dht::decorated_key& dk,
|
|
||||||
const std::vector<put_or_delete_item>& mutation_builders, service::client_state& client_state,
|
|
||||||
tracing::trace_state_ptr trace_state, service_permit permit);
|
|
||||||
|
|
||||||
public:
|
public:
|
||||||
static void describe_key_schema(rjson::value& parent, const schema& schema, std::unordered_map<std::string,std::string>&, const std::map<sstring, sstring> *tags = nullptr);
|
static void describe_key_schema(rjson::value& parent, const schema& schema, std::unordered_map<std::string,std::string>&, const std::map<sstring, sstring> *tags = nullptr);
|
||||||
|
|||||||
@@ -50,7 +50,7 @@ public:
|
|||||||
_operators.emplace_back(i);
|
_operators.emplace_back(i);
|
||||||
check_depth_limit();
|
check_depth_limit();
|
||||||
}
|
}
|
||||||
void add_dot(std::string name) {
|
void add_dot(std::string(name)) {
|
||||||
_operators.emplace_back(std::move(name));
|
_operators.emplace_back(std::move(name));
|
||||||
check_depth_limit();
|
check_depth_limit();
|
||||||
}
|
}
|
||||||
@@ -85,7 +85,7 @@ struct constant {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
// "value" is a value used in the right hand side of an assignment
|
// "value" is is a value used in the right hand side of an assignment
|
||||||
// expression, "SET a = ...". It can be a constant (a reference to a value
|
// expression, "SET a = ...". It can be a constant (a reference to a value
|
||||||
// included in the request, e.g., ":val"), a path to an attribute from the
|
// included in the request, e.g., ":val"), a path to an attribute from the
|
||||||
// existing item (e.g., "a.b[3].c"), or a function of other such values.
|
// existing item (e.g., "a.b[3].c"), or a function of other such values.
|
||||||
@@ -205,7 +205,7 @@ public:
|
|||||||
// The supported primitive conditions are:
|
// The supported primitive conditions are:
|
||||||
// 1. Binary operators - v1 OP v2, where OP is =, <>, <, <=, >, or >= and
|
// 1. Binary operators - v1 OP v2, where OP is =, <>, <, <=, >, or >= and
|
||||||
// v1 and v2 are values - from the item (an attribute path), the query
|
// v1 and v2 are values - from the item (an attribute path), the query
|
||||||
// (a ":val" reference), or a function of the above (only the size()
|
// (a ":val" reference), or a function of the the above (only the size()
|
||||||
// function is supported).
|
// function is supported).
|
||||||
// 2. Ternary operator - v1 BETWEEN v2 and v3 (means v1 >= v2 AND v1 <= v3).
|
// 2. Ternary operator - v1 BETWEEN v2 and v3 (means v1 >= v2 AND v1 <= v3).
|
||||||
// 3. N-ary operator - v1 IN ( v2, v3, ... )
|
// 3. N-ary operator - v1 IN ( v2, v3, ... )
|
||||||
|
|||||||
@@ -1,301 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright 2025-present ScyllaDB
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
|
||||||
*/
|
|
||||||
|
|
||||||
#include "alternator/http_compression.hh"
|
|
||||||
#include "alternator/server.hh"
|
|
||||||
#include <seastar/coroutine/maybe_yield.hh>
|
|
||||||
#include <zlib.h>
|
|
||||||
|
|
||||||
static logging::logger slogger("alternator-http-compression");
|
|
||||||
|
|
||||||
namespace alternator {
|
|
||||||
|
|
||||||
|
|
||||||
static constexpr size_t compressed_buffer_size = 1024;
|
|
||||||
class zlib_compressor {
|
|
||||||
z_stream _zs;
|
|
||||||
temporary_buffer<char> _output_buf;
|
|
||||||
noncopyable_function<future<>(temporary_buffer<char>&&)> _write_func;
|
|
||||||
public:
|
|
||||||
zlib_compressor(bool gzip, int compression_level, noncopyable_function<future<>(temporary_buffer<char>&&)> write_func)
|
|
||||||
: _write_func(std::move(write_func)) {
|
|
||||||
memset(&_zs, 0, sizeof(_zs));
|
|
||||||
if (deflateInit2(&_zs, std::clamp(compression_level, Z_NO_COMPRESSION, Z_BEST_COMPRESSION), Z_DEFLATED,
|
|
||||||
(gzip ? 16 : 0) + MAX_WBITS, 8, Z_DEFAULT_STRATEGY) != Z_OK) {
|
|
||||||
// Should only happen if memory allocation fails
|
|
||||||
throw std::bad_alloc();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
~zlib_compressor() {
|
|
||||||
deflateEnd(&_zs);
|
|
||||||
}
|
|
||||||
future<> close() {
|
|
||||||
return compress(nullptr, 0, true);
|
|
||||||
}
|
|
||||||
|
|
||||||
future<> compress(const char* buf, size_t len, bool is_last_chunk = false) {
|
|
||||||
_zs.next_in = reinterpret_cast<unsigned char*>(const_cast<char*>(buf));
|
|
||||||
_zs.avail_in = (uInt) len;
|
|
||||||
int mode = is_last_chunk ? Z_FINISH : Z_NO_FLUSH;
|
|
||||||
while(_zs.avail_in > 0 || is_last_chunk) {
|
|
||||||
co_await coroutine::maybe_yield();
|
|
||||||
if (_output_buf.empty()) {
|
|
||||||
if (is_last_chunk) {
|
|
||||||
uint32_t max_buffer_size = 0;
|
|
||||||
deflatePending(&_zs, &max_buffer_size, nullptr);
|
|
||||||
max_buffer_size += deflateBound(&_zs, _zs.avail_in) + 1;
|
|
||||||
_output_buf = temporary_buffer<char>(std::min(compressed_buffer_size, (size_t) max_buffer_size));
|
|
||||||
} else {
|
|
||||||
_output_buf = temporary_buffer<char>(compressed_buffer_size);
|
|
||||||
}
|
|
||||||
_zs.next_out = reinterpret_cast<unsigned char*>(_output_buf.get_write());
|
|
||||||
_zs.avail_out = compressed_buffer_size;
|
|
||||||
}
|
|
||||||
int e = deflate(&_zs, mode);
|
|
||||||
if (e < Z_OK) {
|
|
||||||
throw api_error::internal("Error during compression of response body");
|
|
||||||
}
|
|
||||||
if (e == Z_STREAM_END || _zs.avail_out < compressed_buffer_size / 4) {
|
|
||||||
_output_buf.trim(compressed_buffer_size - _zs.avail_out);
|
|
||||||
co_await _write_func(std::move(_output_buf));
|
|
||||||
if (e == Z_STREAM_END) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// Helper string_view functions for parsing Accept-Encoding header
|
|
||||||
// Case-insensitive equality comparator for string views; used to match
// tokens from the Accept-Encoding header against known encoding names.
struct case_insensitive_cmp_sv {
    bool operator()(std::string_view s1, std::string_view s2) const {
        if (s1.size() != s2.size()) {
            return false;
        }
        for (size_t i = 0; i < s1.size(); ++i) {
            if (::tolower(s1[i]) != ::tolower(s2[i])) {
                return false;
            }
        }
        return true;
    }
};
|
|
||||||
// Returns sv with all leading whitespace removed.
static inline std::string_view trim_left(std::string_view sv) {
    size_t i = 0;
    while (i < sv.size() && std::isspace(static_cast<unsigned char>(sv[i]))) {
        ++i;
    }
    sv.remove_prefix(i);
    return sv;
}
|
|
||||||
// Returns sv with all trailing whitespace removed.
static inline std::string_view trim_right(std::string_view sv) {
    size_t n = sv.size();
    while (n > 0 && std::isspace(static_cast<unsigned char>(sv[n - 1]))) {
        --n;
    }
    return sv.substr(0, n);
}
|
|
||||||
static inline std::string_view trim(std::string_view sv) {
|
|
||||||
return trim_left(trim_right(sv));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Splits text on the given separator character and returns the pieces as
// views into the original text (no copies; text must outlive the result).
// An empty input yields no tokens; otherwise every separator produces a
// token on each side, so "a," -> {"a", ""} and "," -> {"", ""}.
inline std::vector<std::string_view> split(std::string_view text, char separator) {
    std::vector<std::string_view> tokens;
    if (text.empty()) { // idiomatic empty() instead of comparing with ""
        return tokens;
    }
    while (true) {
        // find(char) instead of find_first_of(char) - same result, clearer
        // intent for a single-character separator.
        auto pos = text.find(separator);
        if (pos == std::string_view::npos) {
            tokens.emplace_back(text);
            break;
        }
        tokens.emplace_back(text.substr(0, pos));
        text.remove_prefix(pos + 1);
    }
    return tokens;
}
|
|
||||||
|
|
||||||
// Maps an encoding token from Accept-Encoding (e.g. "gzip", "*") to the
// matching compression_type, ignoring ASCII case. Unrecognized tokens map
// to compression_type::unknown.
constexpr response_compressor::compression_type response_compressor::get_compression_type(std::string_view encoding) {
    size_t idx = 0;
    for (std::string_view name : compression_names) {
        if (case_insensitive_cmp_sv{}(encoding, name)) {
            return static_cast<compression_type>(idx);
        }
        ++idx;
    }
    return compression_type::unknown;
}
|
|
||||||
|
|
||||||
// Selects the compression to apply to a response, based on the client's
// Accept-Encoding header and the response body size.
// The header is a comma-separated list of entries, each an encoding name
// optionally followed by ";q=<quality>" parameters (HTTP content-coding
// negotiation). Unknown encodings are ignored; encodings whose configured
// size threshold exceeds response_size are skipped. Returns
// compression_type::none when no acceptable compression applies.
response_compressor::compression_type response_compressor::find_compression(std::string_view accept_encoding, size_t response_size) {
    // Per-encoding quality value; a disengaged optional means the encoding
    // was not (yet) mentioned by the client.
    std::optional<float> ct_q[static_cast<size_t>(compression_type::count)];
    ct_q[static_cast<size_t>(compression_type::none)] = std::numeric_limits<float>::min(); // enabled, but lowest priority
    compression_type selected_ct = compression_type::none;

    std::vector<std::string_view> entries = split(accept_encoding, ',');
    for (auto& e : entries) {
        // Each entry looks like "name" or "name;q=0.5" (more parameters
        // after ';' are possible).
        std::vector<std::string_view> params = split(e, ';');
        if (params.size() == 0) {
            continue;
        }
        compression_type ct = get_compression_type(trim(params[0]));
        if (ct == compression_type::unknown) {
            continue; // ignore unknown encoding types
        }
        // NOTE(review): an encoding already seen with a non-zero quality is
        // skipped here, but one previously seen with q=0 can be overridden
        // by a later duplicate entry - confirm this asymmetry is intended.
        if (ct_q[static_cast<size_t>(ct)].has_value() && ct_q[static_cast<size_t>(ct)] != 0.0f) {
            continue; // already processed this encoding
        }
        if (response_size < _threshold[static_cast<size_t>(ct)]) {
            continue; // below threshold treat as unknown
        }
        for (size_t i = 1; i < params.size(); ++i) { // find "q=" parameter
            auto pos = params[i].find("q=");
            if (pos == std::string_view::npos) {
                continue;
            }
            std::string_view param = params[i].substr(pos + 2);
            param = trim(param);
            // parse quality value; from_chars is locale-independent and
            // non-allocating, and must consume the whole token to count.
            float q_value = 1.0f;
            auto [ptr, ec] = std::from_chars(param.data(), param.data() + param.size(), q_value);
            if (ec != std::errc() || ptr != param.data() + param.size()) {
                continue;
            }
            // Clamp the quality to the valid [0, 1] range.
            if (q_value < 0.0) {
                q_value = 0.0;
            } else if (q_value > 1.0) {
                q_value = 1.0;
            }
            ct_q[static_cast<size_t>(ct)] = q_value;
            break; // we parsed quality value
        }
        if (!ct_q[static_cast<size_t>(ct)].has_value()) {
            ct_q[static_cast<size_t>(ct)] = 1.0f; // default quality value
        }
        // keep the highest encoding (in the order, unless 'any')
        // When the current best is the wildcard "*" (any), a concrete
        // encoding of equal quality replaces it (>=); otherwise a strictly
        // higher quality is required to replace the current selection.
        if (selected_ct == compression_type::any) {
            if (ct_q[static_cast<size_t>(ct)] >= ct_q[static_cast<size_t>(selected_ct)]) {
                selected_ct = ct;
            }
        } else {
            if (ct_q[static_cast<size_t>(ct)] > ct_q[static_cast<size_t>(selected_ct)]) {
                selected_ct = ct;
            }
        }
    }
    if (selected_ct == compression_type::any) {
        // The wildcard won: pick the first concrete compression the client
        // did not mention at all, otherwise the mentioned one with the
        // highest quality.
        // select any not mentioned or highest quality
        selected_ct = compression_type::none;
        for (size_t i = 0; i < static_cast<size_t>(compression_type::compressions_count); ++i) {
            if (!ct_q[i].has_value()) {
                return static_cast<compression_type>(i);
            }
            if (ct_q[i] > ct_q[static_cast<size_t>(selected_ct)]) {
                selected_ct = static_cast<compression_type>(i);
            }
        }
    }
    return selected_ct;
}
|
|
||||||
|
|
||||||
// Compresses an entire response body in one shot and returns the compressed
// output as a list of buffers. Uses gzip framing unless ct is deflate (in
// which case zlib framing is used).
static future<chunked_content> compress(response_compressor::compression_type ct, const db::config& cfg, std::string str) {
    chunked_content compressed;
    const bool use_gzip = ct != response_compressor::compression_type::deflate;
    auto collect = [&compressed](temporary_buffer<char>&& buf) -> future<> {
        compressed.push_back(std::move(buf));
        return make_ready_future<>();
    };
    zlib_compressor compressor(use_gzip, cfg.alternator_response_gzip_compression_level(), std::move(collect));
    // Feed the whole body as the one and only (last) chunk, finalizing the
    // stream in a single call.
    co_await compressor.compress(str.data(), str.size(), true);
    co_return compressed;
}
|
|
||||||
|
|
||||||
// Concatenates the chunks of a chunked_content into one contiguous sstring.
static sstring flatten(chunked_content&& cc) {
    size_t total = 0;
    for (const auto& chunk : cc) {
        total += chunk.size();
    }
    // Allocate once, uninitialized, then fill in place.
    sstring result = sstring{ sstring::initialized_later{}, total };
    auto out = result.begin();
    for (const auto& chunk : cc) {
        out = std::copy(chunk.begin(), chunk.end(), out);
    }
    return result;
}
|
|
||||||
|
|
||||||
// Builds the final HTTP reply for a fully-materialized response body.
// If the client accepts a supported compression (and the body exceeds the
// configured threshold), the body is compressed and a matching
// Content-Encoding header is added; otherwise the body is used verbatim.
future<std::unique_ptr<http::reply>> response_compressor::generate_reply(std::unique_ptr<http::reply> rep, sstring accept_encoding, const char* content_type, std::string&& response_body) {
    auto ct = find_compression(accept_encoding, response_body.size());
    rep->set_content_type(content_type);
    if (ct == response_compressor::compression_type::none) {
        // Note that despite the move, there is a copy here -
        // as response_body is std::string and rep->_content is sstring.
        rep->_content = std::move(response_body);
        return make_ready_future<std::unique_ptr<http::reply>>(std::move(rep));
    }
    rep->add_header("Content-Encoding", get_encoding_name(ct));
    return compress(ct, cfg, std::move(response_body)).then([rep = std::move(rep)] (chunked_content compressed) mutable {
        rep->_content = flatten(std::move(compressed));
        return make_ready_future<std::unique_ptr<http::reply>>(std::move(rep));
    });
}
|
|
||||||
|
|
||||||
// A data_sink_impl that compresses everything written to it with the given
// Compressor before forwarding the compressed bytes to an underlying output
// stream. The Compressor is constructed with the user-supplied arguments
// plus a write callback that feeds compressed buffers into _out.
template<typename Compressor>
class compressed_data_sink_impl : public data_sink_impl {
    output_stream<char> _out;      // destination for compressed output
    Compressor _compressor;
public:
    template<typename... Args>
    compressed_data_sink_impl(output_stream<char>&& out, Args&&... args)
        : _out(std::move(out)), _compressor(std::forward<Args>(args)..., [this](temporary_buffer<char>&& buf) {
            return _out.write(std::move(buf));
        }) { }

    // Compress each incoming buffer in order.
    future<> put(std::span<temporary_buffer<char>> data) override {
        return data_sink_impl::fallback_put(data, [this] (temporary_buffer<char>&& buf) {
            return do_put(std::move(buf));
        });
    }

private:
    future<> do_put(temporary_buffer<char> buf) {
        // This is deliberately a coroutine: compress() reads directly from
        // buf's memory, and the coroutine frame keeps the by-value buf alive
        // until compress() completes. Returning the future directly would
        // destroy buf before the compressor finished reading it.
        co_return co_await _compressor.compress(buf.get(), buf.size());
    }
    // Finalize the compressed stream, then close the underlying stream.
    future<> close() override {
        return _compressor.close().then([this] {
            return _out.close();
        });
    }
};
|
|
||||||
|
|
||||||
// Wraps a streaming body writer so that everything it writes is compressed
// (gzip or deflate, per ct) on its way to the connection.
executor::body_writer compress(response_compressor::compression_type ct, const db::config& cfg, executor::body_writer&& bw) {
    return [bw = std::move(bw), ct, level = cfg.alternator_response_gzip_compression_level()](output_stream<char>&& out) mutable -> future<> {
        output_stream_options opts;
        opts.trim_to_size = true;
        // Named 'sink_impl' to avoid shadowing the data_sink_impl type.
        std::unique_ptr<data_sink_impl> sink_impl;
        switch (ct) {
        case response_compressor::compression_type::gzip:
            sink_impl = std::make_unique<compressed_data_sink_impl<zlib_compressor>>(std::move(out), true, level);
            break;
        case response_compressor::compression_type::deflate:
            sink_impl = std::make_unique<compressed_data_sink_impl<zlib_compressor>>(std::move(out), false, level);
            break;
        case response_compressor::compression_type::none:
        case response_compressor::compression_type::any:
        case response_compressor::compression_type::unknown:
            on_internal_error(slogger, "Compression not selected");
        default:
            on_internal_error(slogger, "Unsupported compression type for data sink");
        }
        return bw(output_stream<char>(data_sink(std::move(sink_impl)), compressed_buffer_size, opts));
    };
}
|
|
||||||
|
|
||||||
// Builds the final HTTP reply for a streamed response body. The body size
// is unknown up front, so the maximum possible size is passed to
// find_compression: the size threshold can never disable compression for a
// streamed reply, only the Accept-Encoding negotiation can.
future<std::unique_ptr<http::reply>> response_compressor::generate_reply(std::unique_ptr<http::reply> rep, sstring accept_encoding, const char* content_type, executor::body_writer&& body_writer) {
    auto ct = find_compression(accept_encoding, std::numeric_limits<size_t>::max());
    if (ct == response_compressor::compression_type::none) {
        rep->write_body(content_type, std::move(body_writer));
    } else {
        rep->add_header("Content-Encoding", get_encoding_name(ct));
        rep->write_body(content_type, compress(ct, cfg, std::move(body_writer)));
    }
    return make_ready_future<std::unique_ptr<http::reply>>(std::move(rep));
}
|
|
||||||
|
|
||||||
} // namespace alternator
|
|
||||||
@@ -1,91 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright 2025-present ScyllaDB
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
|
||||||
*/
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include "alternator/executor.hh"
|
|
||||||
#include <seastar/http/httpd.hh>
|
|
||||||
#include "db/config.hh"
|
|
||||||
|
|
||||||
namespace alternator {
|
|
||||||
|
|
||||||
// Negotiates and applies HTTP response compression for the Alternator API,
// driven by live-updatable configuration (compression level and size
// threshold).
class response_compressor {
public:
    // Encoding identifiers. The first compressions_count values are real
    // compression algorithms; 'any' is the Accept-Encoding wildcard "*";
    // 'none' is "identity"; 'unknown' (== count) is any unrecognized token.
    enum class compression_type {
        gzip,
        deflate,
        compressions_count,
        any = compressions_count,
        none,
        count,
        unknown = count
    };
    // Indexed by compression_type; order must match the enum above.
    static constexpr std::string_view compression_names[] = {
        "gzip",
        "deflate",
        "*",
        "identity"
    };

    // NOTE(review): indexing with compression_type::unknown (== count) would
    // read past the end of compression_names - callers must pass a valid
    // encoding.
    static sstring get_encoding_name(compression_type ct) {
        return sstring(compression_names[static_cast<size_t>(ct)]);
    }
    static constexpr compression_type get_compression_type(std::string_view encoding);

    // Returns the request's Accept-Encoding header, or "" when the effective
    // threshold is 0 (which disables response compression entirely).
    sstring get_accepted_encoding(const http::request& req) {
        if (get_threshold() == 0) {
            return "";
        }
        return req.get_header("Accept-Encoding");
    }
    compression_type find_compression(std::string_view accept_encoding, size_t response_size);

    // Recomputes the thresholds now and again whenever either of the two
    // configuration options changes at runtime.
    response_compressor(const db::config& cfg)
        : cfg(cfg)
        ,_gzip_level_observer(
            cfg.alternator_response_gzip_compression_level.observe([this](int v) {
                update_threshold();
            }))
        ,_gzip_threshold_observer(
            cfg.alternator_response_compression_threshold_in_bytes.observe([this](uint32_t v) {
                update_threshold();
            }))
    {
        update_threshold();
    }
    // Copying re-registers the observers against the same config (the
    // default copy would have the copies' observers update the original).
    response_compressor(const response_compressor& rhs) : response_compressor(rhs.cfg) {}

private:
    const db::config& cfg;
    utils::observable<int>::observer _gzip_level_observer;
    utils::observable<uint32_t>::observer _gzip_threshold_observer;
    // Minimum response size (bytes) for which each encoding is considered;
    // max() means the encoding is effectively disabled.
    uint32_t _threshold[static_cast<size_t>(compression_type::count)];

    // The 'any' slot holds the minimum threshold across all compressions.
    size_t get_threshold() { return _threshold[static_cast<size_t>(compression_type::any)]; }
    void update_threshold() {
        _threshold[static_cast<size_t>(compression_type::none)] = std::numeric_limits<uint32_t>::max();
        _threshold[static_cast<size_t>(compression_type::any)] = std::numeric_limits<uint32_t>::max();
        // A non-positive compression level disables gzip/deflate outright.
        uint32_t gzip = cfg.alternator_response_gzip_compression_level() <= 0 ? std::numeric_limits<uint32_t>::max()
            : cfg.alternator_response_compression_threshold_in_bytes();
        _threshold[static_cast<size_t>(compression_type::gzip)] = gzip;
        _threshold[static_cast<size_t>(compression_type::deflate)] = gzip;
        // 'any' becomes the smallest threshold among the real compressions.
        for (size_t i = 0; i < static_cast<size_t>(compression_type::compressions_count); ++i) {
            if (_threshold[i] < _threshold[static_cast<size_t>(compression_type::any)]) {
                _threshold[static_cast<size_t>(compression_type::any)] = _threshold[i];
            }
        }
    }

public:
    // Overload for a fully-materialized body.
    future<std::unique_ptr<http::reply>> generate_reply(std::unique_ptr<http::reply> rep,
        sstring accept_encoding, const char* content_type, std::string&& response_body);
    // Overload for a streamed body.
    future<std::unique_ptr<http::reply>> generate_reply(std::unique_ptr<http::reply> rep,
        sstring accept_encoding, const char* content_type, executor::body_writer&& body_writer);
};
|
|
||||||
|
|
||||||
}
|
|
||||||
@@ -496,7 +496,7 @@ const std::pair<std::string, const rjson::value*> unwrap_set(const rjson::value&
|
|||||||
return {"", nullptr};
|
return {"", nullptr};
|
||||||
}
|
}
|
||||||
auto it = v.MemberBegin();
|
auto it = v.MemberBegin();
|
||||||
const std::string it_key = rjson::to_string(it->name);
|
const std::string it_key = it->name.GetString();
|
||||||
if (it_key != "SS" && it_key != "BS" && it_key != "NS") {
|
if (it_key != "SS" && it_key != "BS" && it_key != "NS") {
|
||||||
return {std::move(it_key), nullptr};
|
return {std::move(it_key), nullptr};
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -55,7 +55,7 @@ partition_key pk_from_json(const rjson::value& item, schema_ptr schema);
|
|||||||
clustering_key ck_from_json(const rjson::value& item, schema_ptr schema);
|
clustering_key ck_from_json(const rjson::value& item, schema_ptr schema);
|
||||||
position_in_partition pos_from_json(const rjson::value& item, schema_ptr schema);
|
position_in_partition pos_from_json(const rjson::value& item, schema_ptr schema);
|
||||||
|
|
||||||
// If v encodes a number (i.e., it is a {"N": [...]}), returns an object representing it. Otherwise,
|
// If v encodes a number (i.e., it is a {"N": [...]}, returns an object representing it. Otherwise,
|
||||||
// raises ValidationException with diagnostic.
|
// raises ValidationException with diagnostic.
|
||||||
big_decimal unwrap_number(const rjson::value& v, std::string_view diagnostic);
|
big_decimal unwrap_number(const rjson::value& v, std::string_view diagnostic);
|
||||||
|
|
||||||
|
|||||||
@@ -34,7 +34,6 @@
|
|||||||
#include "client_data.hh"
|
#include "client_data.hh"
|
||||||
#include "utils/updateable_value.hh"
|
#include "utils/updateable_value.hh"
|
||||||
#include <zlib.h>
|
#include <zlib.h>
|
||||||
#include "alternator/http_compression.hh"
|
|
||||||
|
|
||||||
static logging::logger slogger("alternator-server");
|
static logging::logger slogger("alternator-server");
|
||||||
|
|
||||||
@@ -112,12 +111,9 @@ class api_handler : public handler_base {
|
|||||||
// type applies to all replies, both success and error.
|
// type applies to all replies, both success and error.
|
||||||
static constexpr const char* REPLY_CONTENT_TYPE = "application/x-amz-json-1.0";
|
static constexpr const char* REPLY_CONTENT_TYPE = "application/x-amz-json-1.0";
|
||||||
public:
|
public:
|
||||||
api_handler(const std::function<future<executor::request_return_type>(std::unique_ptr<request> req)>& _handle,
|
api_handler(const std::function<future<executor::request_return_type>(std::unique_ptr<request> req)>& _handle) : _f_handle(
|
||||||
const db::config& config) : _response_compressor(config), _f_handle(
|
|
||||||
[this, _handle](std::unique_ptr<request> req, std::unique_ptr<reply> rep) {
|
[this, _handle](std::unique_ptr<request> req, std::unique_ptr<reply> rep) {
|
||||||
sstring accept_encoding = _response_compressor.get_accepted_encoding(*req);
|
return seastar::futurize_invoke(_handle, std::move(req)).then_wrapped([this, rep = std::move(rep)](future<executor::request_return_type> resf) mutable {
|
||||||
return seastar::futurize_invoke(_handle, std::move(req)).then_wrapped(
|
|
||||||
[this, rep = std::move(rep), accept_encoding=std::move(accept_encoding)](future<executor::request_return_type> resf) mutable {
|
|
||||||
if (resf.failed()) {
|
if (resf.failed()) {
|
||||||
// Exceptions of type api_error are wrapped as JSON and
|
// Exceptions of type api_error are wrapped as JSON and
|
||||||
// returned to the client as expected. Other types of
|
// returned to the client as expected. Other types of
|
||||||
@@ -137,20 +133,22 @@ public:
|
|||||||
return make_ready_future<std::unique_ptr<reply>>(std::move(rep));
|
return make_ready_future<std::unique_ptr<reply>>(std::move(rep));
|
||||||
}
|
}
|
||||||
auto res = resf.get();
|
auto res = resf.get();
|
||||||
return std::visit(overloaded_functor {
|
std::visit(overloaded_functor {
|
||||||
[&] (std::string&& str) {
|
[&] (std::string&& str) {
|
||||||
return _response_compressor.generate_reply(std::move(rep), std::move(accept_encoding),
|
// Note that despite the move, there is a copy here -
|
||||||
REPLY_CONTENT_TYPE, std::move(str));
|
// as str is std::string and rep->_content is sstring.
|
||||||
|
rep->_content = std::move(str);
|
||||||
|
rep->set_content_type(REPLY_CONTENT_TYPE);
|
||||||
},
|
},
|
||||||
[&] (executor::body_writer&& body_writer) {
|
[&] (executor::body_writer&& body_writer) {
|
||||||
return _response_compressor.generate_reply(std::move(rep), std::move(accept_encoding),
|
rep->write_body(REPLY_CONTENT_TYPE, std::move(body_writer));
|
||||||
REPLY_CONTENT_TYPE, std::move(body_writer));
|
|
||||||
},
|
},
|
||||||
[&] (const api_error& err) {
|
[&] (const api_error& err) {
|
||||||
generate_error_reply(*rep, err);
|
generate_error_reply(*rep, err);
|
||||||
return make_ready_future<std::unique_ptr<reply>>(std::move(rep));
|
|
||||||
}
|
}
|
||||||
}, std::move(res));
|
}, std::move(res));
|
||||||
|
|
||||||
|
return make_ready_future<std::unique_ptr<reply>>(std::move(rep));
|
||||||
});
|
});
|
||||||
}) { }
|
}) { }
|
||||||
|
|
||||||
@@ -179,7 +177,6 @@ protected:
|
|||||||
slogger.trace("api_handler error case: {}", rep._content);
|
slogger.trace("api_handler error case: {}", rep._content);
|
||||||
}
|
}
|
||||||
|
|
||||||
response_compressor _response_compressor;
|
|
||||||
future_handler_function _f_handle;
|
future_handler_function _f_handle;
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -374,45 +371,18 @@ future<std::string> server::verify_signature(const request& req, const chunked_c
|
|||||||
for (const auto& header : signed_headers) {
|
for (const auto& header : signed_headers) {
|
||||||
signed_headers_map.emplace(header, std::string_view());
|
signed_headers_map.emplace(header, std::string_view());
|
||||||
}
|
}
|
||||||
std::vector<std::string> modified_values;
|
|
||||||
for (auto& header : req._headers) {
|
for (auto& header : req._headers) {
|
||||||
std::string header_str;
|
std::string header_str;
|
||||||
header_str.resize(header.first.size());
|
header_str.resize(header.first.size());
|
||||||
std::transform(header.first.begin(), header.first.end(), header_str.begin(), ::tolower);
|
std::transform(header.first.begin(), header.first.end(), header_str.begin(), ::tolower);
|
||||||
auto it = signed_headers_map.find(header_str);
|
auto it = signed_headers_map.find(header_str);
|
||||||
if (it != signed_headers_map.end()) {
|
if (it != signed_headers_map.end()) {
|
||||||
// replace multiple spaces in the header value header.second with
|
it->second = std::string_view(header.second);
|
||||||
// a single space, as required by AWS SigV4 header canonization.
|
|
||||||
// If we modify the value, we need to save it in modified_values
|
|
||||||
// to keep it alive.
|
|
||||||
std::string value;
|
|
||||||
value.reserve(header.second.size());
|
|
||||||
bool prev_space = false;
|
|
||||||
bool modified = false;
|
|
||||||
for (char ch : header.second) {
|
|
||||||
if (ch == ' ') {
|
|
||||||
if (!prev_space) {
|
|
||||||
value += ch;
|
|
||||||
prev_space = true;
|
|
||||||
} else {
|
|
||||||
modified = true; // skip a space
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
value += ch;
|
|
||||||
prev_space = false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (modified) {
|
|
||||||
modified_values.emplace_back(std::move(value));
|
|
||||||
it->second = std::string_view(modified_values.back());
|
|
||||||
} else {
|
|
||||||
it->second = std::string_view(header.second);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
auto cache_getter = [&proxy = _proxy] (std::string username) {
|
auto cache_getter = [&proxy = _proxy, &as = _auth_service] (std::string username) {
|
||||||
return get_key_from_roles(proxy, std::move(username));
|
return get_key_from_roles(proxy, as, std::move(username));
|
||||||
};
|
};
|
||||||
return _key_cache.get_ptr(user, cache_getter).then_wrapped([this, &req, &content,
|
return _key_cache.get_ptr(user, cache_getter).then_wrapped([this, &req, &content,
|
||||||
user = std::move(user),
|
user = std::move(user),
|
||||||
@@ -420,7 +390,6 @@ future<std::string> server::verify_signature(const request& req, const chunked_c
|
|||||||
datestamp = std::move(datestamp),
|
datestamp = std::move(datestamp),
|
||||||
signed_headers_str = std::move(signed_headers_str),
|
signed_headers_str = std::move(signed_headers_str),
|
||||||
signed_headers_map = std::move(signed_headers_map),
|
signed_headers_map = std::move(signed_headers_map),
|
||||||
modified_values = std::move(modified_values),
|
|
||||||
region = std::move(region),
|
region = std::move(region),
|
||||||
service = std::move(service),
|
service = std::move(service),
|
||||||
user_signature = std::move(user_signature)] (future<key_cache::value_ptr> key_ptr_fut) {
|
user_signature = std::move(user_signature)] (future<key_cache::value_ptr> key_ptr_fut) {
|
||||||
@@ -591,11 +560,11 @@ read_entire_stream(input_stream<char>& inp, size_t length_limit) {
|
|||||||
class safe_gzip_zstream {
|
class safe_gzip_zstream {
|
||||||
z_stream _zs;
|
z_stream _zs;
|
||||||
public:
|
public:
|
||||||
// If gzip is true, decode a gzip header (for "Content-Encoding: gzip").
|
safe_gzip_zstream() {
|
||||||
// Otherwise, a zlib header (for "Content-Encoding: deflate").
|
|
||||||
safe_gzip_zstream(bool gzip = true) {
|
|
||||||
memset(&_zs, 0, sizeof(_zs));
|
memset(&_zs, 0, sizeof(_zs));
|
||||||
if (inflateInit2(&_zs, gzip ? 16 + MAX_WBITS : MAX_WBITS) != Z_OK) {
|
// The strange 16 + WMAX_BITS tells zlib to expect and decode
|
||||||
|
// a gzip header, not a zlib header.
|
||||||
|
if (inflateInit2(&_zs, 16 + MAX_WBITS) != Z_OK) {
|
||||||
// Should only happen if memory allocation fails
|
// Should only happen if memory allocation fails
|
||||||
throw std::bad_alloc();
|
throw std::bad_alloc();
|
||||||
}
|
}
|
||||||
@@ -614,21 +583,19 @@ public:
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
// ungzip() takes a chunked_content of a compressed request body, and returns
|
// ungzip() takes a chunked_content with a gzip-compressed request body,
|
||||||
// the uncompressed content as a chunked_content. If gzip is true, we expect
|
// uncompresses it, and returns the uncompressed content as a chunked_content.
|
||||||
// gzip header (for "Content-Encoding: gzip"), if gzip is false, we expect a
|
|
||||||
// zlib header (for "Content-Encoding: deflate").
|
|
||||||
// If the uncompressed content exceeds length_limit, an error is thrown.
|
// If the uncompressed content exceeds length_limit, an error is thrown.
|
||||||
static future<chunked_content>
|
static future<chunked_content>
|
||||||
ungzip(chunked_content&& compressed_body, size_t length_limit, bool gzip = true) {
|
ungzip(chunked_content&& compressed_body, size_t length_limit) {
|
||||||
chunked_content ret;
|
chunked_content ret;
|
||||||
// output_buf can be any size - when uncompressing input_buf, it doesn't
|
// output_buf can be any size - when uncompressing input_buf, it doesn't
|
||||||
// need to fit in a single output_buf, we'll use multiple output_buf for
|
// need to fit in a single output_buf, we'll use multiple output_buf for
|
||||||
// a single input_buf if needed.
|
// a single input_buf if needed.
|
||||||
constexpr size_t OUTPUT_BUF_SIZE = 4096;
|
constexpr size_t OUTPUT_BUF_SIZE = 4096;
|
||||||
temporary_buffer<char> output_buf;
|
temporary_buffer<char> output_buf;
|
||||||
safe_gzip_zstream strm(gzip);
|
safe_gzip_zstream strm;
|
||||||
bool complete_stream = false; // empty input is not a valid gzip/deflate
|
bool complete_stream = false; // empty input is not a valid gzip
|
||||||
size_t total_out_bytes = 0;
|
size_t total_out_bytes = 0;
|
||||||
for (const temporary_buffer<char>& input_buf : compressed_body) {
|
for (const temporary_buffer<char>& input_buf : compressed_body) {
|
||||||
if (input_buf.empty()) {
|
if (input_buf.empty()) {
|
||||||
@@ -710,7 +677,7 @@ future<executor::request_return_type> server::handle_api_request(std::unique_ptr
|
|||||||
++_executor._stats.requests_blocked_memory;
|
++_executor._stats.requests_blocked_memory;
|
||||||
}
|
}
|
||||||
auto units = co_await std::move(units_fut);
|
auto units = co_await std::move(units_fut);
|
||||||
throwing_assert(req->content_stream);
|
SCYLLA_ASSERT(req->content_stream);
|
||||||
chunked_content content = co_await read_entire_stream(*req->content_stream, request_content_length_limit);
|
chunked_content content = co_await read_entire_stream(*req->content_stream, request_content_length_limit);
|
||||||
// If the request had no Content-Length, we reserved too many units
|
// If the request had no Content-Length, we reserved too many units
|
||||||
// so need to return some
|
// so need to return some
|
||||||
@@ -731,8 +698,6 @@ future<executor::request_return_type> server::handle_api_request(std::unique_ptr
|
|||||||
sstring content_encoding = req->get_header("Content-Encoding");
|
sstring content_encoding = req->get_header("Content-Encoding");
|
||||||
if (content_encoding == "gzip") {
|
if (content_encoding == "gzip") {
|
||||||
content = co_await ungzip(std::move(content), request_content_length_limit);
|
content = co_await ungzip(std::move(content), request_content_length_limit);
|
||||||
} else if (content_encoding == "deflate") {
|
|
||||||
content = co_await ungzip(std::move(content), request_content_length_limit, false);
|
|
||||||
} else if (!content_encoding.empty()) {
|
} else if (!content_encoding.empty()) {
|
||||||
// DynamoDB returns a 500 error for unsupported Content-Encoding.
|
// DynamoDB returns a 500 error for unsupported Content-Encoding.
|
||||||
// I'm not sure if this is the best error code, but let's do it too.
|
// I'm not sure if this is the best error code, but let's do it too.
|
||||||
@@ -743,12 +708,8 @@ future<executor::request_return_type> server::handle_api_request(std::unique_ptr
|
|||||||
// As long as the system_clients_entry object is alive, this request will
|
// As long as the system_clients_entry object is alive, this request will
|
||||||
// be visible in the "system.clients" virtual table. When requested, this
|
// be visible in the "system.clients" virtual table. When requested, this
|
||||||
// entry will be formatted by server::ongoing_request::make_client_data().
|
// entry will be formatted by server::ongoing_request::make_client_data().
|
||||||
auto user_agent_header = co_await _connection_options_keys_and_values.get_or_load(req->get_header("User-Agent"), [] (const client_options_cache_key_type&) {
|
|
||||||
return make_ready_future<options_cache_value_type>(options_cache_value_type{});
|
|
||||||
});
|
|
||||||
|
|
||||||
auto system_clients_entry = _ongoing_requests.emplace(
|
auto system_clients_entry = _ongoing_requests.emplace(
|
||||||
req->get_client_address(), std::move(user_agent_header),
|
req->get_client_address(), req->get_header("User-Agent"),
|
||||||
username, current_scheduling_group(),
|
username, current_scheduling_group(),
|
||||||
req->get_protocol_name() == "https");
|
req->get_protocol_name() == "https");
|
||||||
|
|
||||||
@@ -771,7 +732,7 @@ future<executor::request_return_type> server::handle_api_request(std::unique_ptr
|
|||||||
if (!username.empty()) {
|
if (!username.empty()) {
|
||||||
client_state.set_login(auth::authenticated_user(username));
|
client_state.set_login(auth::authenticated_user(username));
|
||||||
}
|
}
|
||||||
client_state.maybe_update_per_service_level_params();
|
co_await client_state.maybe_update_per_service_level_params();
|
||||||
|
|
||||||
tracing::trace_state_ptr trace_state = maybe_trace_query(client_state, username, op, content, _max_users_query_size_in_trace_output.get());
|
tracing::trace_state_ptr trace_state = maybe_trace_query(client_state, username, op, content, _max_users_query_size_in_trace_output.get());
|
||||||
tracing::trace(trace_state, "{}", op);
|
tracing::trace(trace_state, "{}", op);
|
||||||
@@ -793,7 +754,7 @@ future<executor::request_return_type> server::handle_api_request(std::unique_ptr
|
|||||||
void server::set_routes(routes& r) {
|
void server::set_routes(routes& r) {
|
||||||
api_handler* req_handler = new api_handler([this] (std::unique_ptr<request> req) mutable {
|
api_handler* req_handler = new api_handler([this] (std::unique_ptr<request> req) mutable {
|
||||||
return handle_api_request(std::move(req));
|
return handle_api_request(std::move(req));
|
||||||
}, _proxy.data_dictionary().get_config());
|
});
|
||||||
|
|
||||||
r.put(operation_type::POST, "/", req_handler);
|
r.put(operation_type::POST, "/", req_handler);
|
||||||
r.put(operation_type::GET, "/", new health_handler(_pending_requests));
|
r.put(operation_type::GET, "/", new health_handler(_pending_requests));
|
||||||
@@ -904,9 +865,7 @@ server::server(executor& exec, service::storage_proxy& proxy, gms::gossiper& gos
|
|||||||
} {
|
} {
|
||||||
}
|
}
|
||||||
|
|
||||||
future<> server::init(net::inet_address addr, std::optional<uint16_t> port, std::optional<uint16_t> https_port,
|
future<> server::init(net::inet_address addr, std::optional<uint16_t> port, std::optional<uint16_t> https_port, std::optional<tls::credentials_builder> creds,
|
||||||
std::optional<uint16_t> port_proxy_protocol, std::optional<uint16_t> https_port_proxy_protocol,
|
|
||||||
std::optional<tls::credentials_builder> creds,
|
|
||||||
utils::updateable_value<bool> enforce_authorization, utils::updateable_value<bool> warn_authorization, utils::updateable_value<uint64_t> max_users_query_size_in_trace_output,
|
utils::updateable_value<bool> enforce_authorization, utils::updateable_value<bool> warn_authorization, utils::updateable_value<uint64_t> max_users_query_size_in_trace_output,
|
||||||
semaphore* memory_limiter, utils::updateable_value<uint32_t> max_concurrent_requests) {
|
semaphore* memory_limiter, utils::updateable_value<uint32_t> max_concurrent_requests) {
|
||||||
_memory_limiter = memory_limiter;
|
_memory_limiter = memory_limiter;
|
||||||
@@ -914,28 +873,20 @@ future<> server::init(net::inet_address addr, std::optional<uint16_t> port, std:
|
|||||||
_warn_authorization = std::move(warn_authorization);
|
_warn_authorization = std::move(warn_authorization);
|
||||||
_max_concurrent_requests = std::move(max_concurrent_requests);
|
_max_concurrent_requests = std::move(max_concurrent_requests);
|
||||||
_max_users_query_size_in_trace_output = std::move(max_users_query_size_in_trace_output);
|
_max_users_query_size_in_trace_output = std::move(max_users_query_size_in_trace_output);
|
||||||
if (!port && !https_port && !port_proxy_protocol && !https_port_proxy_protocol) {
|
if (!port && !https_port) {
|
||||||
return make_exception_future<>(std::runtime_error("Either regular port or TLS port"
|
return make_exception_future<>(std::runtime_error("Either regular port or TLS port"
|
||||||
" must be specified in order to init an alternator HTTP server instance"));
|
" must be specified in order to init an alternator HTTP server instance"));
|
||||||
}
|
}
|
||||||
return seastar::async([this, addr, port, https_port, port_proxy_protocol, https_port_proxy_protocol, creds] {
|
return seastar::async([this, addr, port, https_port, creds] {
|
||||||
_executor.start().get();
|
_executor.start().get();
|
||||||
|
|
||||||
if (port || port_proxy_protocol) {
|
if (port) {
|
||||||
set_routes(_http_server._routes);
|
set_routes(_http_server._routes);
|
||||||
_http_server.set_content_streaming(true);
|
_http_server.set_content_streaming(true);
|
||||||
if (port) {
|
_http_server.listen(socket_address{addr, *port}).get();
|
||||||
_http_server.listen(socket_address{addr, *port}).get();
|
|
||||||
}
|
|
||||||
if (port_proxy_protocol) {
|
|
||||||
listen_options lo;
|
|
||||||
lo.reuse_address = true;
|
|
||||||
lo.proxy_protocol = true;
|
|
||||||
_http_server.listen(socket_address{addr, *port_proxy_protocol}, lo).get();
|
|
||||||
}
|
|
||||||
_enabled_servers.push_back(std::ref(_http_server));
|
_enabled_servers.push_back(std::ref(_http_server));
|
||||||
}
|
}
|
||||||
if (https_port || https_port_proxy_protocol) {
|
if (https_port) {
|
||||||
set_routes(_https_server._routes);
|
set_routes(_https_server._routes);
|
||||||
_https_server.set_content_streaming(true);
|
_https_server.set_content_streaming(true);
|
||||||
|
|
||||||
@@ -955,15 +906,7 @@ future<> server::init(net::inet_address addr, std::optional<uint16_t> port, std:
|
|||||||
} else {
|
} else {
|
||||||
_credentials = creds->build_server_credentials();
|
_credentials = creds->build_server_credentials();
|
||||||
}
|
}
|
||||||
if (https_port) {
|
_https_server.listen(socket_address{addr, *https_port}, _credentials).get();
|
||||||
_https_server.listen(socket_address{addr, *https_port}, _credentials).get();
|
|
||||||
}
|
|
||||||
if (https_port_proxy_protocol) {
|
|
||||||
listen_options lo;
|
|
||||||
lo.reuse_address = true;
|
|
||||||
lo.proxy_protocol = true;
|
|
||||||
_https_server.listen(socket_address{addr, *https_port_proxy_protocol}, lo, _credentials).get();
|
|
||||||
}
|
|
||||||
_enabled_servers.push_back(std::ref(_https_server));
|
_enabled_servers.push_back(std::ref(_https_server));
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
@@ -1036,15 +979,16 @@ client_data server::ongoing_request::make_client_data() const {
|
|||||||
// and keep "driver_version" unset.
|
// and keep "driver_version" unset.
|
||||||
cd.driver_name = _user_agent;
|
cd.driver_name = _user_agent;
|
||||||
// Leave "protocol_version" unset, it has no meaning in Alternator.
|
// Leave "protocol_version" unset, it has no meaning in Alternator.
|
||||||
// Leave "hostname", "ssl_protocol" and "ssl_cipher_suite" unset for Alternator.
|
// Leave "hostname", "ssl_protocol" and "ssl_cipher_suite" unset.
|
||||||
// Note: CQL sets ssl_protocol and ssl_cipher_suite via generic_server::connection base class.
|
// As reported in issue #9216, we never set these fields in CQL
|
||||||
|
// either (see cql_server::connection::make_client_data()).
|
||||||
return cd;
|
return cd;
|
||||||
}
|
}
|
||||||
|
|
||||||
future<utils::chunked_vector<foreign_ptr<std::unique_ptr<client_data>>>> server::get_client_data() {
|
future<utils::chunked_vector<client_data>> server::get_client_data() {
|
||||||
utils::chunked_vector<foreign_ptr<std::unique_ptr<client_data>>> ret;
|
utils::chunked_vector<client_data> ret;
|
||||||
co_await _ongoing_requests.for_each_gently([&ret] (const ongoing_request& r) {
|
co_await _ongoing_requests.for_each_gently([&ret] (const ongoing_request& r) {
|
||||||
ret.emplace_back(make_foreign(std::make_unique<client_data>(r.make_client_data())));
|
ret.emplace_back(r.make_client_data());
|
||||||
});
|
});
|
||||||
co_return ret;
|
co_return ret;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -55,7 +55,6 @@ class server : public peering_sharded_service<server> {
|
|||||||
// though it isn't really relevant for Alternator which defines its own
|
// though it isn't really relevant for Alternator which defines its own
|
||||||
// timeouts separately. We can create this object only once.
|
// timeouts separately. We can create this object only once.
|
||||||
updateable_timeout_config _timeout_config;
|
updateable_timeout_config _timeout_config;
|
||||||
client_options_cache_type _connection_options_keys_and_values;
|
|
||||||
|
|
||||||
alternator_callbacks_map _callbacks;
|
alternator_callbacks_map _callbacks;
|
||||||
|
|
||||||
@@ -89,7 +88,7 @@ class server : public peering_sharded_service<server> {
|
|||||||
// is called when reading the "system.clients" virtual table.
|
// is called when reading the "system.clients" virtual table.
|
||||||
struct ongoing_request {
|
struct ongoing_request {
|
||||||
socket_address _client_address;
|
socket_address _client_address;
|
||||||
client_options_cache_entry_type _user_agent;
|
sstring _user_agent;
|
||||||
sstring _username;
|
sstring _username;
|
||||||
scheduling_group _scheduling_group;
|
scheduling_group _scheduling_group;
|
||||||
bool _is_https;
|
bool _is_https;
|
||||||
@@ -100,9 +99,7 @@ class server : public peering_sharded_service<server> {
|
|||||||
public:
|
public:
|
||||||
server(executor& executor, service::storage_proxy& proxy, gms::gossiper& gossiper, auth::service& service, qos::service_level_controller& sl_controller);
|
server(executor& executor, service::storage_proxy& proxy, gms::gossiper& gossiper, auth::service& service, qos::service_level_controller& sl_controller);
|
||||||
|
|
||||||
future<> init(net::inet_address addr, std::optional<uint16_t> port, std::optional<uint16_t> https_port,
|
future<> init(net::inet_address addr, std::optional<uint16_t> port, std::optional<uint16_t> https_port, std::optional<tls::credentials_builder> creds,
|
||||||
std::optional<uint16_t> port_proxy_protocol, std::optional<uint16_t> https_port_proxy_protocol,
|
|
||||||
std::optional<tls::credentials_builder> creds,
|
|
||||||
utils::updateable_value<bool> enforce_authorization, utils::updateable_value<bool> warn_authorization, utils::updateable_value<uint64_t> max_users_query_size_in_trace_output,
|
utils::updateable_value<bool> enforce_authorization, utils::updateable_value<bool> warn_authorization, utils::updateable_value<uint64_t> max_users_query_size_in_trace_output,
|
||||||
semaphore* memory_limiter, utils::updateable_value<uint32_t> max_concurrent_requests);
|
semaphore* memory_limiter, utils::updateable_value<uint32_t> max_concurrent_requests);
|
||||||
future<> stop();
|
future<> stop();
|
||||||
@@ -110,7 +107,7 @@ public:
|
|||||||
// table "system.clients" is read. It is expected to generate a list of
|
// table "system.clients" is read. It is expected to generate a list of
|
||||||
// clients connected to this server (on this shard). This function is
|
// clients connected to this server (on this shard). This function is
|
||||||
// called by alternator::controller::get_client_data().
|
// called by alternator::controller::get_client_data().
|
||||||
future<utils::chunked_vector<foreign_ptr<std::unique_ptr<client_data>>>> get_client_data();
|
future<utils::chunked_vector<client_data>> get_client_data();
|
||||||
private:
|
private:
|
||||||
void set_routes(seastar::httpd::routes& r);
|
void set_routes(seastar::httpd::routes& r);
|
||||||
// If verification succeeds, returns the authenticated user's username
|
// If verification succeeds, returns the authenticated user's username
|
||||||
|
|||||||
@@ -14,6 +14,20 @@
|
|||||||
namespace alternator {
|
namespace alternator {
|
||||||
|
|
||||||
const char* ALTERNATOR_METRICS = "alternator";
|
const char* ALTERNATOR_METRICS = "alternator";
|
||||||
|
static seastar::metrics::histogram estimated_histogram_to_metrics(const utils::estimated_histogram& histogram) {
|
||||||
|
seastar::metrics::histogram res;
|
||||||
|
res.buckets.resize(histogram.bucket_offsets.size());
|
||||||
|
uint64_t cumulative_count = 0;
|
||||||
|
res.sample_count = histogram._count;
|
||||||
|
res.sample_sum = histogram._sample_sum;
|
||||||
|
for (size_t i = 0; i < res.buckets.size(); i++) {
|
||||||
|
auto& v = res.buckets[i];
|
||||||
|
v.upper_bound = histogram.bucket_offsets[i];
|
||||||
|
cumulative_count += histogram.buckets[i];
|
||||||
|
v.count = cumulative_count;
|
||||||
|
}
|
||||||
|
return res;
|
||||||
|
}
|
||||||
|
|
||||||
static seastar::metrics::label column_family_label("cf");
|
static seastar::metrics::label column_family_label("cf");
|
||||||
static seastar::metrics::label keyspace_label("ks");
|
static seastar::metrics::label keyspace_label("ks");
|
||||||
@@ -137,21 +151,21 @@ static void register_metrics_with_optional_table(seastar::metrics::metric_groups
|
|||||||
seastar::metrics::make_counter("batch_item_count", seastar::metrics::description("The total number of items processed across all batches"), labels,
|
seastar::metrics::make_counter("batch_item_count", seastar::metrics::description("The total number of items processed across all batches"), labels,
|
||||||
stats.api_operations.batch_get_item_batch_total)(op("BatchGetItem")).aggregate(aggregate_labels).set_skip_when_empty(),
|
stats.api_operations.batch_get_item_batch_total)(op("BatchGetItem")).aggregate(aggregate_labels).set_skip_when_empty(),
|
||||||
seastar::metrics::make_histogram("batch_item_count_histogram", seastar::metrics::description("Histogram of the number of items in a batch request"), labels,
|
seastar::metrics::make_histogram("batch_item_count_histogram", seastar::metrics::description("Histogram of the number of items in a batch request"), labels,
|
||||||
[&stats]{ return to_metrics_histogram(stats.api_operations.batch_get_item_histogram);})(op("BatchGetItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(),
|
[&stats]{ return estimated_histogram_to_metrics(stats.api_operations.batch_get_item_histogram);})(op("BatchGetItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(),
|
||||||
seastar::metrics::make_histogram("batch_item_count_histogram", seastar::metrics::description("Histogram of the number of items in a batch request"), labels,
|
seastar::metrics::make_histogram("batch_item_count_histogram", seastar::metrics::description("Histogram of the number of items in a batch request"), labels,
|
||||||
[&stats]{ return to_metrics_histogram(stats.api_operations.batch_write_item_histogram);})(op("BatchWriteItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(),
|
[&stats]{ return estimated_histogram_to_metrics(stats.api_operations.batch_write_item_histogram);})(op("BatchWriteItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(),
|
||||||
seastar::metrics::make_histogram("operation_size_kb", seastar::metrics::description("Histogram of item sizes involved in a request"), labels,
|
seastar::metrics::make_histogram("operation_size_kb", seastar::metrics::description("Histogram of item sizes involved in a request"), labels,
|
||||||
[&stats]{ return to_metrics_histogram(stats.operation_sizes.get_item_op_size_kb);})(op("GetItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(),
|
[&stats]{ return estimated_histogram_to_metrics(stats.operation_sizes.get_item_op_size_kb);})(op("GetItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(),
|
||||||
seastar::metrics::make_histogram("operation_size_kb", seastar::metrics::description("Histogram of item sizes involved in a request"), labels,
|
seastar::metrics::make_histogram("operation_size_kb", seastar::metrics::description("Histogram of item sizes involved in a request"), labels,
|
||||||
[&stats]{ return to_metrics_histogram(stats.operation_sizes.put_item_op_size_kb);})(op("PutItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(),
|
[&stats]{ return estimated_histogram_to_metrics(stats.operation_sizes.put_item_op_size_kb);})(op("PutItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(),
|
||||||
seastar::metrics::make_histogram("operation_size_kb", seastar::metrics::description("Histogram of item sizes involved in a request"), labels,
|
seastar::metrics::make_histogram("operation_size_kb", seastar::metrics::description("Histogram of item sizes involved in a request"), labels,
|
||||||
[&stats]{ return to_metrics_histogram(stats.operation_sizes.delete_item_op_size_kb);})(op("DeleteItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(),
|
[&stats]{ return estimated_histogram_to_metrics(stats.operation_sizes.delete_item_op_size_kb);})(op("DeleteItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(),
|
||||||
seastar::metrics::make_histogram("operation_size_kb", seastar::metrics::description("Histogram of item sizes involved in a request"), labels,
|
seastar::metrics::make_histogram("operation_size_kb", seastar::metrics::description("Histogram of item sizes involved in a request"), labels,
|
||||||
[&stats]{ return to_metrics_histogram(stats.operation_sizes.update_item_op_size_kb);})(op("UpdateItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(),
|
[&stats]{ return estimated_histogram_to_metrics(stats.operation_sizes.update_item_op_size_kb);})(op("UpdateItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(),
|
||||||
seastar::metrics::make_histogram("operation_size_kb", seastar::metrics::description("Histogram of item sizes involved in a request"), labels,
|
seastar::metrics::make_histogram("operation_size_kb", seastar::metrics::description("Histogram of item sizes involved in a request"), labels,
|
||||||
[&stats]{ return to_metrics_histogram(stats.operation_sizes.batch_get_item_op_size_kb);})(op("BatchGetItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(),
|
[&stats]{ return estimated_histogram_to_metrics(stats.operation_sizes.batch_get_item_op_size_kb);})(op("BatchGetItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(),
|
||||||
seastar::metrics::make_histogram("operation_size_kb", seastar::metrics::description("Histogram of item sizes involved in a request"), labels,
|
seastar::metrics::make_histogram("operation_size_kb", seastar::metrics::description("Histogram of item sizes involved in a request"), labels,
|
||||||
[&stats]{ return to_metrics_histogram(stats.operation_sizes.batch_write_item_op_size_kb);})(op("BatchWriteItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(),
|
[&stats]{ return estimated_histogram_to_metrics(stats.operation_sizes.batch_write_item_op_size_kb);})(op("BatchWriteItem")).aggregate({seastar::metrics::shard_label}).set_skip_when_empty(),
|
||||||
});
|
});
|
||||||
|
|
||||||
seastar::metrics::label expression_label("expression");
|
seastar::metrics::label expression_label("expression");
|
||||||
|
|||||||
@@ -16,8 +16,6 @@
|
|||||||
#include "cql3/stats.hh"
|
#include "cql3/stats.hh"
|
||||||
|
|
||||||
namespace alternator {
|
namespace alternator {
|
||||||
using batch_histogram = utils::estimated_histogram_with_max<128>;
|
|
||||||
using op_size_histogram = utils::estimated_histogram_with_max<512>;
|
|
||||||
|
|
||||||
// Object holding per-shard statistics related to Alternator.
|
// Object holding per-shard statistics related to Alternator.
|
||||||
// While this object is alive, these metrics are also registered to be
|
// While this object is alive, these metrics are also registered to be
|
||||||
@@ -78,34 +76,34 @@ public:
|
|||||||
utils::timed_rate_moving_average_summary_and_histogram batch_get_item_latency;
|
utils::timed_rate_moving_average_summary_and_histogram batch_get_item_latency;
|
||||||
utils::timed_rate_moving_average_summary_and_histogram get_records_latency;
|
utils::timed_rate_moving_average_summary_and_histogram get_records_latency;
|
||||||
|
|
||||||
batch_histogram batch_get_item_histogram;
|
utils::estimated_histogram batch_get_item_histogram{22}; // a histogram that covers the range 1 - 100
|
||||||
batch_histogram batch_write_item_histogram;
|
utils::estimated_histogram batch_write_item_histogram{22}; // a histogram that covers the range 1 - 100
|
||||||
} api_operations;
|
} api_operations;
|
||||||
// Operation size metrics
|
// Operation size metrics
|
||||||
struct {
|
struct {
|
||||||
// Item size statistics collected per table and aggregated per node.
|
// Item size statistics collected per table and aggregated per node.
|
||||||
// Each histogram covers the range 0 - 512. Resolves #25143.
|
// Each histogram covers the range 0 - 446. Resolves #25143.
|
||||||
// A size is the retrieved item's size.
|
// A size is the retrieved item's size.
|
||||||
op_size_histogram get_item_op_size_kb;
|
utils::estimated_histogram get_item_op_size_kb{30};
|
||||||
// A size is the maximum of the new item's size and the old item's size.
|
// A size is the maximum of the new item's size and the old item's size.
|
||||||
op_size_histogram put_item_op_size_kb;
|
utils::estimated_histogram put_item_op_size_kb{30};
|
||||||
// A size is the deleted item's size. If the deleted item's size is
|
// A size is the deleted item's size. If the deleted item's size is
|
||||||
// unknown (i.e. read-before-write wasn't necessary and it wasn't
|
// unknown (i.e. read-before-write wasn't necessary and it wasn't
|
||||||
// forced by a configuration option), it won't be recorded on the
|
// forced by a configuration option), it won't be recorded on the
|
||||||
// histogram.
|
// histogram.
|
||||||
op_size_histogram delete_item_op_size_kb;
|
utils::estimated_histogram delete_item_op_size_kb{30};
|
||||||
// A size is the maximum of existing item's size and the estimated size
|
// A size is the maximum of existing item's size and the estimated size
|
||||||
// of the update. This will be changed to the maximum of the existing item's
|
// of the update. This will be changed to the maximum of the existing item's
|
||||||
// size and the new item's size in a subsequent PR.
|
// size and the new item's size in a subsequent PR.
|
||||||
op_size_histogram update_item_op_size_kb;
|
utils::estimated_histogram update_item_op_size_kb{30};
|
||||||
|
|
||||||
// A size is the sum of the sizes of all items per table. This means
|
// A size is the sum of the sizes of all items per table. This means
|
||||||
// that a single BatchGetItem / BatchWriteItem updates the histogram
|
// that a single BatchGetItem / BatchWriteItem updates the histogram
|
||||||
// for each table that it has items in.
|
// for each table that it has items in.
|
||||||
// The sizes are the retrieved items' sizes grouped per table.
|
// The sizes are the retrieved items' sizes grouped per table.
|
||||||
op_size_histogram batch_get_item_op_size_kb;
|
utils::estimated_histogram batch_get_item_op_size_kb{30};
|
||||||
// The sizes are the the written items' sizes grouped per table.
|
// The sizes are the the written items' sizes grouped per table.
|
||||||
op_size_histogram batch_write_item_op_size_kb;
|
utils::estimated_histogram batch_write_item_op_size_kb{30};
|
||||||
} operation_sizes;
|
} operation_sizes;
|
||||||
// Count of authentication and authorization failures, counted if either
|
// Count of authentication and authorization failures, counted if either
|
||||||
// alternator_enforce_authorization or alternator_warn_authorization are
|
// alternator_enforce_authorization or alternator_warn_authorization are
|
||||||
@@ -142,7 +140,7 @@ public:
|
|||||||
cql3::cql_stats cql_stats;
|
cql3::cql_stats cql_stats;
|
||||||
|
|
||||||
// Enumeration of expression types only for stats
|
// Enumeration of expression types only for stats
|
||||||
// if needed it can be extended e.g. per operation
|
// if needed it can be extended e.g. per operation
|
||||||
enum expression_types {
|
enum expression_types {
|
||||||
UPDATE_EXPRESSION,
|
UPDATE_EXPRESSION,
|
||||||
CONDITION_EXPRESSION,
|
CONDITION_EXPRESSION,
|
||||||
@@ -166,7 +164,7 @@ struct table_stats {
|
|||||||
void register_metrics(seastar::metrics::metric_groups& metrics, const stats& stats);
|
void register_metrics(seastar::metrics::metric_groups& metrics, const stats& stats);
|
||||||
|
|
||||||
inline uint64_t bytes_to_kb_ceil(uint64_t bytes) {
|
inline uint64_t bytes_to_kb_ceil(uint64_t bytes) {
|
||||||
return (bytes) / 1024;
|
return (bytes + 1023) / 1024;
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -33,8 +33,6 @@
|
|||||||
#include "data_dictionary/data_dictionary.hh"
|
#include "data_dictionary/data_dictionary.hh"
|
||||||
#include "utils/rjson.hh"
|
#include "utils/rjson.hh"
|
||||||
|
|
||||||
static logging::logger elogger("alternator-streams");
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Base template type to implement rapidjson::internal::TypeHelper<...>:s
|
* Base template type to implement rapidjson::internal::TypeHelper<...>:s
|
||||||
* for types that are ostreamable/string constructible/castable.
|
* for types that are ostreamable/string constructible/castable.
|
||||||
@@ -430,25 +428,6 @@ using namespace std::chrono_literals;
|
|||||||
// Dynamo docs says no data shall live longer than 24h.
|
// Dynamo docs says no data shall live longer than 24h.
|
||||||
static constexpr auto dynamodb_streams_max_window = 24h;
|
static constexpr auto dynamodb_streams_max_window = 24h;
|
||||||
|
|
||||||
// find the parent shard in previous generation for the given child shard
|
|
||||||
// takes care of wrap-around case in vnodes
|
|
||||||
// prev_streams must be sorted by token
|
|
||||||
const cdc::stream_id& find_parent_shard_in_previous_generation(db_clock::time_point prev_timestamp, const utils::chunked_vector<cdc::stream_id> &prev_streams, const cdc::stream_id &child) {
|
|
||||||
if (prev_streams.empty()) {
|
|
||||||
// something is really wrong - streams are empty
|
|
||||||
// let's try internal_error in hope it will be notified and fixed
|
|
||||||
on_internal_error(elogger, fmt::format("streams are empty for cdc generation at {} ({})", prev_timestamp, prev_timestamp.time_since_epoch().count()));
|
|
||||||
}
|
|
||||||
auto it = std::lower_bound(prev_streams.begin(), prev_streams.end(), child.token(), [](const cdc::stream_id& id, const dht::token& t) {
|
|
||||||
return id.token() < t;
|
|
||||||
});
|
|
||||||
if (it == prev_streams.end()) {
|
|
||||||
// wrap around case - take first
|
|
||||||
it = prev_streams.begin();
|
|
||||||
}
|
|
||||||
return *it;
|
|
||||||
}
|
|
||||||
|
|
||||||
future<executor::request_return_type> executor::describe_stream(client_state& client_state, service_permit permit, rjson::value request) {
|
future<executor::request_return_type> executor::describe_stream(client_state& client_state, service_permit permit, rjson::value request) {
|
||||||
_stats.api_operations.describe_stream++;
|
_stats.api_operations.describe_stream++;
|
||||||
|
|
||||||
@@ -512,7 +491,7 @@ future<executor::request_return_type> executor::describe_stream(client_state& cl
|
|||||||
|
|
||||||
if (!opts.enabled()) {
|
if (!opts.enabled()) {
|
||||||
rjson::add(ret, "StreamDescription", std::move(stream_desc));
|
rjson::add(ret, "StreamDescription", std::move(stream_desc));
|
||||||
co_return rjson::print(std::move(ret));
|
return make_ready_future<executor::request_return_type>(rjson::print(std::move(ret)));
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: label
|
// TODO: label
|
||||||
@@ -523,113 +502,123 @@ future<executor::request_return_type> executor::describe_stream(client_state& cl
|
|||||||
// filter out cdc generations older than the table or now() - cdc::ttl (typically dynamodb_streams_max_window - 24h)
|
// filter out cdc generations older than the table or now() - cdc::ttl (typically dynamodb_streams_max_window - 24h)
|
||||||
auto low_ts = std::max(as_timepoint(schema->id()), db_clock::now() - ttl);
|
auto low_ts = std::max(as_timepoint(schema->id()), db_clock::now() - ttl);
|
||||||
|
|
||||||
std::map<db_clock::time_point, cdc::streams_version> topologies = co_await _sdks.cdc_get_versioned_streams(low_ts, { normal_token_owners });
|
return _sdks.cdc_get_versioned_streams(low_ts, { normal_token_owners }).then([db, shard_start, limit, ret = std::move(ret), stream_desc = std::move(stream_desc)] (std::map<db_clock::time_point, cdc::streams_version> topologies) mutable {
|
||||||
auto e = topologies.end();
|
|
||||||
auto prev = e;
|
|
||||||
auto shards = rjson::empty_array();
|
|
||||||
|
|
||||||
std::optional<shard_id> last;
|
auto e = topologies.end();
|
||||||
|
auto prev = e;
|
||||||
|
auto shards = rjson::empty_array();
|
||||||
|
|
||||||
auto i = topologies.begin();
|
std::optional<shard_id> last;
|
||||||
// if we're a paged query, skip to the generation where we left of.
|
|
||||||
if (shard_start) {
|
|
||||||
i = topologies.find(shard_start->time);
|
|
||||||
}
|
|
||||||
|
|
||||||
// for parent-child stuff we need id:s to be sorted by token
|
auto i = topologies.begin();
|
||||||
// (see explanation above) since we want to find closest
|
// if we're a paged query, skip to the generation where we left of.
|
||||||
// token boundary when determining parent.
|
if (shard_start) {
|
||||||
// #7346 - we processed and searched children/parents in
|
i = topologies.find(shard_start->time);
|
||||||
// stored order, which is not necessarily token order,
|
}
|
||||||
// so the finding of "closest" token boundary (using upper bound)
|
|
||||||
// could give somewhat weird results.
|
|
||||||
static auto token_cmp = [](const cdc::stream_id& id1, const cdc::stream_id& id2) {
|
|
||||||
return id1.token() < id2.token();
|
|
||||||
};
|
|
||||||
|
|
||||||
// #7409 - shards must be returned in lexicographical order,
|
// for parent-child stuff we need id:s to be sorted by token
|
||||||
// normal bytes compare is string_traits<int8_t>::compare.
|
// (see explanation above) since we want to find closest
|
||||||
// thus bytes 0x8000 is less than 0x0000. By doing unsigned
|
// token boundary when determining parent.
|
||||||
// compare instead we inadvertently will sort in string lexical.
|
// #7346 - we processed and searched children/parents in
|
||||||
static auto id_cmp = [](const cdc::stream_id& id1, const cdc::stream_id& id2) {
|
// stored order, which is not necessarily token order,
|
||||||
return compare_unsigned(id1.to_bytes(), id2.to_bytes()) < 0;
|
// so the finding of "closest" token boundary (using upper bound)
|
||||||
};
|
// could give somewhat weird results.
|
||||||
|
static auto token_cmp = [](const cdc::stream_id& id1, const cdc::stream_id& id2) {
|
||||||
// need a prev even if we are skipping stuff
|
return id1.token() < id2.token();
|
||||||
if (i != topologies.begin()) {
|
};
|
||||||
prev = std::prev(i);
|
|
||||||
}
|
|
||||||
|
|
||||||
for (; limit > 0 && i != e; prev = i, ++i) {
|
|
||||||
auto& [ts, sv] = *i;
|
|
||||||
|
|
||||||
last = std::nullopt;
|
|
||||||
|
|
||||||
auto lo = sv.streams.begin();
|
|
||||||
auto end = sv.streams.end();
|
|
||||||
|
|
||||||
// #7409 - shards must be returned in lexicographical order,
|
// #7409 - shards must be returned in lexicographical order,
|
||||||
std::sort(lo, end, id_cmp);
|
// normal bytes compare is string_traits<int8_t>::compare.
|
||||||
|
// thus bytes 0x8000 is less than 0x0000. By doing unsigned
|
||||||
|
// compare instead we inadvertently will sort in string lexical.
|
||||||
|
static auto id_cmp = [](const cdc::stream_id& id1, const cdc::stream_id& id2) {
|
||||||
|
return compare_unsigned(id1.to_bytes(), id2.to_bytes()) < 0;
|
||||||
|
};
|
||||||
|
|
||||||
if (shard_start) {
|
// need a prev even if we are skipping stuff
|
||||||
// find next shard position
|
if (i != topologies.begin()) {
|
||||||
lo = std::upper_bound(lo, end, shard_start->id, id_cmp);
|
prev = std::prev(i);
|
||||||
shard_start = std::nullopt;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (lo != end && prev != e) {
|
for (; limit > 0 && i != e; prev = i, ++i) {
|
||||||
// We want older stuff sorted in token order so we can find matching
|
auto& [ts, sv] = *i;
|
||||||
// token range when determining parent shard.
|
|
||||||
std::stable_sort(prev->second.streams.begin(), prev->second.streams.end(), token_cmp);
|
|
||||||
}
|
|
||||||
|
|
||||||
auto expired = [&]() -> std::optional<db_clock::time_point> {
|
|
||||||
auto j = std::next(i);
|
|
||||||
if (j == e) {
|
|
||||||
return std::nullopt;
|
|
||||||
}
|
|
||||||
// add this so we sort of match potential
|
|
||||||
// sequence numbers in get_records result.
|
|
||||||
return j->first + confidence_interval(db);
|
|
||||||
}();
|
|
||||||
|
|
||||||
while (lo != end) {
|
|
||||||
auto& id = *lo++;
|
|
||||||
|
|
||||||
auto shard = rjson::empty_object();
|
|
||||||
|
|
||||||
if (prev != e) {
|
|
||||||
auto &pid = find_parent_shard_in_previous_generation(prev->first, prev->second.streams, id);
|
|
||||||
rjson::add(shard, "ParentShardId", shard_id(prev->first, pid));
|
|
||||||
}
|
|
||||||
|
|
||||||
last.emplace(ts, id);
|
|
||||||
rjson::add(shard, "ShardId", *last);
|
|
||||||
auto range = rjson::empty_object();
|
|
||||||
rjson::add(range, "StartingSequenceNumber", sequence_number(utils::UUID_gen::min_time_UUID(ts.time_since_epoch())));
|
|
||||||
if (expired) {
|
|
||||||
rjson::add(range, "EndingSequenceNumber", sequence_number(utils::UUID_gen::min_time_UUID(expired->time_since_epoch())));
|
|
||||||
}
|
|
||||||
|
|
||||||
rjson::add(shard, "SequenceNumberRange", std::move(range));
|
|
||||||
rjson::push_back(shards, std::move(shard));
|
|
||||||
|
|
||||||
if (--limit == 0) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
last = std::nullopt;
|
last = std::nullopt;
|
||||||
|
|
||||||
|
auto lo = sv.streams.begin();
|
||||||
|
auto end = sv.streams.end();
|
||||||
|
|
||||||
|
// #7409 - shards must be returned in lexicographical order,
|
||||||
|
std::sort(lo, end, id_cmp);
|
||||||
|
|
||||||
|
if (shard_start) {
|
||||||
|
// find next shard position
|
||||||
|
lo = std::upper_bound(lo, end, shard_start->id, id_cmp);
|
||||||
|
shard_start = std::nullopt;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (lo != end && prev != e) {
|
||||||
|
// We want older stuff sorted in token order so we can find matching
|
||||||
|
// token range when determining parent shard.
|
||||||
|
std::stable_sort(prev->second.streams.begin(), prev->second.streams.end(), token_cmp);
|
||||||
|
}
|
||||||
|
|
||||||
|
auto expired = [&]() -> std::optional<db_clock::time_point> {
|
||||||
|
auto j = std::next(i);
|
||||||
|
if (j == e) {
|
||||||
|
return std::nullopt;
|
||||||
|
}
|
||||||
|
// add this so we sort of match potential
|
||||||
|
// sequence numbers in get_records result.
|
||||||
|
return j->first + confidence_interval(db);
|
||||||
|
}();
|
||||||
|
|
||||||
|
while (lo != end) {
|
||||||
|
auto& id = *lo++;
|
||||||
|
|
||||||
|
auto shard = rjson::empty_object();
|
||||||
|
|
||||||
|
if (prev != e) {
|
||||||
|
auto& pids = prev->second.streams;
|
||||||
|
auto pid = std::upper_bound(pids.begin(), pids.end(), id.token(), [](const dht::token& t, const cdc::stream_id& id) {
|
||||||
|
return t < id.token();
|
||||||
|
});
|
||||||
|
if (pid != pids.begin()) {
|
||||||
|
pid = std::prev(pid);
|
||||||
|
}
|
||||||
|
if (pid != pids.end()) {
|
||||||
|
rjson::add(shard, "ParentShardId", shard_id(prev->first, *pid));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
last.emplace(ts, id);
|
||||||
|
rjson::add(shard, "ShardId", *last);
|
||||||
|
auto range = rjson::empty_object();
|
||||||
|
rjson::add(range, "StartingSequenceNumber", sequence_number(utils::UUID_gen::min_time_UUID(ts.time_since_epoch())));
|
||||||
|
if (expired) {
|
||||||
|
rjson::add(range, "EndingSequenceNumber", sequence_number(utils::UUID_gen::min_time_UUID(expired->time_since_epoch())));
|
||||||
|
}
|
||||||
|
|
||||||
|
rjson::add(shard, "SequenceNumberRange", std::move(range));
|
||||||
|
rjson::push_back(shards, std::move(shard));
|
||||||
|
|
||||||
|
if (--limit == 0) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
last = std::nullopt;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
if (last) {
|
if (last) {
|
||||||
rjson::add(stream_desc, "LastEvaluatedShardId", *last);
|
rjson::add(stream_desc, "LastEvaluatedShardId", *last);
|
||||||
}
|
}
|
||||||
|
|
||||||
rjson::add(stream_desc, "Shards", std::move(shards));
|
rjson::add(stream_desc, "Shards", std::move(shards));
|
||||||
rjson::add(ret, "StreamDescription", std::move(stream_desc));
|
rjson::add(ret, "StreamDescription", std::move(stream_desc));
|
||||||
|
|
||||||
co_return rjson::print(std::move(ret));
|
return make_ready_future<executor::request_return_type>(rjson::print(std::move(ret)));
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
enum class shard_iterator_type {
|
enum class shard_iterator_type {
|
||||||
@@ -909,169 +898,172 @@ future<executor::request_return_type> executor::get_records(client_state& client
|
|||||||
auto command = ::make_lw_shared<query::read_command>(schema->id(), schema->version(), partition_slice, _proxy.get_max_result_size(partition_slice),
|
auto command = ::make_lw_shared<query::read_command>(schema->id(), schema->version(), partition_slice, _proxy.get_max_result_size(partition_slice),
|
||||||
query::tombstone_limit(_proxy.get_tombstone_limit()), query::row_limit(limit * mul));
|
query::tombstone_limit(_proxy.get_tombstone_limit()), query::row_limit(limit * mul));
|
||||||
|
|
||||||
service::storage_proxy::coordinator_query_result qr = co_await _proxy.query(schema, std::move(command), std::move(partition_ranges), cl, service::storage_proxy::coordinator_query_options(default_timeout(), std::move(permit), client_state));
|
co_return co_await _proxy.query(schema, std::move(command), std::move(partition_ranges), cl, service::storage_proxy::coordinator_query_options(default_timeout(), std::move(permit), client_state)).then(
|
||||||
cql3::selection::result_set_builder builder(*selection, gc_clock::now());
|
[this, schema, partition_slice = std::move(partition_slice), selection = std::move(selection), start_time = std::move(start_time), limit, key_names = std::move(key_names), attr_names = std::move(attr_names), type, iter, high_ts] (service::storage_proxy::coordinator_query_result qr) mutable {
|
||||||
query::result_view::consume(*qr.query_result, partition_slice, cql3::selection::result_set_builder::visitor(builder, *schema, *selection));
|
cql3::selection::result_set_builder builder(*selection, gc_clock::now());
|
||||||
|
query::result_view::consume(*qr.query_result, partition_slice, cql3::selection::result_set_builder::visitor(builder, *schema, *selection));
|
||||||
|
|
||||||
auto result_set = builder.build();
|
auto result_set = builder.build();
|
||||||
auto records = rjson::empty_array();
|
auto records = rjson::empty_array();
|
||||||
|
|
||||||
auto& metadata = result_set->get_metadata();
|
auto& metadata = result_set->get_metadata();
|
||||||
|
|
||||||
auto op_index = std::distance(metadata.get_names().begin(),
|
auto op_index = std::distance(metadata.get_names().begin(),
|
||||||
std::find_if(metadata.get_names().begin(), metadata.get_names().end(), [](const lw_shared_ptr<cql3::column_specification>& cdef) {
|
std::find_if(metadata.get_names().begin(), metadata.get_names().end(), [](const lw_shared_ptr<cql3::column_specification>& cdef) {
|
||||||
return cdef->name->name() == op_column_name;
|
return cdef->name->name() == op_column_name;
|
||||||
})
|
})
|
||||||
);
|
);
|
||||||
auto ts_index = std::distance(metadata.get_names().begin(),
|
auto ts_index = std::distance(metadata.get_names().begin(),
|
||||||
std::find_if(metadata.get_names().begin(), metadata.get_names().end(), [](const lw_shared_ptr<cql3::column_specification>& cdef) {
|
std::find_if(metadata.get_names().begin(), metadata.get_names().end(), [](const lw_shared_ptr<cql3::column_specification>& cdef) {
|
||||||
return cdef->name->name() == timestamp_column_name;
|
return cdef->name->name() == timestamp_column_name;
|
||||||
})
|
})
|
||||||
);
|
);
|
||||||
auto eor_index = std::distance(metadata.get_names().begin(),
|
auto eor_index = std::distance(metadata.get_names().begin(),
|
||||||
std::find_if(metadata.get_names().begin(), metadata.get_names().end(), [](const lw_shared_ptr<cql3::column_specification>& cdef) {
|
std::find_if(metadata.get_names().begin(), metadata.get_names().end(), [](const lw_shared_ptr<cql3::column_specification>& cdef) {
|
||||||
return cdef->name->name() == eor_column_name;
|
return cdef->name->name() == eor_column_name;
|
||||||
})
|
})
|
||||||
);
|
);
|
||||||
|
|
||||||
std::optional<utils::UUID> timestamp;
|
std::optional<utils::UUID> timestamp;
|
||||||
auto dynamodb = rjson::empty_object();
|
auto dynamodb = rjson::empty_object();
|
||||||
auto record = rjson::empty_object();
|
auto record = rjson::empty_object();
|
||||||
const auto dc_name = _proxy.get_token_metadata_ptr()->get_topology().get_datacenter();
|
const auto dc_name = _proxy.get_token_metadata_ptr()->get_topology().get_datacenter();
|
||||||
|
|
||||||
using op_utype = std::underlying_type_t<cdc::operation>;
|
using op_utype = std::underlying_type_t<cdc::operation>;
|
||||||
|
|
||||||
auto maybe_add_record = [&] {
|
auto maybe_add_record = [&] {
|
||||||
if (!dynamodb.ObjectEmpty()) {
|
if (!dynamodb.ObjectEmpty()) {
|
||||||
rjson::add(record, "dynamodb", std::move(dynamodb));
|
rjson::add(record, "dynamodb", std::move(dynamodb));
|
||||||
dynamodb = rjson::empty_object();
|
dynamodb = rjson::empty_object();
|
||||||
}
|
}
|
||||||
if (!record.ObjectEmpty()) {
|
if (!record.ObjectEmpty()) {
|
||||||
rjson::add(record, "awsRegion", rjson::from_string(dc_name));
|
rjson::add(record, "awsRegion", rjson::from_string(dc_name));
|
||||||
rjson::add(record, "eventID", event_id(iter.shard.id, *timestamp));
|
rjson::add(record, "eventID", event_id(iter.shard.id, *timestamp));
|
||||||
rjson::add(record, "eventSource", "scylladb:alternator");
|
rjson::add(record, "eventSource", "scylladb:alternator");
|
||||||
rjson::add(record, "eventVersion", "1.1");
|
rjson::add(record, "eventVersion", "1.1");
|
||||||
rjson::push_back(records, std::move(record));
|
rjson::push_back(records, std::move(record));
|
||||||
record = rjson::empty_object();
|
record = rjson::empty_object();
|
||||||
--limit;
|
--limit;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
for (auto& row : result_set->rows()) {
|
for (auto& row : result_set->rows()) {
|
||||||
auto op = static_cast<cdc::operation>(value_cast<op_utype>(data_type_for<op_utype>()->deserialize(*row[op_index])));
|
auto op = static_cast<cdc::operation>(value_cast<op_utype>(data_type_for<op_utype>()->deserialize(*row[op_index])));
|
||||||
auto ts = value_cast<utils::UUID>(data_type_for<utils::UUID>()->deserialize(*row[ts_index]));
|
auto ts = value_cast<utils::UUID>(data_type_for<utils::UUID>()->deserialize(*row[ts_index]));
|
||||||
auto eor = row[eor_index].has_value() ? value_cast<bool>(boolean_type->deserialize(*row[eor_index])) : false;
|
auto eor = row[eor_index].has_value() ? value_cast<bool>(boolean_type->deserialize(*row[eor_index])) : false;
|
||||||
|
|
||||||
if (!dynamodb.HasMember("Keys")) {
|
if (!dynamodb.HasMember("Keys")) {
|
||||||
auto keys = rjson::empty_object();
|
auto keys = rjson::empty_object();
|
||||||
describe_single_item(*selection, row, key_names, keys);
|
describe_single_item(*selection, row, key_names, keys);
|
||||||
rjson::add(dynamodb, "Keys", std::move(keys));
|
rjson::add(dynamodb, "Keys", std::move(keys));
|
||||||
rjson::add(dynamodb, "ApproximateCreationDateTime", utils::UUID_gen::unix_timestamp_in_sec(ts).count());
|
rjson::add(dynamodb, "ApproximateCreationDateTime", utils::UUID_gen::unix_timestamp_in_sec(ts).count());
|
||||||
rjson::add(dynamodb, "SequenceNumber", sequence_number(ts));
|
rjson::add(dynamodb, "SequenceNumber", sequence_number(ts));
|
||||||
rjson::add(dynamodb, "StreamViewType", type);
|
rjson::add(dynamodb, "StreamViewType", type);
|
||||||
// TODO: SizeBytes
|
// TODO: SizeBytes
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* We merge rows with same timestamp into a single event.
|
* We merge rows with same timestamp into a single event.
|
||||||
* This is pretty much needed, because a CDC row typically
|
* This is pretty much needed, because a CDC row typically
|
||||||
* encodes ~half the info of an alternator write.
|
* encodes ~half the info of an alternator write.
|
||||||
*
|
*
|
||||||
* A big, big downside to how alternator records are written
|
* A big, big downside to how alternator records are written
|
||||||
* (i.e. CQL), is that the distinction between INSERT and UPDATE
|
* (i.e. CQL), is that the distinction between INSERT and UPDATE
|
||||||
* is somewhat lost/unmappable to actual eventName.
|
* is somewhat lost/unmappable to actual eventName.
|
||||||
* A write (currently) always looks like an insert+modify
|
* A write (currently) always looks like an insert+modify
|
||||||
* regardless whether we wrote existing record or not.
|
* regardless whether we wrote existing record or not.
|
||||||
*
|
*
|
||||||
* Maybe RMW ops could be done slightly differently so
|
* Maybe RMW ops could be done slightly differently so
|
||||||
* we can distinguish them here...
|
* we can distinguish them here...
|
||||||
*
|
*
|
||||||
* For now, all writes will become MODIFY.
|
* For now, all writes will become MODIFY.
|
||||||
*
|
*
|
||||||
* Note: we do not check the current pre/post
|
* Note: we do not check the current pre/post
|
||||||
* flags on CDC log, instead we use data to
|
* flags on CDC log, instead we use data to
|
||||||
* drive what is returned. This is (afaict)
|
* drive what is returned. This is (afaict)
|
||||||
* consistent with dynamo streams
|
* consistent with dynamo streams
|
||||||
*/
|
*/
|
||||||
switch (op) {
|
switch (op) {
|
||||||
case cdc::operation::pre_image:
|
case cdc::operation::pre_image:
|
||||||
case cdc::operation::post_image:
|
case cdc::operation::post_image:
|
||||||
{
|
{
|
||||||
auto item = rjson::empty_object();
|
auto item = rjson::empty_object();
|
||||||
describe_single_item(*selection, row, attr_names, item, nullptr, true);
|
describe_single_item(*selection, row, attr_names, item, nullptr, true);
|
||||||
describe_single_item(*selection, row, key_names, item);
|
describe_single_item(*selection, row, key_names, item);
|
||||||
rjson::add(dynamodb, op == cdc::operation::pre_image ? "OldImage" : "NewImage", std::move(item));
|
rjson::add(dynamodb, op == cdc::operation::pre_image ? "OldImage" : "NewImage", std::move(item));
|
||||||
break;
|
|
||||||
}
|
|
||||||
case cdc::operation::update:
|
|
||||||
rjson::add(record, "eventName", "MODIFY");
|
|
||||||
break;
|
|
||||||
case cdc::operation::insert:
|
|
||||||
rjson::add(record, "eventName", "INSERT");
|
|
||||||
break;
|
|
||||||
case cdc::operation::service_row_delete:
|
|
||||||
case cdc::operation::service_partition_delete:
|
|
||||||
{
|
|
||||||
auto user_identity = rjson::empty_object();
|
|
||||||
rjson::add(user_identity, "Type", "Service");
|
|
||||||
rjson::add(user_identity, "PrincipalId", "dynamodb.amazonaws.com");
|
|
||||||
rjson::add(record, "userIdentity", std::move(user_identity));
|
|
||||||
rjson::add(record, "eventName", "REMOVE");
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
rjson::add(record, "eventName", "REMOVE");
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
if (eor) {
|
|
||||||
maybe_add_record();
|
|
||||||
timestamp = ts;
|
|
||||||
if (limit == 0) {
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
case cdc::operation::update:
|
||||||
|
rjson::add(record, "eventName", "MODIFY");
|
||||||
|
break;
|
||||||
|
case cdc::operation::insert:
|
||||||
|
rjson::add(record, "eventName", "INSERT");
|
||||||
|
break;
|
||||||
|
case cdc::operation::service_row_delete:
|
||||||
|
case cdc::operation::service_partition_delete:
|
||||||
|
{
|
||||||
|
auto user_identity = rjson::empty_object();
|
||||||
|
rjson::add(user_identity, "Type", "Service");
|
||||||
|
rjson::add(user_identity, "PrincipalId", "dynamodb.amazonaws.com");
|
||||||
|
rjson::add(record, "userIdentity", std::move(user_identity));
|
||||||
|
rjson::add(record, "eventName", "REMOVE");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
rjson::add(record, "eventName", "REMOVE");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
if (eor) {
|
||||||
|
maybe_add_record();
|
||||||
|
timestamp = ts;
|
||||||
|
if (limit == 0) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
auto ret = rjson::empty_object();
|
auto ret = rjson::empty_object();
|
||||||
auto nrecords = records.Size();
|
auto nrecords = records.Size();
|
||||||
rjson::add(ret, "Records", std::move(records));
|
rjson::add(ret, "Records", std::move(records));
|
||||||
|
|
||||||
if (nrecords != 0) {
|
if (nrecords != 0) {
|
||||||
// #9642. Set next iterators threshold to > last
|
// #9642. Set next iterators threshold to > last
|
||||||
shard_iterator next_iter(iter.table, iter.shard, *timestamp, false);
|
shard_iterator next_iter(iter.table, iter.shard, *timestamp, false);
|
||||||
// Note that here we unconditionally return NextShardIterator,
|
// Note that here we unconditionally return NextShardIterator,
|
||||||
// without checking if maybe we reached the end-of-shard. If the
|
// without checking if maybe we reached the end-of-shard. If the
|
||||||
// shard did end, then the next read will have nrecords == 0 and
|
// shard did end, then the next read will have nrecords == 0 and
|
||||||
// will notice end end of shard and not return NextShardIterator.
|
// will notice end end of shard and not return NextShardIterator.
|
||||||
rjson::add(ret, "NextShardIterator", next_iter);
|
rjson::add(ret, "NextShardIterator", next_iter);
|
||||||
_stats.api_operations.get_records_latency.mark(std::chrono::steady_clock::now() - start_time);
|
_stats.api_operations.get_records_latency.mark(std::chrono::steady_clock::now() - start_time);
|
||||||
co_return rjson::print(std::move(ret));
|
return make_ready_future<executor::request_return_type>(rjson::print(std::move(ret)));
|
||||||
}
|
}
|
||||||
|
|
||||||
// ugh. figure out if we are and end-of-shard
|
// ugh. figure out if we are and end-of-shard
|
||||||
auto normal_token_owners = _proxy.get_token_metadata_ptr()->count_normal_token_owners();
|
auto normal_token_owners = _proxy.get_token_metadata_ptr()->count_normal_token_owners();
|
||||||
|
|
||||||
db_clock::time_point ts = co_await _sdks.cdc_current_generation_timestamp({ normal_token_owners });
|
return _sdks.cdc_current_generation_timestamp({ normal_token_owners }).then([this, iter, high_ts, start_time, ret = std::move(ret)](db_clock::time_point ts) mutable {
|
||||||
auto& shard = iter.shard;
|
auto& shard = iter.shard;
|
||||||
|
|
||||||
if (shard.time < ts && ts < high_ts) {
|
if (shard.time < ts && ts < high_ts) {
|
||||||
// The DynamoDB documentation states that when a shard is
|
// The DynamoDB documentation states that when a shard is
|
||||||
// closed, reading it until the end has NextShardIterator
|
// closed, reading it until the end has NextShardIterator
|
||||||
// "set to null". Our test test_streams_closed_read
|
// "set to null". Our test test_streams_closed_read
|
||||||
// confirms that by "null" they meant not set at all.
|
// confirms that by "null" they meant not set at all.
|
||||||
} else {
|
} else {
|
||||||
// We could have return the same iterator again, but we did
|
// We could have return the same iterator again, but we did
|
||||||
// a search from it until high_ts and found nothing, so we
|
// a search from it until high_ts and found nothing, so we
|
||||||
// can also start the next search from high_ts.
|
// can also start the next search from high_ts.
|
||||||
// TODO: but why? It's simpler just to leave the iterator be.
|
// TODO: but why? It's simpler just to leave the iterator be.
|
||||||
shard_iterator next_iter(iter.table, iter.shard, utils::UUID_gen::min_time_UUID(high_ts.time_since_epoch()), true);
|
shard_iterator next_iter(iter.table, iter.shard, utils::UUID_gen::min_time_UUID(high_ts.time_since_epoch()), true);
|
||||||
rjson::add(ret, "NextShardIterator", iter);
|
rjson::add(ret, "NextShardIterator", iter);
|
||||||
}
|
}
|
||||||
_stats.api_operations.get_records_latency.mark(std::chrono::steady_clock::now() - start_time);
|
_stats.api_operations.get_records_latency.mark(std::chrono::steady_clock::now() - start_time);
|
||||||
if (is_big(ret)) {
|
if (is_big(ret)) {
|
||||||
co_return make_streamed(std::move(ret));
|
return make_ready_future<executor::request_return_type>(make_streamed(std::move(ret)));
|
||||||
}
|
}
|
||||||
co_return rjson::print(std::move(ret));
|
return make_ready_future<executor::request_return_type>(rjson::print(std::move(ret)));
|
||||||
|
});
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
bool executor::add_stream_options(const rjson::value& stream_specification, schema_builder& builder, service::storage_proxy& sp) {
|
bool executor::add_stream_options(const rjson::value& stream_specification, schema_builder& builder, service::storage_proxy& sp) {
|
||||||
|
|||||||
@@ -46,7 +46,6 @@
|
|||||||
#include "alternator/executor.hh"
|
#include "alternator/executor.hh"
|
||||||
#include "alternator/controller.hh"
|
#include "alternator/controller.hh"
|
||||||
#include "alternator/serialization.hh"
|
#include "alternator/serialization.hh"
|
||||||
#include "alternator/ttl_tag.hh"
|
|
||||||
#include "dht/sharder.hh"
|
#include "dht/sharder.hh"
|
||||||
#include "db/config.hh"
|
#include "db/config.hh"
|
||||||
#include "db/tags/utils.hh"
|
#include "db/tags/utils.hh"
|
||||||
@@ -58,10 +57,19 @@ static logging::logger tlogger("alternator_ttl");
|
|||||||
|
|
||||||
namespace alternator {
|
namespace alternator {
|
||||||
|
|
||||||
|
// We write the expiration-time attribute enabled on a table in a
|
||||||
|
// tag TTL_TAG_KEY.
|
||||||
|
// Currently, the *value* of this tag is simply the name of the attribute,
|
||||||
|
// and the expiration scanner interprets it as an Alternator attribute name -
|
||||||
|
// It can refer to a real column or if that doesn't exist, to a member of
|
||||||
|
// the ":attrs" map column. Although this is designed for Alternator, it may
|
||||||
|
// be good enough for CQL as well (there, the ":attrs" column won't exist).
|
||||||
|
extern const sstring TTL_TAG_KEY;
|
||||||
|
|
||||||
future<executor::request_return_type> executor::update_time_to_live(client_state& client_state, service_permit permit, rjson::value request) {
|
future<executor::request_return_type> executor::update_time_to_live(client_state& client_state, service_permit permit, rjson::value request) {
|
||||||
_stats.api_operations.update_time_to_live++;
|
_stats.api_operations.update_time_to_live++;
|
||||||
if (!_proxy.features().alternator_ttl) {
|
if (!_proxy.features().alternator_ttl) {
|
||||||
co_return api_error::unknown_operation("UpdateTimeToLive not yet supported. Upgrade all nodes to a version that supports it.");
|
co_return api_error::unknown_operation("UpdateTimeToLive not yet supported. Experimental support is available if the 'alternator-ttl' experimental feature is enabled on all nodes.");
|
||||||
}
|
}
|
||||||
|
|
||||||
schema_ptr schema = get_table(_proxy, request);
|
schema_ptr schema = get_table(_proxy, request);
|
||||||
@@ -85,7 +93,7 @@ future<executor::request_return_type> executor::update_time_to_live(client_state
|
|||||||
if (v->GetStringLength() < 1 || v->GetStringLength() > 255) {
|
if (v->GetStringLength() < 1 || v->GetStringLength() > 255) {
|
||||||
co_return api_error::validation("The length of AttributeName must be between 1 and 255");
|
co_return api_error::validation("The length of AttributeName must be between 1 and 255");
|
||||||
}
|
}
|
||||||
sstring attribute_name = rjson::to_sstring(*v);
|
sstring attribute_name(v->GetString(), v->GetStringLength());
|
||||||
|
|
||||||
co_await verify_permission(_enforce_authorization, _warn_authorization, client_state, schema, auth::permission::ALTER, _stats);
|
co_await verify_permission(_enforce_authorization, _warn_authorization, client_state, schema, auth::permission::ALTER, _stats);
|
||||||
co_await db::modify_tags(_mm, schema->ks_name(), schema->cf_name(), [&](std::map<sstring, sstring>& tags_map) {
|
co_await db::modify_tags(_mm, schema->ks_name(), schema->cf_name(), [&](std::map<sstring, sstring>& tags_map) {
|
||||||
@@ -133,7 +141,7 @@ future<executor::request_return_type> executor::describe_time_to_live(client_sta
|
|||||||
|
|
||||||
// expiration_service is a sharded service responsible for cleaning up expired
|
// expiration_service is a sharded service responsible for cleaning up expired
|
||||||
// items in all tables with per-item expiration enabled. Currently, this means
|
// items in all tables with per-item expiration enabled. Currently, this means
|
||||||
// Alternator tables with TTL configured via an UpdateTimeToLive request.
|
// Alternator tables with TTL configured via a UpdateTimeToLive request.
|
||||||
//
|
//
|
||||||
// Here is a brief overview of how the expiration service works:
|
// Here is a brief overview of how the expiration service works:
|
||||||
//
|
//
|
||||||
@@ -316,7 +324,9 @@ static future<std::vector<std::pair<dht::token_range, locator::host_id>>> get_se
|
|||||||
const auto& tm = *erm->get_token_metadata_ptr();
|
const auto& tm = *erm->get_token_metadata_ptr();
|
||||||
const auto& sorted_tokens = tm.sorted_tokens();
|
const auto& sorted_tokens = tm.sorted_tokens();
|
||||||
std::vector<std::pair<dht::token_range, locator::host_id>> ret;
|
std::vector<std::pair<dht::token_range, locator::host_id>> ret;
|
||||||
throwing_assert(!sorted_tokens.empty());
|
if (sorted_tokens.empty()) {
|
||||||
|
on_internal_error(tlogger, "Token metadata is empty");
|
||||||
|
}
|
||||||
auto prev_tok = sorted_tokens.back();
|
auto prev_tok = sorted_tokens.back();
|
||||||
for (const auto& tok : sorted_tokens) {
|
for (const auto& tok : sorted_tokens) {
|
||||||
co_await coroutine::maybe_yield();
|
co_await coroutine::maybe_yield();
|
||||||
@@ -553,7 +563,7 @@ static future<> scan_table_ranges(
|
|||||||
expiration_service::stats& expiration_stats)
|
expiration_service::stats& expiration_stats)
|
||||||
{
|
{
|
||||||
const schema_ptr& s = scan_ctx.s;
|
const schema_ptr& s = scan_ctx.s;
|
||||||
throwing_assert(partition_ranges.size() == 1); // otherwise issue #9167 will cause incorrect results.
|
SCYLLA_ASSERT (partition_ranges.size() == 1); // otherwise issue #9167 will cause incorrect results.
|
||||||
auto p = service::pager::query_pagers::pager(proxy, s, scan_ctx.selection, *scan_ctx.query_state_ptr,
|
auto p = service::pager::query_pagers::pager(proxy, s, scan_ctx.selection, *scan_ctx.query_state_ptr,
|
||||||
*scan_ctx.query_options, scan_ctx.command, std::move(partition_ranges), nullptr);
|
*scan_ctx.query_options, scan_ctx.command, std::move(partition_ranges), nullptr);
|
||||||
while (!p->is_exhausted()) {
|
while (!p->is_exhausted()) {
|
||||||
@@ -583,7 +593,7 @@ static future<> scan_table_ranges(
|
|||||||
if (retries >= 10) {
|
if (retries >= 10) {
|
||||||
// Don't get stuck forever asking the same page, maybe there's
|
// Don't get stuck forever asking the same page, maybe there's
|
||||||
// a bug or a real problem in several replicas. Give up on
|
// a bug or a real problem in several replicas. Give up on
|
||||||
// this scan and retry the scan from a random position later,
|
// this scan an retry the scan from a random position later,
|
||||||
// in the next scan period.
|
// in the next scan period.
|
||||||
throw runtime_exception("scanner thread failed after too many timeouts for the same page");
|
throw runtime_exception("scanner thread failed after too many timeouts for the same page");
|
||||||
}
|
}
|
||||||
@@ -630,38 +640,13 @@ static future<> scan_table_ranges(
|
|||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// For a real column to contain an expiration time, it
|
// For a real column to contain an expiration time, it
|
||||||
// must be a numeric type. We currently support decimal
|
// must be a numeric type.
|
||||||
// (used by Alternator TTL) as well as bigint, int and
|
// FIXME: Currently we only support decimal_type (which is
|
||||||
// timestamp (used by CQL per-row TTL).
|
// what Alternator uses), but other numeric types can be
|
||||||
switch (meta[*expiration_column]->type->get_kind()) {
|
// supported as well to make this feature more useful in CQL.
|
||||||
case abstract_type::kind::decimal:
|
// Note that kind::decimal is also checked above.
|
||||||
// Used by Alternator TTL for key columns not stored
|
big_decimal n = value_cast<big_decimal>(v);
|
||||||
// in the map. The value is in seconds, fractional
|
expired = is_expired(n, now);
|
||||||
// part is ignored.
|
|
||||||
expired = is_expired(value_cast<big_decimal>(v), now);
|
|
||||||
break;
|
|
||||||
case abstract_type::kind::long_kind:
|
|
||||||
// Used by CQL per-row TTL. The value is in seconds.
|
|
||||||
expired = is_expired(gc_clock::time_point(std::chrono::seconds(value_cast<int64_t>(v))), now);
|
|
||||||
break;
|
|
||||||
case abstract_type::kind::int32:
|
|
||||||
// Used by CQL per-row TTL. The value is in seconds.
|
|
||||||
// Using int type is not recommended because it will
|
|
||||||
// overflow in 2038, but we support it to allow users
|
|
||||||
// to use existing int columns for expiration.
|
|
||||||
expired = is_expired(gc_clock::time_point(std::chrono::seconds(value_cast<int32_t>(v))), now);
|
|
||||||
break;
|
|
||||||
case abstract_type::kind::timestamp:
|
|
||||||
// Used by CQL per-row TTL. The value is in milliseconds
|
|
||||||
// but we truncate it to gc_clock's precision (whole seconds).
|
|
||||||
expired = is_expired(gc_clock::time_point(std::chrono::duration_cast<gc_clock::duration>(value_cast<db_clock::time_point>(v).time_since_epoch())), now);
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
// Should never happen - we verified the column's type
|
|
||||||
// before starting the scan.
|
|
||||||
[[unlikely]]
|
|
||||||
on_internal_error(tlogger, format("expiration scanner value of unsupported type {} in column {}", meta[*expiration_column]->type->cql3_type_name(), scan_ctx.column_name) );
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if (expired) {
|
if (expired) {
|
||||||
expiration_stats.items_deleted++;
|
expiration_stats.items_deleted++;
|
||||||
@@ -723,12 +708,16 @@ static future<bool> scan_table(
|
|||||||
co_return false;
|
co_return false;
|
||||||
}
|
}
|
||||||
// attribute_name may be one of the schema's columns (in Alternator, this
|
// attribute_name may be one of the schema's columns (in Alternator, this
|
||||||
// means a key column, in CQL it's a regular column), or an element in
|
// means it's a key column), or an element in Alternator's attrs map
|
||||||
// Alternator's attrs map encoded in Alternator's JSON encoding (which we
|
// encoded in Alternator's JSON encoding.
|
||||||
// decode). If attribute_name is a real column, in Alternator it will have
|
// FIXME: To make this less Alternators-specific, we should encode in the
|
||||||
// the type decimal, counting seconds since the UNIX epoch, while in CQL
|
// single key's value three things:
|
||||||
// it will one of the types bigint or int (counting seconds) or timestamp
|
// 1. The name of a column
|
||||||
// (counting milliseconds).
|
// 2. Optionally if column is a map, a member in the map
|
||||||
|
// 3. The deserializer for the value: CQL or Alternator (JSON).
|
||||||
|
// The deserializer can be guessed: If the given column or map item is
|
||||||
|
// numeric, it can be used directly. If it is a "bytes" type, it needs to
|
||||||
|
// be deserialized using Alternator's deserializer.
|
||||||
bytes column_name = to_bytes(*attribute_name);
|
bytes column_name = to_bytes(*attribute_name);
|
||||||
const column_definition *cd = s->get_column_definition(column_name);
|
const column_definition *cd = s->get_column_definition(column_name);
|
||||||
std::optional<std::string> member;
|
std::optional<std::string> member;
|
||||||
@@ -747,14 +736,11 @@ static future<bool> scan_table(
|
|||||||
data_type column_type = cd->type;
|
data_type column_type = cd->type;
|
||||||
// Verify that the column has the right type: If "member" exists
|
// Verify that the column has the right type: If "member" exists
|
||||||
// the column must be a map, and if it doesn't, the column must
|
// the column must be a map, and if it doesn't, the column must
|
||||||
// be decimal_type (Alternator), bigint, int or timestamp (CQL).
|
// (currently) be a decimal_type. If the column has the wrong type
|
||||||
// If the column has the wrong type nothing can get expired in
|
// nothing can get expired in this table, and it's pointless to
|
||||||
// this table, and it's pointless to scan it.
|
// scan it.
|
||||||
if ((member && column_type->get_kind() != abstract_type::kind::map) ||
|
if ((member && column_type->get_kind() != abstract_type::kind::map) ||
|
||||||
(!member && column_type->get_kind() != abstract_type::kind::decimal &&
|
(!member && column_type->get_kind() != abstract_type::kind::decimal)) {
|
||||||
column_type->get_kind() != abstract_type::kind::long_kind &&
|
|
||||||
column_type->get_kind() != abstract_type::kind::int32 &&
|
|
||||||
column_type->get_kind() != abstract_type::kind::timestamp)) {
|
|
||||||
tlogger.info("table {} TTL column has unsupported type, not scanning", s->cf_name());
|
tlogger.info("table {} TTL column has unsupported type, not scanning", s->cf_name());
|
||||||
co_return false;
|
co_return false;
|
||||||
}
|
}
|
||||||
@@ -781,7 +767,7 @@ static future<bool> scan_table(
|
|||||||
// by tasking another node to take over scanning of the dead node's primary
|
// by tasking another node to take over scanning of the dead node's primary
|
||||||
// ranges. What we do here is that this node will also check expiration
|
// ranges. What we do here is that this node will also check expiration
|
||||||
// on its *secondary* ranges - but only those whose primary owner is down.
|
// on its *secondary* ranges - but only those whose primary owner is down.
|
||||||
auto tablet_secondary_replica = tablet_map.get_secondary_replica(*tablet, erm->get_topology()); // throws if no secondary replica
|
auto tablet_secondary_replica = tablet_map.get_secondary_replica(*tablet); // throws if no secondary replica
|
||||||
if (tablet_secondary_replica.host == my_host_id && tablet_secondary_replica.shard == this_shard_id()) {
|
if (tablet_secondary_replica.host == my_host_id && tablet_secondary_replica.shard == this_shard_id()) {
|
||||||
if (!gossiper.is_alive(tablet_primary_replica.host)) {
|
if (!gossiper.is_alive(tablet_primary_replica.host)) {
|
||||||
co_await scan_tablet(*tablet, proxy, abort_source, page_sem, expiration_stats, scan_ctx, tablet_map);
|
co_await scan_tablet(*tablet, proxy, abort_source, page_sem, expiration_stats, scan_ctx, tablet_map);
|
||||||
@@ -892,10 +878,12 @@ future<> expiration_service::run() {
|
|||||||
future<> expiration_service::start() {
|
future<> expiration_service::start() {
|
||||||
// Called by main() on each shard to start the expiration-service
|
// Called by main() on each shard to start the expiration-service
|
||||||
// thread. Just runs run() in the background and allows stop().
|
// thread. Just runs run() in the background and allows stop().
|
||||||
if (!shutting_down()) {
|
if (_db.features().alternator_ttl) {
|
||||||
_end = run().handle_exception([] (std::exception_ptr ep) {
|
if (!shutting_down()) {
|
||||||
tlogger.error("expiration_service failed: {}", ep);
|
_end = run().handle_exception([] (std::exception_ptr ep) {
|
||||||
});
|
tlogger.error("expiration_service failed: {}", ep);
|
||||||
|
});
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return make_ready_future<>();
|
return make_ready_future<>();
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -30,7 +30,7 @@ namespace alternator {
|
|||||||
|
|
||||||
// expiration_service is a sharded service responsible for cleaning up expired
|
// expiration_service is a sharded service responsible for cleaning up expired
|
||||||
// items in all tables with per-item expiration enabled. Currently, this means
|
// items in all tables with per-item expiration enabled. Currently, this means
|
||||||
// Alternator tables with TTL configured via an UpdateTimeToLive request.
|
// Alternator tables with TTL configured via a UpdateTimeToLeave request.
|
||||||
class expiration_service final : public seastar::peering_sharded_service<expiration_service> {
|
class expiration_service final : public seastar::peering_sharded_service<expiration_service> {
|
||||||
public:
|
public:
|
||||||
// Object holding per-shard statistics related to the expiration service.
|
// Object holding per-shard statistics related to the expiration service.
|
||||||
@@ -52,7 +52,7 @@ private:
|
|||||||
data_dictionary::database _db;
|
data_dictionary::database _db;
|
||||||
service::storage_proxy& _proxy;
|
service::storage_proxy& _proxy;
|
||||||
gms::gossiper& _gossiper;
|
gms::gossiper& _gossiper;
|
||||||
// _end is set by start(), and resolves when the background service
|
// _end is set by start(), and resolves when the the background service
|
||||||
// started by it ends. To ask the background service to end, _abort_source
|
// started by it ends. To ask the background service to end, _abort_source
|
||||||
// should be triggered. stop() below uses both _abort_source and _end.
|
// should be triggered. stop() below uses both _abort_source and _end.
|
||||||
std::optional<future<>> _end;
|
std::optional<future<>> _end;
|
||||||
|
|||||||
@@ -1,26 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright 2026-present ScyllaDB
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
|
||||||
*/
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include "seastarx.hh"
|
|
||||||
#include <seastar/core/sstring.hh>
|
|
||||||
|
|
||||||
namespace alternator {
|
|
||||||
// We use the table tag TTL_TAG_KEY ("system:ttl_attribute") to remember
|
|
||||||
// which attribute was chosen as the expiration-time attribute for
|
|
||||||
// Alternator's TTL and CQL's per-row TTL features.
|
|
||||||
// Currently, the *value* of this tag is simply the name of the attribute:
|
|
||||||
// It can refer to a real column or if that doesn't exist, to a member of
|
|
||||||
// the ":attrs" map column (which Alternator uses).
|
|
||||||
extern const sstring TTL_TAG_KEY;
|
|
||||||
} // namespace alternator
|
|
||||||
|
|
||||||
// let users use TTL_TAG_KEY without the "alternator::" prefix,
|
|
||||||
// to make it easier to move it to a different namespace later.
|
|
||||||
using alternator::TTL_TAG_KEY;
|
|
||||||
@@ -31,7 +31,6 @@ set(swagger_files
|
|||||||
api-doc/column_family.json
|
api-doc/column_family.json
|
||||||
api-doc/commitlog.json
|
api-doc/commitlog.json
|
||||||
api-doc/compaction_manager.json
|
api-doc/compaction_manager.json
|
||||||
api-doc/client_routes.json
|
|
||||||
api-doc/config.json
|
api-doc/config.json
|
||||||
api-doc/cql_server_test.json
|
api-doc/cql_server_test.json
|
||||||
api-doc/endpoint_snitch_info.json
|
api-doc/endpoint_snitch_info.json
|
||||||
@@ -69,7 +68,6 @@ target_sources(api
|
|||||||
PRIVATE
|
PRIVATE
|
||||||
api.cc
|
api.cc
|
||||||
cache_service.cc
|
cache_service.cc
|
||||||
client_routes.cc
|
|
||||||
collectd.cc
|
collectd.cc
|
||||||
column_family.cc
|
column_family.cc
|
||||||
commitlog.cc
|
commitlog.cc
|
||||||
|
|||||||
@@ -12,7 +12,7 @@
|
|||||||
"operations":[
|
"operations":[
|
||||||
{
|
{
|
||||||
"method":"POST",
|
"method":"POST",
|
||||||
"summary":"Resets authorized prepared statements cache",
|
"summary":"Reset cache",
|
||||||
"type":"void",
|
"type":"void",
|
||||||
"nickname":"authorization_cache_reset",
|
"nickname":"authorization_cache_reset",
|
||||||
"produces":[
|
"produces":[
|
||||||
|
|||||||
@@ -1,23 +0,0 @@
|
|||||||
, "client_routes_entry": {
|
|
||||||
"id": "client_routes_entry",
|
|
||||||
"summary": "An entry storing client routes",
|
|
||||||
"properties": {
|
|
||||||
"connection_id": {"type": "string"},
|
|
||||||
"host_id": {"type": "string", "format": "uuid"},
|
|
||||||
"address": {"type": "string"},
|
|
||||||
"port": {"type": "integer"},
|
|
||||||
"tls_port": {"type": "integer"},
|
|
||||||
"alternator_port": {"type": "integer"},
|
|
||||||
"alternator_https_port": {"type": "integer"}
|
|
||||||
},
|
|
||||||
"required": ["connection_id", "host_id", "address"]
|
|
||||||
}
|
|
||||||
, "client_routes_key": {
|
|
||||||
"id": "client_routes_key",
|
|
||||||
"summary": "A key of client_routes_entry",
|
|
||||||
"properties": {
|
|
||||||
"connection_id": {"type": "string"},
|
|
||||||
"host_id": {"type": "string", "format": "uuid"}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@@ -1,74 +0,0 @@
|
|||||||
, "/v2/client-routes":{
|
|
||||||
"get": {
|
|
||||||
"description":"List all client route entries",
|
|
||||||
"operationId":"get_client_routes",
|
|
||||||
"tags":["client_routes"],
|
|
||||||
"produces":[
|
|
||||||
"application/json"
|
|
||||||
],
|
|
||||||
"parameters":[],
|
|
||||||
"responses":{
|
|
||||||
"200":{
|
|
||||||
"schema":{
|
|
||||||
"type":"array",
|
|
||||||
"items":{ "$ref":"#/definitions/client_routes_entry" }
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"default":{
|
|
||||||
"description":"unexpected error",
|
|
||||||
"schema":{"$ref":"#/definitions/ErrorModel"}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"post": {
|
|
||||||
"description":"Upsert one or more client route entries",
|
|
||||||
"operationId":"set_client_routes",
|
|
||||||
"tags":["client_routes"],
|
|
||||||
"parameters":[
|
|
||||||
{
|
|
||||||
"name":"body",
|
|
||||||
"in":"body",
|
|
||||||
"required":true,
|
|
||||||
"schema":{
|
|
||||||
"type":"array",
|
|
||||||
"items":{ "$ref":"#/definitions/client_routes_entry" }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"responses":{
|
|
||||||
"200":{ "description": "OK" },
|
|
||||||
"default":{
|
|
||||||
"description":"unexpected error",
|
|
||||||
"schema":{ "$ref":"#/definitions/ErrorModel" }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"delete": {
|
|
||||||
"description":"Delete one or more client route entries",
|
|
||||||
"operationId":"delete_client_routes",
|
|
||||||
"tags":["client_routes"],
|
|
||||||
"parameters":[
|
|
||||||
{
|
|
||||||
"name":"body",
|
|
||||||
"in":"body",
|
|
||||||
"required":true,
|
|
||||||
"schema":{
|
|
||||||
"type":"array",
|
|
||||||
"items":{ "$ref":"#/definitions/client_routes_key" }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"responses":{
|
|
||||||
"200":{
|
|
||||||
"description": "OK"
|
|
||||||
},
|
|
||||||
"default":{
|
|
||||||
"description":"unexpected error",
|
|
||||||
"schema":{
|
|
||||||
"$ref":"#/definitions/ErrorModel"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@@ -243,7 +243,7 @@
|
|||||||
"GOSSIP_DIGEST_SYN",
|
"GOSSIP_DIGEST_SYN",
|
||||||
"GOSSIP_DIGEST_ACK2",
|
"GOSSIP_DIGEST_ACK2",
|
||||||
"GOSSIP_SHUTDOWN",
|
"GOSSIP_SHUTDOWN",
|
||||||
"UNUSED__DEFINITIONS_UPDATE",
|
"DEFINITIONS_UPDATE",
|
||||||
"TRUNCATE",
|
"TRUNCATE",
|
||||||
"UNUSED__REPLICATION_FINISHED",
|
"UNUSED__REPLICATION_FINISHED",
|
||||||
"MIGRATION_REQUEST",
|
"MIGRATION_REQUEST",
|
||||||
|
|||||||
@@ -1295,45 +1295,6 @@
|
|||||||
}
|
}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"path":"/storage_service/logstor_compaction",
|
|
||||||
"operations":[
|
|
||||||
{
|
|
||||||
"method":"POST",
|
|
||||||
"summary":"Trigger compaction of the key-value storage",
|
|
||||||
"type":"void",
|
|
||||||
"nickname":"logstor_compaction",
|
|
||||||
"produces":[
|
|
||||||
"application/json"
|
|
||||||
],
|
|
||||||
"parameters":[
|
|
||||||
{
|
|
||||||
"name":"major",
|
|
||||||
"description":"When true, perform a major compaction",
|
|
||||||
"required":false,
|
|
||||||
"allowMultiple":false,
|
|
||||||
"type":"boolean",
|
|
||||||
"paramType":"query"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"path":"/storage_service/logstor_flush",
|
|
||||||
"operations":[
|
|
||||||
{
|
|
||||||
"method":"POST",
|
|
||||||
"summary":"Trigger flush of logstor storage",
|
|
||||||
"type":"void",
|
|
||||||
"nickname":"logstor_flush",
|
|
||||||
"produces":[
|
|
||||||
"application/json"
|
|
||||||
],
|
|
||||||
"parameters":[]
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"path":"/storage_service/active_repair/",
|
"path":"/storage_service/active_repair/",
|
||||||
"operations":[
|
"operations":[
|
||||||
@@ -3124,48 +3085,6 @@
|
|||||||
}
|
}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
|
||||||
{
|
|
||||||
"path":"/storage_service/tablets/snapshots",
|
|
||||||
"operations":[
|
|
||||||
{
|
|
||||||
"method":"POST",
|
|
||||||
"summary":"Takes the snapshot for the given keyspaces/tables. A snapshot name must be specified.",
|
|
||||||
"type":"void",
|
|
||||||
"nickname":"take_cluster_snapshot",
|
|
||||||
"produces":[
|
|
||||||
"application/json"
|
|
||||||
],
|
|
||||||
"parameters":[
|
|
||||||
{
|
|
||||||
"name":"tag",
|
|
||||||
"description":"the tag given to the snapshot",
|
|
||||||
"required":true,
|
|
||||||
"allowMultiple":false,
|
|
||||||
"type":"string",
|
|
||||||
"paramType":"query"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name":"keyspace",
|
|
||||||
"description":"Keyspace(s) to snapshot. Multiple keyspaces can be provided using a comma-separated list. If omitted, snapshot all keyspaces.",
|
|
||||||
"required":false,
|
|
||||||
"allowMultiple":false,
|
|
||||||
"type":"string",
|
|
||||||
"paramType":"query"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name":"table",
|
|
||||||
"description":"Table(s) to snapshot. Multiple tables (in a single keyspace) can be provided using a comma-separated list. If omitted, snapshot all tables in the given keyspace(s).",
|
|
||||||
"required":false,
|
|
||||||
"allowMultiple":false,
|
|
||||||
"type":"string",
|
|
||||||
"paramType":"query"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
|
|
||||||
{
|
{
|
||||||
"path":"/storage_service/quiesce_topology",
|
"path":"/storage_service/quiesce_topology",
|
||||||
"operations":[
|
"operations":[
|
||||||
@@ -3268,38 +3187,6 @@
|
|||||||
}
|
}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"path":"/storage_service/logstor_info",
|
|
||||||
"operations":[
|
|
||||||
{
|
|
||||||
"method":"GET",
|
|
||||||
"summary":"Logstor segment information for one table",
|
|
||||||
"type":"table_logstor_info",
|
|
||||||
"nickname":"logstor_info",
|
|
||||||
"produces":[
|
|
||||||
"application/json"
|
|
||||||
],
|
|
||||||
"parameters":[
|
|
||||||
{
|
|
||||||
"name":"keyspace",
|
|
||||||
"description":"The keyspace",
|
|
||||||
"required":true,
|
|
||||||
"allowMultiple":false,
|
|
||||||
"type":"string",
|
|
||||||
"paramType":"query"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name":"table",
|
|
||||||
"description":"table name",
|
|
||||||
"required":true,
|
|
||||||
"allowMultiple":false,
|
|
||||||
"type":"string",
|
|
||||||
"paramType":"query"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"path":"/storage_service/retrain_dict",
|
"path":"/storage_service/retrain_dict",
|
||||||
"operations":[
|
"operations":[
|
||||||
@@ -3708,47 +3595,6 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"logstor_hist_bucket":{
|
|
||||||
"id":"logstor_hist_bucket",
|
|
||||||
"properties":{
|
|
||||||
"bucket":{
|
|
||||||
"type":"long"
|
|
||||||
},
|
|
||||||
"count":{
|
|
||||||
"type":"long"
|
|
||||||
},
|
|
||||||
"min_data_size":{
|
|
||||||
"type":"long"
|
|
||||||
},
|
|
||||||
"max_data_size":{
|
|
||||||
"type":"long"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"table_logstor_info":{
|
|
||||||
"id":"table_logstor_info",
|
|
||||||
"description":"Per-table logstor segment distribution",
|
|
||||||
"properties":{
|
|
||||||
"keyspace":{
|
|
||||||
"type":"string"
|
|
||||||
},
|
|
||||||
"table":{
|
|
||||||
"type":"string"
|
|
||||||
},
|
|
||||||
"compaction_groups":{
|
|
||||||
"type":"long"
|
|
||||||
},
|
|
||||||
"segments":{
|
|
||||||
"type":"long"
|
|
||||||
},
|
|
||||||
"data_size_histogram":{
|
|
||||||
"type":"array",
|
|
||||||
"items":{
|
|
||||||
"$ref":"logstor_hist_bucket"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"tablet_repair_result":{
|
"tablet_repair_result":{
|
||||||
"id":"tablet_repair_result",
|
"id":"tablet_repair_result",
|
||||||
"description":"Tablet repair result",
|
"description":"Tablet repair result",
|
||||||
|
|||||||
@@ -209,21 +209,6 @@
|
|||||||
"parameters":[]
|
"parameters":[]
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
},
|
|
||||||
{
|
|
||||||
"path":"/system/chosen_sstable_version",
|
|
||||||
"operations":[
|
|
||||||
{
|
|
||||||
"method":"GET",
|
|
||||||
"summary":"Get sstable version currently chosen for use in new sstables",
|
|
||||||
"type":"string",
|
|
||||||
"nickname":"get_chosen_sstable_version",
|
|
||||||
"produces":[
|
|
||||||
"application/json"
|
|
||||||
],
|
|
||||||
"parameters":[]
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -349,9 +349,13 @@
|
|||||||
"type":"long",
|
"type":"long",
|
||||||
"description":"The shard the task is running on"
|
"description":"The shard the task is running on"
|
||||||
},
|
},
|
||||||
|
"creation_time":{
|
||||||
|
"type":"datetime",
|
||||||
|
"description":"The creation time of the task (when it was queued); extracted from the task_id UUID"
|
||||||
|
},
|
||||||
"start_time":{
|
"start_time":{
|
||||||
"type":"datetime",
|
"type":"datetime",
|
||||||
"description":"The start time of the task; unspecified (equal to epoch) when state == created"
|
"description":"The start time of the task (when execution began); unspecified (equal to epoch) when state == created"
|
||||||
},
|
},
|
||||||
"end_time":{
|
"end_time":{
|
||||||
"type":"datetime",
|
"type":"datetime",
|
||||||
@@ -398,13 +402,17 @@
|
|||||||
"type":"boolean",
|
"type":"boolean",
|
||||||
"description":"Boolean flag indicating whether the task can be aborted"
|
"description":"Boolean flag indicating whether the task can be aborted"
|
||||||
},
|
},
|
||||||
|
"creation_time":{
|
||||||
|
"type":"datetime",
|
||||||
|
"description":"The creation time of the task (when it was queued); extracted from the task_id UUID"
|
||||||
|
},
|
||||||
"start_time":{
|
"start_time":{
|
||||||
"type":"datetime",
|
"type":"datetime",
|
||||||
"description":"The start time of the task"
|
"description":"The start time of the task (when execution began); unspecified (equal to epoch) when state == created"
|
||||||
},
|
},
|
||||||
"end_time":{
|
"end_time":{
|
||||||
"type":"datetime",
|
"type":"datetime",
|
||||||
"description":"The end time of the task (unspecified when the task is not completed)"
|
"description":"The end time of the task (when execution completed); unspecified (equal to epoch) when the task is not completed"
|
||||||
},
|
},
|
||||||
"error":{
|
"error":{
|
||||||
"type":"string",
|
"type":"string",
|
||||||
|
|||||||
19
api/api.cc
19
api/api.cc
@@ -37,7 +37,6 @@
|
|||||||
#include "raft.hh"
|
#include "raft.hh"
|
||||||
#include "gms/gossip_address_map.hh"
|
#include "gms/gossip_address_map.hh"
|
||||||
#include "service_levels.hh"
|
#include "service_levels.hh"
|
||||||
#include "client_routes.hh"
|
|
||||||
|
|
||||||
logging::logger apilog("api");
|
logging::logger apilog("api");
|
||||||
|
|
||||||
@@ -68,11 +67,9 @@ future<> set_server_init(http_context& ctx) {
|
|||||||
rb02->set_api_doc(r);
|
rb02->set_api_doc(r);
|
||||||
rb02->register_api_file(r, "swagger20_header");
|
rb02->register_api_file(r, "swagger20_header");
|
||||||
rb02->register_api_file(r, "metrics");
|
rb02->register_api_file(r, "metrics");
|
||||||
rb02->register_api_file(r, "client_routes");
|
|
||||||
rb->register_function(r, "system",
|
rb->register_function(r, "system",
|
||||||
"The system related API");
|
"The system related API");
|
||||||
rb02->add_definitions_file(r, "metrics");
|
rb02->add_definitions_file(r, "metrics");
|
||||||
rb02->add_definitions_file(r, "client_routes");
|
|
||||||
set_system(ctx, r);
|
set_system(ctx, r);
|
||||||
rb->register_function(r, "error_injection",
|
rb->register_function(r, "error_injection",
|
||||||
"The error injection API");
|
"The error injection API");
|
||||||
@@ -122,9 +119,9 @@ future<> unset_thrift_controller(http_context& ctx) {
|
|||||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_thrift_controller(ctx, r); });
|
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_thrift_controller(ctx, r); });
|
||||||
}
|
}
|
||||||
|
|
||||||
future<> set_server_storage_service(http_context& ctx, sharded<service::storage_service>& ss, sharded<db::snapshot_ctl>& ssc, service::raft_group0_client& group0_client) {
|
future<> set_server_storage_service(http_context& ctx, sharded<service::storage_service>& ss, service::raft_group0_client& group0_client) {
|
||||||
return ctx.http_server.set_routes([&ctx, &ss, &ssc, &group0_client] (routes& r) {
|
return ctx.http_server.set_routes([&ctx, &ss, &group0_client] (routes& r) {
|
||||||
set_storage_service(ctx, r, ss, ssc, group0_client);
|
set_storage_service(ctx, r, ss, group0_client);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -132,16 +129,6 @@ future<> unset_server_storage_service(http_context& ctx) {
|
|||||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_storage_service(ctx, r); });
|
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_storage_service(ctx, r); });
|
||||||
}
|
}
|
||||||
|
|
||||||
future<> set_server_client_routes(http_context& ctx, sharded<service::client_routes_service>& cr) {
|
|
||||||
return ctx.http_server.set_routes([&ctx, &cr] (routes& r) {
|
|
||||||
set_client_routes(ctx, r, cr);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
future<> unset_server_client_routes(http_context& ctx) {
|
|
||||||
return ctx.http_server.set_routes([&ctx] (routes& r) { unset_client_routes(ctx, r); });
|
|
||||||
}
|
|
||||||
|
|
||||||
future<> set_load_meter(http_context& ctx, service::load_meter& lm) {
|
future<> set_load_meter(http_context& ctx, service::load_meter& lm) {
|
||||||
return ctx.http_server.set_routes([&ctx, &lm] (routes& r) { set_load_meter(ctx, r, lm); });
|
return ctx.http_server.set_routes([&ctx, &lm] (routes& r) { set_load_meter(ctx, r, lm); });
|
||||||
}
|
}
|
||||||
|
|||||||
25
api/api.hh
25
api/api.hh
@@ -23,6 +23,31 @@
|
|||||||
|
|
||||||
namespace api {
|
namespace api {
|
||||||
|
|
||||||
|
template<class T>
|
||||||
|
std::vector<T> map_to_key_value(const std::map<sstring, sstring>& map) {
|
||||||
|
std::vector<T> res;
|
||||||
|
res.reserve(map.size());
|
||||||
|
|
||||||
|
for (const auto& [key, value] : map) {
|
||||||
|
res.push_back(T());
|
||||||
|
res.back().key = key;
|
||||||
|
res.back().value = value;
|
||||||
|
}
|
||||||
|
return res;
|
||||||
|
}
|
||||||
|
|
||||||
|
template<class T, class MAP>
|
||||||
|
std::vector<T>& map_to_key_value(const MAP& map, std::vector<T>& res) {
|
||||||
|
res.reserve(res.size() + std::size(map));
|
||||||
|
|
||||||
|
for (const auto& [key, value] : map) {
|
||||||
|
T val;
|
||||||
|
val.key = fmt::to_string(key);
|
||||||
|
val.value = fmt::to_string(value);
|
||||||
|
res.push_back(val);
|
||||||
|
}
|
||||||
|
return res;
|
||||||
|
}
|
||||||
template <typename T, typename S = T>
|
template <typename T, typename S = T>
|
||||||
T map_sum(T&& dest, const S& src) {
|
T map_sum(T&& dest, const S& src) {
|
||||||
for (const auto& i : src) {
|
for (const auto& i : src) {
|
||||||
|
|||||||
@@ -29,7 +29,6 @@ class storage_proxy;
|
|||||||
class storage_service;
|
class storage_service;
|
||||||
class raft_group0_client;
|
class raft_group0_client;
|
||||||
class raft_group_registry;
|
class raft_group_registry;
|
||||||
class client_routes_service;
|
|
||||||
|
|
||||||
} // namespace service
|
} // namespace service
|
||||||
|
|
||||||
@@ -98,10 +97,8 @@ future<> set_server_config(http_context& ctx, db::config& cfg);
|
|||||||
future<> unset_server_config(http_context& ctx);
|
future<> unset_server_config(http_context& ctx);
|
||||||
future<> set_server_snitch(http_context& ctx, sharded<locator::snitch_ptr>& snitch);
|
future<> set_server_snitch(http_context& ctx, sharded<locator::snitch_ptr>& snitch);
|
||||||
future<> unset_server_snitch(http_context& ctx);
|
future<> unset_server_snitch(http_context& ctx);
|
||||||
future<> set_server_storage_service(http_context& ctx, sharded<service::storage_service>& ss, sharded<db::snapshot_ctl>&, service::raft_group0_client&);
|
future<> set_server_storage_service(http_context& ctx, sharded<service::storage_service>& ss, service::raft_group0_client&);
|
||||||
future<> unset_server_storage_service(http_context& ctx);
|
future<> unset_server_storage_service(http_context& ctx);
|
||||||
future<> set_server_client_routes(http_context& ctx, sharded<service::client_routes_service>& cr);
|
|
||||||
future<> unset_server_client_routes(http_context& ctx);
|
|
||||||
future<> set_server_sstables_loader(http_context& ctx, sharded<sstables_loader>& sst_loader);
|
future<> set_server_sstables_loader(http_context& ctx, sharded<sstables_loader>& sst_loader);
|
||||||
future<> unset_server_sstables_loader(http_context& ctx);
|
future<> unset_server_sstables_loader(http_context& ctx);
|
||||||
future<> set_server_view_builder(http_context& ctx, sharded<db::view::view_builder>& vb, sharded<gms::gossiper>& g);
|
future<> set_server_view_builder(http_context& ctx, sharded<db::view::view_builder>& vb, sharded<gms::gossiper>& g);
|
||||||
|
|||||||
@@ -1,176 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (C) 2025-present ScyllaDB
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
|
||||||
*/
|
|
||||||
|
|
||||||
#include <seastar/http/short_streams.hh>
|
|
||||||
|
|
||||||
#include "client_routes.hh"
|
|
||||||
#include "api/api.hh"
|
|
||||||
#include "service/storage_service.hh"
|
|
||||||
#include "service/client_routes.hh"
|
|
||||||
#include "utils/rjson.hh"
|
|
||||||
|
|
||||||
|
|
||||||
#include "api/api-doc/client_routes.json.hh"
|
|
||||||
|
|
||||||
using namespace seastar::httpd;
|
|
||||||
using namespace std::chrono_literals;
|
|
||||||
using namespace json;
|
|
||||||
|
|
||||||
extern logging::logger apilog;
|
|
||||||
|
|
||||||
namespace api {
|
|
||||||
|
|
||||||
static void validate_client_routes_endpoint(sharded<service::client_routes_service>& cr, sstring endpoint_name) {
|
|
||||||
if (!cr.local().get_feature_service().client_routes) {
|
|
||||||
apilog.warn("{}: called before the cluster feature was enabled", endpoint_name);
|
|
||||||
throw std::runtime_error(fmt::format("{} requires all nodes to support the CLIENT_ROUTES cluster feature", endpoint_name));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static sstring parse_string(const char* name, rapidjson::Value const& v) {
|
|
||||||
const auto it = v.FindMember(name);
|
|
||||||
if (it == v.MemberEnd()) {
|
|
||||||
throw bad_param_exception(fmt::format("Missing '{}'", name));
|
|
||||||
}
|
|
||||||
if (!it->value.IsString()) {
|
|
||||||
throw bad_param_exception(fmt::format("'{}' must be a string", name));
|
|
||||||
}
|
|
||||||
return {it->value.GetString(), it->value.GetStringLength()};
|
|
||||||
}
|
|
||||||
|
|
||||||
static std::optional<uint32_t> parse_port(const char* name, rapidjson::Value const& v) {
|
|
||||||
const auto it = v.FindMember(name);
|
|
||||||
if (it == v.MemberEnd()) {
|
|
||||||
return std::nullopt;
|
|
||||||
}
|
|
||||||
if (!it->value.IsInt()) {
|
|
||||||
throw bad_param_exception(fmt::format("'{}' must be an integer", name));
|
|
||||||
}
|
|
||||||
auto port = it->value.GetInt();
|
|
||||||
if (port < 1 || port > 65535) {
|
|
||||||
throw bad_param_exception(fmt::format("'{}' value={} is outside the allowed port range", name, port));
|
|
||||||
}
|
|
||||||
return port;
|
|
||||||
}
|
|
||||||
|
|
||||||
static std::vector<service::client_routes_service::client_route_entry> parse_set_client_array(const rapidjson::Document& root) {
|
|
||||||
if (!root.IsArray()) {
|
|
||||||
throw bad_param_exception("Body must be a JSON array");
|
|
||||||
}
|
|
||||||
|
|
||||||
std::vector<service::client_routes_service::client_route_entry> v;
|
|
||||||
v.reserve(root.GetArray().Size());
|
|
||||||
for (const auto& element : root.GetArray()) {
|
|
||||||
if (!element.IsObject()) { throw bad_param_exception("Each element must be object"); }
|
|
||||||
|
|
||||||
const auto port = parse_port("port", element);
|
|
||||||
const auto tls_port = parse_port("tls_port", element);
|
|
||||||
const auto alternator_port = parse_port("alternator_port", element);
|
|
||||||
const auto alternator_https_port = parse_port("alternator_https_port", element);
|
|
||||||
|
|
||||||
if (!port.has_value() && !tls_port.has_value() && !alternator_port.has_value() && !alternator_https_port.has_value()) {
|
|
||||||
throw bad_param_exception("At least one port field ('port', 'tls_port', 'alternator_port', 'alternator_https_port') must be specified");
|
|
||||||
}
|
|
||||||
|
|
||||||
v.emplace_back(
|
|
||||||
parse_string("connection_id", element),
|
|
||||||
utils::UUID{parse_string("host_id", element)},
|
|
||||||
parse_string("address", element),
|
|
||||||
port,
|
|
||||||
tls_port,
|
|
||||||
alternator_port,
|
|
||||||
alternator_https_port
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
return v;
|
|
||||||
}
|
|
||||||
|
|
||||||
static
|
|
||||||
future<json::json_return_type>
|
|
||||||
rest_set_client_routes(http_context& ctx, sharded<service::client_routes_service>& cr, std::unique_ptr<http::request> req) {
|
|
||||||
validate_client_routes_endpoint(cr, "rest_set_client_routes");
|
|
||||||
|
|
||||||
rapidjson::Document root;
|
|
||||||
auto content = co_await util::read_entire_stream_contiguous(*req->content_stream);
|
|
||||||
root.Parse(content.c_str());
|
|
||||||
|
|
||||||
co_await cr.local().set_client_routes(parse_set_client_array(root));
|
|
||||||
co_return seastar::json::json_void();
|
|
||||||
}
|
|
||||||
|
|
||||||
static std::vector<service::client_routes_service::client_route_key> parse_delete_client_array(const rapidjson::Document& root) {
|
|
||||||
if (!root.IsArray()) {
|
|
||||||
throw bad_param_exception("Body must be a JSON array");
|
|
||||||
}
|
|
||||||
|
|
||||||
std::vector<service::client_routes_service::client_route_key> v;
|
|
||||||
v.reserve(root.GetArray().Size());
|
|
||||||
for (const auto& element : root.GetArray()) {
|
|
||||||
v.emplace_back(
|
|
||||||
parse_string("connection_id", element),
|
|
||||||
utils::UUID{parse_string("host_id", element)}
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
return v;
|
|
||||||
}
|
|
||||||
|
|
||||||
static
|
|
||||||
future<json::json_return_type>
|
|
||||||
rest_delete_client_routes(http_context& ctx, sharded<service::client_routes_service>& cr, std::unique_ptr<http::request> req) {
|
|
||||||
validate_client_routes_endpoint(cr, "delete_client_routes");
|
|
||||||
|
|
||||||
rapidjson::Document root;
|
|
||||||
auto content = co_await util::read_entire_stream_contiguous(*req->content_stream);
|
|
||||||
root.Parse(content.c_str());
|
|
||||||
|
|
||||||
co_await cr.local().delete_client_routes(parse_delete_client_array(root));
|
|
||||||
co_return seastar::json::json_void();
|
|
||||||
}
|
|
||||||
|
|
||||||
static
|
|
||||||
future<json::json_return_type>
|
|
||||||
rest_get_client_routes(http_context& ctx, sharded<service::client_routes_service>& cr, std::unique_ptr<http::request> req) {
|
|
||||||
validate_client_routes_endpoint(cr, "get_client_routes");
|
|
||||||
|
|
||||||
co_return co_await cr.invoke_on(0, [] (service::client_routes_service& cr) -> future<json::json_return_type> {
|
|
||||||
co_return json::json_return_type(stream_range_as_array(co_await cr.get_client_routes(), [](const service::client_routes_service::client_route_entry & entry) {
|
|
||||||
seastar::httpd::client_routes_json::client_routes_entry obj;
|
|
||||||
obj.connection_id = entry.connection_id;
|
|
||||||
obj.host_id = fmt::to_string(entry.host_id);
|
|
||||||
obj.address = entry.address;
|
|
||||||
if (entry.port.has_value()) { obj.port = entry.port.value(); }
|
|
||||||
if (entry.tls_port.has_value()) { obj.tls_port = entry.tls_port.value(); }
|
|
||||||
if (entry.alternator_port.has_value()) { obj.alternator_port = entry.alternator_port.value(); }
|
|
||||||
if (entry.alternator_https_port.has_value()) { obj.alternator_https_port = entry.alternator_https_port.value(); }
|
|
||||||
return obj;
|
|
||||||
}));
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
void set_client_routes(http_context& ctx, routes& r, sharded<service::client_routes_service>& cr) {
|
|
||||||
seastar::httpd::client_routes_json::set_client_routes.set(r, [&ctx, &cr] (std::unique_ptr<seastar::http::request> req) {
|
|
||||||
return rest_set_client_routes(ctx, cr, std::move(req));
|
|
||||||
});
|
|
||||||
seastar::httpd::client_routes_json::delete_client_routes.set(r, [&ctx, &cr] (std::unique_ptr<seastar::http::request> req) {
|
|
||||||
return rest_delete_client_routes(ctx, cr, std::move(req));
|
|
||||||
});
|
|
||||||
seastar::httpd::client_routes_json::get_client_routes.set(r, [&ctx, &cr] (std::unique_ptr<seastar::http::request> req) {
|
|
||||||
return rest_get_client_routes(ctx, cr, std::move(req));
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
void unset_client_routes(http_context& ctx, routes& r) {
|
|
||||||
seastar::httpd::client_routes_json::set_client_routes.unset(r);
|
|
||||||
seastar::httpd::client_routes_json::delete_client_routes.unset(r);
|
|
||||||
seastar::httpd::client_routes_json::get_client_routes.unset(r);
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
@@ -1,20 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (C) 2025-present ScyllaDB
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
|
||||||
*/
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include <seastar/core/sharded.hh>
|
|
||||||
#include <seastar/json/json_elements.hh>
|
|
||||||
#include "api/api_init.hh"
|
|
||||||
|
|
||||||
namespace api {
|
|
||||||
|
|
||||||
void set_client_routes(http_context& ctx, httpd::routes& r, sharded<service::client_routes_service>& cr);
|
|
||||||
void unset_client_routes(http_context& ctx, httpd::routes& r);
|
|
||||||
|
|
||||||
}
|
|
||||||
@@ -18,9 +18,7 @@
|
|||||||
#include "utils/assert.hh"
|
#include "utils/assert.hh"
|
||||||
#include "utils/estimated_histogram.hh"
|
#include "utils/estimated_histogram.hh"
|
||||||
#include <algorithm>
|
#include <algorithm>
|
||||||
#include <sstream>
|
|
||||||
#include "db/data_listeners.hh"
|
#include "db/data_listeners.hh"
|
||||||
#include "utils/hash.hh"
|
|
||||||
#include "storage_service.hh"
|
#include "storage_service.hh"
|
||||||
#include "compaction/compaction_manager.hh"
|
#include "compaction/compaction_manager.hh"
|
||||||
#include "unimplemented.hh"
|
#include "unimplemented.hh"
|
||||||
@@ -344,56 +342,6 @@ uint64_t accumulate_on_active_memtables(replica::table& t, noncopyable_function<
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static
|
|
||||||
future<json::json_return_type>
|
|
||||||
rest_toppartitions_generic(sharded<replica::database>& db, std::unique_ptr<http::request> req) {
|
|
||||||
bool filters_provided = false;
|
|
||||||
|
|
||||||
std::unordered_set<std::tuple<sstring, sstring>, utils::tuple_hash> table_filters {};
|
|
||||||
if (auto filters = req->get_query_param("table_filters"); !filters.empty()) {
|
|
||||||
filters_provided = true;
|
|
||||||
std::stringstream ss { filters };
|
|
||||||
std::string filter;
|
|
||||||
while (!filters.empty() && ss.good()) {
|
|
||||||
std::getline(ss, filter, ',');
|
|
||||||
table_filters.emplace(parse_fully_qualified_cf_name(filter));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
std::unordered_set<sstring> keyspace_filters {};
|
|
||||||
if (auto filters = req->get_query_param("keyspace_filters"); !filters.empty()) {
|
|
||||||
filters_provided = true;
|
|
||||||
std::stringstream ss { filters };
|
|
||||||
std::string filter;
|
|
||||||
while (!filters.empty() && ss.good()) {
|
|
||||||
std::getline(ss, filter, ',');
|
|
||||||
keyspace_filters.emplace(std::move(filter));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// when the query is empty return immediately
|
|
||||||
if (filters_provided && table_filters.empty() && keyspace_filters.empty()) {
|
|
||||||
apilog.debug("toppartitions query: processing results");
|
|
||||||
cf::toppartitions_query_results results;
|
|
||||||
|
|
||||||
results.read_cardinality = 0;
|
|
||||||
results.write_cardinality = 0;
|
|
||||||
|
|
||||||
return make_ready_future<json::json_return_type>(results);
|
|
||||||
}
|
|
||||||
|
|
||||||
api::req_param<std::chrono::milliseconds, unsigned> duration{*req, "duration", 1000ms};
|
|
||||||
api::req_param<unsigned> capacity(*req, "capacity", 256);
|
|
||||||
api::req_param<unsigned> list_size(*req, "list_size", 10);
|
|
||||||
|
|
||||||
apilog.info("toppartitions query: #table_filters={} #keyspace_filters={} duration={} list_size={} capacity={}",
|
|
||||||
!table_filters.empty() ? std::to_string(table_filters.size()) : "all", !keyspace_filters.empty() ? std::to_string(keyspace_filters.size()) : "all", duration.value, list_size.value, capacity.value);
|
|
||||||
|
|
||||||
return seastar::do_with(db::toppartitions_query(db, std::move(table_filters), std::move(keyspace_filters), duration.value, list_size, capacity), [] (db::toppartitions_query& q) {
|
|
||||||
return run_toppartitions_query(q);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
void set_column_family(http_context& ctx, routes& r, sharded<replica::database>& db) {
|
void set_column_family(http_context& ctx, routes& r, sharded<replica::database>& db) {
|
||||||
cf::get_column_family_name.set(r, [&db] (const_req req){
|
cf::get_column_family_name.set(r, [&db] (const_req req){
|
||||||
std::vector<sstring> res;
|
std::vector<sstring> res;
|
||||||
@@ -1099,10 +1047,6 @@ void set_column_family(http_context& ctx, routes& r, sharded<replica::database>&
|
|||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
ss::toppartitions_generic.set(r, [&db] (std::unique_ptr<http::request> req) {
|
|
||||||
return rest_toppartitions_generic(db, std::move(req));
|
|
||||||
});
|
|
||||||
|
|
||||||
cf::force_major_compaction.set(r, [&ctx, &db](std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
cf::force_major_compaction.set(r, [&ctx, &db](std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||||
if (!req->get_query_param("split_output").empty()) {
|
if (!req->get_query_param("split_output").empty()) {
|
||||||
fail(unimplemented::cause::API);
|
fail(unimplemented::cause::API);
|
||||||
@@ -1269,7 +1213,6 @@ void unset_column_family(http_context& ctx, routes& r) {
|
|||||||
cf::get_sstable_count_per_level.unset(r);
|
cf::get_sstable_count_per_level.unset(r);
|
||||||
cf::get_sstables_for_key.unset(r);
|
cf::get_sstables_for_key.unset(r);
|
||||||
cf::toppartitions.unset(r);
|
cf::toppartitions.unset(r);
|
||||||
ss::toppartitions_generic.unset(r);
|
|
||||||
cf::force_major_compaction.unset(r);
|
cf::force_major_compaction.unset(r);
|
||||||
ss::get_load.unset(r);
|
ss::get_load.unset(r);
|
||||||
ss::get_metrics_load.unset(r);
|
ss::get_metrics_load.unset(r);
|
||||||
|
|||||||
@@ -17,7 +17,9 @@
|
|||||||
#include "gms/feature_service.hh"
|
#include "gms/feature_service.hh"
|
||||||
#include "schema/schema_builder.hh"
|
#include "schema/schema_builder.hh"
|
||||||
#include "sstables/sstables_manager.hh"
|
#include "sstables/sstables_manager.hh"
|
||||||
|
#include "utils/hash.hh"
|
||||||
#include <optional>
|
#include <optional>
|
||||||
|
#include <sstream>
|
||||||
#include <stdexcept>
|
#include <stdexcept>
|
||||||
#include <time.h>
|
#include <time.h>
|
||||||
#include <algorithm>
|
#include <algorithm>
|
||||||
@@ -513,15 +515,6 @@ void set_sstables_loader(http_context& ctx, routes& r, sharded<sstables_loader>&
|
|||||||
auto sstables = parsed.GetArray() |
|
auto sstables = parsed.GetArray() |
|
||||||
std::views::transform([] (const auto& s) { return sstring(rjson::to_string_view(s)); }) |
|
std::views::transform([] (const auto& s) { return sstring(rjson::to_string_view(s)); }) |
|
||||||
std::ranges::to<std::vector>();
|
std::ranges::to<std::vector>();
|
||||||
apilog.info("Restore invoked with following parameters: keyspace={}, table={}, endpoint={}, bucket={}, prefix={}, sstables_count={}, scope={}, primary_replica_only={}",
|
|
||||||
keyspace,
|
|
||||||
table,
|
|
||||||
endpoint,
|
|
||||||
bucket,
|
|
||||||
prefix,
|
|
||||||
sstables.size(),
|
|
||||||
scope,
|
|
||||||
primary_replica_only);
|
|
||||||
auto task_id = co_await sst_loader.local().download_new_sstables(keyspace, table, prefix, std::move(sstables), endpoint, bucket, scope, primary_replica_only);
|
auto task_id = co_await sst_loader.local().download_new_sstables(keyspace, table, prefix, std::move(sstables), endpoint, bucket, scope, primary_replica_only);
|
||||||
co_return json::json_return_type(fmt::to_string(task_id));
|
co_return json::json_return_type(fmt::to_string(task_id));
|
||||||
});
|
});
|
||||||
@@ -534,15 +527,13 @@ void unset_sstables_loader(http_context& ctx, routes& r) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void set_view_builder(http_context& ctx, routes& r, sharded<db::view::view_builder>& vb, sharded<gms::gossiper>& g) {
|
void set_view_builder(http_context& ctx, routes& r, sharded<db::view::view_builder>& vb, sharded<gms::gossiper>& g) {
|
||||||
ss::view_build_statuses.set(r, [&ctx, &vb, &g] (std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
ss::view_build_statuses.set(r, [&ctx, &vb, &g] (std::unique_ptr<http::request> req) {
|
||||||
auto keyspace = validate_keyspace(ctx, req);
|
auto keyspace = validate_keyspace(ctx, req);
|
||||||
auto view = req->get_path_param("view");
|
auto view = req->get_path_param("view");
|
||||||
co_return json::json_return_type(stream_range_as_array(co_await vb.local().view_build_statuses(std::move(keyspace), std::move(view), g.local()), [] (const auto& i) {
|
return vb.local().view_build_statuses(std::move(keyspace), std::move(view), g.local()).then([] (std::unordered_map<sstring, sstring> status) {
|
||||||
storage_service_json::mapper res;
|
std::vector<storage_service_json::mapper> res;
|
||||||
res.key = i.first;
|
return make_ready_future<json::json_return_type>(map_to_key_value(std::move(status), res));
|
||||||
res.value = i.second;
|
});
|
||||||
return res;
|
|
||||||
}));
|
|
||||||
});
|
});
|
||||||
|
|
||||||
cf::get_built_indexes.set(r, [&vb](std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
cf::get_built_indexes.set(r, [&vb](std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||||
@@ -556,13 +547,17 @@ void set_view_builder(http_context& ctx, routes& r, sharded<db::view::view_build
|
|||||||
vp.insert(b.second);
|
vp.insert(b.second);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
std::vector<sstring> res;
|
||||||
replica::database& db = vb.local().get_db();
|
replica::database& db = vb.local().get_db();
|
||||||
auto uuid = validate_table(db, ks, cf_name);
|
auto uuid = validate_table(db, ks, cf_name);
|
||||||
replica::column_family& cf = db.find_column_family(uuid);
|
replica::column_family& cf = db.find_column_family(uuid);
|
||||||
co_return cf.get_index_manager().list_indexes()
|
res.reserve(cf.get_index_manager().list_indexes().size());
|
||||||
| std::views::transform([] (const auto& i) { return i.metadata().name(); })
|
for (auto&& i : cf.get_index_manager().list_indexes()) {
|
||||||
| std::views::filter([&vp] (const auto& n) { return vp.contains(secondary_index::index_table_name(n)); })
|
if (vp.contains(secondary_index::index_table_name(i.metadata().name()))) {
|
||||||
| std::ranges::to<std::vector>();
|
res.emplace_back(i.metadata().name());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
co_return res;
|
||||||
});
|
});
|
||||||
|
|
||||||
}
|
}
|
||||||
@@ -580,16 +575,6 @@ static future<json::json_return_type> describe_ring_as_json_for_table(const shar
|
|||||||
co_return json::json_return_type(stream_range_as_array(co_await ss.local().describe_ring_for_table(keyspace, table), token_range_endpoints_to_json));
|
co_return json::json_return_type(stream_range_as_array(co_await ss.local().describe_ring_for_table(keyspace, table), token_range_endpoints_to_json));
|
||||||
}
|
}
|
||||||
|
|
||||||
namespace {
|
|
||||||
template <typename Key, typename Value>
|
|
||||||
storage_service_json::mapper map_to_json(const std::pair<Key, Value>& i) {
|
|
||||||
storage_service_json::mapper val;
|
|
||||||
val.key = fmt::to_string(i.first);
|
|
||||||
val.value = fmt::to_string(i.second);
|
|
||||||
return val;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static
|
static
|
||||||
future<json::json_return_type>
|
future<json::json_return_type>
|
||||||
rest_get_token_endpoint(http_context& ctx, sharded<service::storage_service>& ss, std::unique_ptr<http::request> req) {
|
rest_get_token_endpoint(http_context& ctx, sharded<service::storage_service>& ss, std::unique_ptr<http::request> req) {
|
||||||
@@ -607,7 +592,62 @@ rest_get_token_endpoint(http_context& ctx, sharded<service::storage_service>& ss
|
|||||||
throw bad_param_exception("Either provide both keyspace and table (for tablet table) or neither (for vnodes)");
|
throw bad_param_exception("Either provide both keyspace and table (for tablet table) or neither (for vnodes)");
|
||||||
}
|
}
|
||||||
|
|
||||||
co_return json::json_return_type(stream_range_as_array(token_endpoints, &map_to_json<dht::token, gms::inet_address>));
|
co_return json::json_return_type(stream_range_as_array(token_endpoints, [](const auto& i) {
|
||||||
|
storage_service_json::mapper val;
|
||||||
|
val.key = fmt::to_string(i.first);
|
||||||
|
val.value = fmt::to_string(i.second);
|
||||||
|
return val;
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
static
|
||||||
|
future<json::json_return_type>
|
||||||
|
rest_toppartitions_generic(http_context& ctx, std::unique_ptr<http::request> req) {
|
||||||
|
bool filters_provided = false;
|
||||||
|
|
||||||
|
std::unordered_set<std::tuple<sstring, sstring>, utils::tuple_hash> table_filters {};
|
||||||
|
if (auto filters = req->get_query_param("table_filters"); !filters.empty()) {
|
||||||
|
filters_provided = true;
|
||||||
|
std::stringstream ss { filters };
|
||||||
|
std::string filter;
|
||||||
|
while (!filters.empty() && ss.good()) {
|
||||||
|
std::getline(ss, filter, ',');
|
||||||
|
table_filters.emplace(parse_fully_qualified_cf_name(filter));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
std::unordered_set<sstring> keyspace_filters {};
|
||||||
|
if (auto filters = req->get_query_param("keyspace_filters"); !filters.empty()) {
|
||||||
|
filters_provided = true;
|
||||||
|
std::stringstream ss { filters };
|
||||||
|
std::string filter;
|
||||||
|
while (!filters.empty() && ss.good()) {
|
||||||
|
std::getline(ss, filter, ',');
|
||||||
|
keyspace_filters.emplace(std::move(filter));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// when the query is empty return immediately
|
||||||
|
if (filters_provided && table_filters.empty() && keyspace_filters.empty()) {
|
||||||
|
apilog.debug("toppartitions query: processing results");
|
||||||
|
httpd::column_family_json::toppartitions_query_results results;
|
||||||
|
|
||||||
|
results.read_cardinality = 0;
|
||||||
|
results.write_cardinality = 0;
|
||||||
|
|
||||||
|
return make_ready_future<json::json_return_type>(results);
|
||||||
|
}
|
||||||
|
|
||||||
|
api::req_param<std::chrono::milliseconds, unsigned> duration{*req, "duration", 1000ms};
|
||||||
|
api::req_param<unsigned> capacity(*req, "capacity", 256);
|
||||||
|
api::req_param<unsigned> list_size(*req, "list_size", 10);
|
||||||
|
|
||||||
|
apilog.info("toppartitions query: #table_filters={} #keyspace_filters={} duration={} list_size={} capacity={}",
|
||||||
|
!table_filters.empty() ? std::to_string(table_filters.size()) : "all", !keyspace_filters.empty() ? std::to_string(keyspace_filters.size()) : "all", duration.value, list_size.value, capacity.value);
|
||||||
|
|
||||||
|
return seastar::do_with(db::toppartitions_query(ctx.db, std::move(table_filters), std::move(keyspace_filters), duration.value, list_size, capacity), [] (db::toppartitions_query& q) {
|
||||||
|
return run_toppartitions_query(q);
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
static
|
static
|
||||||
@@ -641,6 +681,7 @@ rest_get_range_to_endpoint_map(http_context& ctx, sharded<service::storage_servi
|
|||||||
table_id = validate_table(ctx.db.local(), keyspace, table);
|
table_id = validate_table(ctx.db.local(), keyspace, table);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
std::vector<ss::maplist_mapper> res;
|
||||||
co_return stream_range_as_array(co_await ss.local().get_range_to_address_map(keyspace, table_id),
|
co_return stream_range_as_array(co_await ss.local().get_range_to_address_map(keyspace, table_id),
|
||||||
[](const std::pair<dht::token_range, inet_address_vector_replica_set>& entry){
|
[](const std::pair<dht::token_range, inet_address_vector_replica_set>& entry){
|
||||||
ss::maplist_mapper m;
|
ss::maplist_mapper m;
|
||||||
@@ -731,13 +772,17 @@ rest_cleanup_all(http_context& ctx, sharded<service::storage_service>& ss, std::
|
|||||||
|
|
||||||
apilog.info("cleanup_all global={}", global);
|
apilog.info("cleanup_all global={}", global);
|
||||||
|
|
||||||
if (global) {
|
auto done = !global ? false : co_await ss.invoke_on(0, [] (service::storage_service& ss) -> future<bool> {
|
||||||
co_await ss.invoke_on(0, [] (service::storage_service& ss) -> future<> {
|
if (!ss.is_topology_coordinator_enabled()) {
|
||||||
co_return co_await ss.do_clusterwide_vnodes_cleanup();
|
co_return false;
|
||||||
});
|
}
|
||||||
|
co_await ss.do_clusterwide_vnodes_cleanup();
|
||||||
|
co_return true;
|
||||||
|
});
|
||||||
|
if (done) {
|
||||||
co_return json::json_return_type(0);
|
co_return json::json_return_type(0);
|
||||||
}
|
}
|
||||||
// fall back to the local cleanup if local cleanup is requested
|
// fall back to the local cleanup if topology coordinator is not enabled or local cleanup is requested
|
||||||
auto& db = ctx.db;
|
auto& db = ctx.db;
|
||||||
auto& compaction_module = db.local().get_compaction_manager().get_task_manager_module();
|
auto& compaction_module = db.local().get_compaction_manager().get_task_manager_module();
|
||||||
auto task = co_await compaction_module.make_and_start_task<compaction::global_cleanup_compaction_task_impl>({}, db);
|
auto task = co_await compaction_module.make_and_start_task<compaction::global_cleanup_compaction_task_impl>({}, db);
|
||||||
@@ -745,7 +790,9 @@ rest_cleanup_all(http_context& ctx, sharded<service::storage_service>& ss, std::
|
|||||||
|
|
||||||
// Mark this node as clean
|
// Mark this node as clean
|
||||||
co_await ss.invoke_on(0, [] (service::storage_service& ss) -> future<> {
|
co_await ss.invoke_on(0, [] (service::storage_service& ss) -> future<> {
|
||||||
co_await ss.reset_cleanup_needed();
|
if (ss.is_topology_coordinator_enabled()) {
|
||||||
|
co_await ss.reset_cleanup_needed();
|
||||||
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
co_return json::json_return_type(0);
|
co_return json::json_return_type(0);
|
||||||
@@ -756,6 +803,9 @@ future<json::json_return_type>
|
|||||||
rest_reset_cleanup_needed(http_context& ctx, sharded<service::storage_service>& ss, std::unique_ptr<http::request> req) {
|
rest_reset_cleanup_needed(http_context& ctx, sharded<service::storage_service>& ss, std::unique_ptr<http::request> req) {
|
||||||
apilog.info("reset_cleanup_needed");
|
apilog.info("reset_cleanup_needed");
|
||||||
co_await ss.invoke_on(0, [] (service::storage_service& ss) {
|
co_await ss.invoke_on(0, [] (service::storage_service& ss) {
|
||||||
|
if (!ss.is_topology_coordinator_enabled()) {
|
||||||
|
throw std::runtime_error("mark_node_as_clean is only supported when topology over raft is enabled");
|
||||||
|
}
|
||||||
return ss.reset_cleanup_needed();
|
return ss.reset_cleanup_needed();
|
||||||
});
|
});
|
||||||
co_return json_void();
|
co_return json_void();
|
||||||
@@ -783,31 +833,9 @@ rest_force_keyspace_flush(http_context& ctx, std::unique_ptr<http::request> req)
|
|||||||
|
|
||||||
static
|
static
|
||||||
future<json::json_return_type>
|
future<json::json_return_type>
|
||||||
rest_logstor_compaction(http_context& ctx, std::unique_ptr<http::request> req) {
|
rest_decommission(sharded<service::storage_service>& ss, std::unique_ptr<http::request> req) {
|
||||||
bool major = false;
|
|
||||||
if (auto major_param = req->get_query_param("major"); !major_param.empty()) {
|
|
||||||
major = validate_bool(major_param);
|
|
||||||
}
|
|
||||||
apilog.info("logstor_compaction: major={}", major);
|
|
||||||
auto& db = ctx.db;
|
|
||||||
co_await replica::database::trigger_logstor_compaction_on_all_shards(db, major);
|
|
||||||
co_return json_void();
|
|
||||||
}
|
|
||||||
|
|
||||||
static
|
|
||||||
future<json::json_return_type>
|
|
||||||
rest_logstor_flush(http_context& ctx, std::unique_ptr<http::request> req) {
|
|
||||||
apilog.info("logstor_flush");
|
|
||||||
auto& db = ctx.db;
|
|
||||||
co_await replica::database::flush_logstor_separator_on_all_shards(db);
|
|
||||||
co_return json_void();
|
|
||||||
}
|
|
||||||
|
|
||||||
static
|
|
||||||
future<json::json_return_type>
|
|
||||||
rest_decommission(sharded<service::storage_service>& ss, sharded<db::snapshot_ctl>& ssc, std::unique_ptr<http::request> req) {
|
|
||||||
apilog.info("decommission");
|
apilog.info("decommission");
|
||||||
return ss.local().decommission(ssc).then([] {
|
return ss.local().decommission().then([] {
|
||||||
return make_ready_future<json::json_return_type>(json_void());
|
return make_ready_future<json::json_return_type>(json_void());
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
@@ -1284,7 +1312,10 @@ rest_get_ownership(http_context& ctx, sharded<service::storage_service>& ss, std
|
|||||||
throw httpd::bad_param_exception("storage_service/ownership cannot be used when a keyspace uses tablets");
|
throw httpd::bad_param_exception("storage_service/ownership cannot be used when a keyspace uses tablets");
|
||||||
}
|
}
|
||||||
|
|
||||||
co_return json::json_return_type(stream_range_as_array(co_await ss.local().get_ownership(), &map_to_json<gms::inet_address, float>));
|
return ss.local().get_ownership().then([] (auto&& ownership) {
|
||||||
|
std::vector<storage_service_json::mapper> res;
|
||||||
|
return make_ready_future<json::json_return_type>(map_to_key_value(ownership, res));
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
static
|
static
|
||||||
@@ -1301,7 +1332,10 @@ rest_get_effective_ownership(http_context& ctx, sharded<service::storage_service
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
co_return json::json_return_type(stream_range_as_array(co_await ss.local().effective_ownership(keyspace_name, table_name), &map_to_json<gms::inet_address, float>));
|
return ss.local().effective_ownership(keyspace_name, table_name).then([] (auto&& ownership) {
|
||||||
|
std::vector<storage_service_json::mapper> res;
|
||||||
|
return make_ready_future<json::json_return_type>(map_to_key_value(ownership, res));
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
static
|
static
|
||||||
@@ -1311,7 +1345,7 @@ rest_estimate_compression_ratios(http_context& ctx, sharded<service::storage_ser
|
|||||||
apilog.warn("estimate_compression_ratios: called before the cluster feature was enabled");
|
apilog.warn("estimate_compression_ratios: called before the cluster feature was enabled");
|
||||||
throw std::runtime_error("estimate_compression_ratios requires all nodes to support the SSTABLE_COMPRESSION_DICTS cluster feature");
|
throw std::runtime_error("estimate_compression_ratios requires all nodes to support the SSTABLE_COMPRESSION_DICTS cluster feature");
|
||||||
}
|
}
|
||||||
auto ticket = co_await get_units(ss.local().get_do_sample_sstables_concurrency_limiter(), 1);
|
auto ticket = get_units(ss.local().get_do_sample_sstables_concurrency_limiter(), 1);
|
||||||
auto ks = api::req_param<sstring>(*req, "keyspace", {}).value;
|
auto ks = api::req_param<sstring>(*req, "keyspace", {}).value;
|
||||||
auto cf = api::req_param<sstring>(*req, "cf", {}).value;
|
auto cf = api::req_param<sstring>(*req, "cf", {}).value;
|
||||||
apilog.debug("estimate_compression_ratios: called with ks={} cf={}", ks, cf);
|
apilog.debug("estimate_compression_ratios: called with ks={} cf={}", ks, cf);
|
||||||
@@ -1377,7 +1411,7 @@ rest_retrain_dict(http_context& ctx, sharded<service::storage_service>& ss, serv
|
|||||||
apilog.warn("retrain_dict: called before the cluster feature was enabled");
|
apilog.warn("retrain_dict: called before the cluster feature was enabled");
|
||||||
throw std::runtime_error("retrain_dict requires all nodes to support the SSTABLE_COMPRESSION_DICTS cluster feature");
|
throw std::runtime_error("retrain_dict requires all nodes to support the SSTABLE_COMPRESSION_DICTS cluster feature");
|
||||||
}
|
}
|
||||||
auto ticket = co_await get_units(ss.local().get_do_sample_sstables_concurrency_limiter(), 1);
|
auto ticket = get_units(ss.local().get_do_sample_sstables_concurrency_limiter(), 1);
|
||||||
auto ks = api::req_param<sstring>(*req, "keyspace", {}).value;
|
auto ks = api::req_param<sstring>(*req, "keyspace", {}).value;
|
||||||
auto cf = api::req_param<sstring>(*req, "cf", {}).value;
|
auto cf = api::req_param<sstring>(*req, "cf", {}).value;
|
||||||
apilog.debug("retrain_dict: called with ks={} cf={}", ks, cf);
|
apilog.debug("retrain_dict: called with ks={} cf={}", ks, cf);
|
||||||
@@ -1523,54 +1557,6 @@ rest_sstable_info(http_context& ctx, std::unique_ptr<http::request> req) {
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
static
|
|
||||||
future<json::json_return_type>
|
|
||||||
rest_logstor_info(http_context& ctx, std::unique_ptr<http::request> req) {
|
|
||||||
auto keyspace = api::req_param<sstring>(*req, "keyspace", {}).value;
|
|
||||||
auto table = api::req_param<sstring>(*req, "table", {}).value;
|
|
||||||
if (table.empty()) {
|
|
||||||
table = api::req_param<sstring>(*req, "cf", {}).value;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (keyspace.empty()) {
|
|
||||||
throw bad_param_exception("The query parameter 'keyspace' is required");
|
|
||||||
}
|
|
||||||
if (table.empty()) {
|
|
||||||
throw bad_param_exception("The query parameter 'table' is required");
|
|
||||||
}
|
|
||||||
|
|
||||||
keyspace = validate_keyspace(ctx, keyspace);
|
|
||||||
auto tid = validate_table(ctx.db.local(), keyspace, table);
|
|
||||||
|
|
||||||
auto& cf = ctx.db.local().find_column_family(tid);
|
|
||||||
if (!cf.uses_logstor()) {
|
|
||||||
throw bad_param_exception(fmt::format("Table {}.{} does not use logstor", keyspace, table));
|
|
||||||
}
|
|
||||||
|
|
||||||
return do_with(replica::logstor::table_segment_stats{}, [keyspace = std::move(keyspace), table = std::move(table), tid, &ctx] (replica::logstor::table_segment_stats& merged_stats) {
|
|
||||||
return ctx.db.map_reduce([&merged_stats](replica::logstor::table_segment_stats&& shard_stats) {
|
|
||||||
merged_stats += shard_stats;
|
|
||||||
}, [tid](const replica::database& db) {
|
|
||||||
return db.get_logstor_table_segment_stats(tid);
|
|
||||||
}).then([&merged_stats, keyspace = std::move(keyspace), table = std::move(table)] {
|
|
||||||
ss::table_logstor_info result;
|
|
||||||
result.keyspace = keyspace;
|
|
||||||
result.table = table;
|
|
||||||
result.compaction_groups = merged_stats.compaction_group_count;
|
|
||||||
result.segments = merged_stats.segment_count;
|
|
||||||
|
|
||||||
for (const auto& bucket : merged_stats.histogram) {
|
|
||||||
ss::logstor_hist_bucket hist;
|
|
||||||
hist.count = bucket.count;
|
|
||||||
hist.max_data_size = bucket.max_data_size;
|
|
||||||
result.data_size_histogram.push(std::move(hist));
|
|
||||||
}
|
|
||||||
|
|
||||||
return make_ready_future<json::json_return_type>(stream_object(result));
|
|
||||||
});
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
static
|
static
|
||||||
future<json::json_return_type>
|
future<json::json_return_type>
|
||||||
rest_reload_raft_topology_state(sharded<service::storage_service>& ss, service::raft_group0_client& group0_client, std::unique_ptr<http::request> req) {
|
rest_reload_raft_topology_state(sharded<service::storage_service>& ss, service::raft_group0_client& group0_client, std::unique_ptr<http::request> req) {
|
||||||
@@ -1583,14 +1569,26 @@ rest_reload_raft_topology_state(sharded<service::storage_service>& ss, service::
|
|||||||
static
|
static
|
||||||
future<json::json_return_type>
|
future<json::json_return_type>
|
||||||
rest_upgrade_to_raft_topology(sharded<service::storage_service>& ss, std::unique_ptr<http::request> req) {
|
rest_upgrade_to_raft_topology(sharded<service::storage_service>& ss, std::unique_ptr<http::request> req) {
|
||||||
apilog.info("Requested to schedule upgrade to raft topology, but this version does not need it since it uses raft topology by default.");
|
apilog.info("Requested to schedule upgrade to raft topology");
|
||||||
|
try {
|
||||||
|
co_await ss.invoke_on(0, [] (auto& ss) {
|
||||||
|
return ss.start_upgrade_to_raft_topology();
|
||||||
|
});
|
||||||
|
} catch (...) {
|
||||||
|
auto ex = std::current_exception();
|
||||||
|
apilog.error("Failed to schedule upgrade to raft topology: {}", ex);
|
||||||
|
std::rethrow_exception(std::move(ex));
|
||||||
|
}
|
||||||
co_return json_void();
|
co_return json_void();
|
||||||
}
|
}
|
||||||
|
|
||||||
static
|
static
|
||||||
future<json::json_return_type>
|
future<json::json_return_type>
|
||||||
rest_raft_topology_upgrade_status(sharded<service::storage_service>& ss, std::unique_ptr<http::request> req) {
|
rest_raft_topology_upgrade_status(sharded<service::storage_service>& ss, std::unique_ptr<http::request> req) {
|
||||||
co_return sstring("done");
|
const auto ustate = co_await ss.invoke_on(0, [] (auto& ss) {
|
||||||
|
return ss.get_topology_upgrade_state();
|
||||||
|
});
|
||||||
|
co_return sstring(format("{}", ustate));
|
||||||
}
|
}
|
||||||
|
|
||||||
static
|
static
|
||||||
@@ -1800,8 +1798,9 @@ rest_bind(FuncType func, BindArgs&... args) {
|
|||||||
return std::bind_front(func, std::ref(args)...);
|
return std::bind_front(func, std::ref(args)...);
|
||||||
}
|
}
|
||||||
|
|
||||||
void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_service>& ss, sharded<db::snapshot_ctl>& ssc, service::raft_group0_client& group0_client) {
|
void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_service>& ss, service::raft_group0_client& group0_client) {
|
||||||
ss::get_token_endpoint.set(r, rest_bind(rest_get_token_endpoint, ctx, ss));
|
ss::get_token_endpoint.set(r, rest_bind(rest_get_token_endpoint, ctx, ss));
|
||||||
|
ss::toppartitions_generic.set(r, rest_bind(rest_toppartitions_generic, ctx));
|
||||||
ss::get_release_version.set(r, rest_bind(rest_get_release_version, ss));
|
ss::get_release_version.set(r, rest_bind(rest_get_release_version, ss));
|
||||||
ss::get_scylla_release_version.set(r, rest_bind(rest_get_scylla_release_version, ss));
|
ss::get_scylla_release_version.set(r, rest_bind(rest_get_scylla_release_version, ss));
|
||||||
ss::get_schema_version.set(r, rest_bind(rest_get_schema_version, ss));
|
ss::get_schema_version.set(r, rest_bind(rest_get_schema_version, ss));
|
||||||
@@ -1816,9 +1815,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
|||||||
ss::reset_cleanup_needed.set(r, rest_bind(rest_reset_cleanup_needed, ctx, ss));
|
ss::reset_cleanup_needed.set(r, rest_bind(rest_reset_cleanup_needed, ctx, ss));
|
||||||
ss::force_flush.set(r, rest_bind(rest_force_flush, ctx));
|
ss::force_flush.set(r, rest_bind(rest_force_flush, ctx));
|
||||||
ss::force_keyspace_flush.set(r, rest_bind(rest_force_keyspace_flush, ctx));
|
ss::force_keyspace_flush.set(r, rest_bind(rest_force_keyspace_flush, ctx));
|
||||||
ss::decommission.set(r, rest_bind(rest_decommission, ss, ssc));
|
ss::decommission.set(r, rest_bind(rest_decommission, ss));
|
||||||
ss::logstor_compaction.set(r, rest_bind(rest_logstor_compaction, ctx));
|
|
||||||
ss::logstor_flush.set(r, rest_bind(rest_logstor_flush, ctx));
|
|
||||||
ss::move.set(r, rest_bind(rest_move, ss));
|
ss::move.set(r, rest_bind(rest_move, ss));
|
||||||
ss::remove_node.set(r, rest_bind(rest_remove_node, ss));
|
ss::remove_node.set(r, rest_bind(rest_remove_node, ss));
|
||||||
ss::exclude_node.set(r, rest_bind(rest_exclude_node, ss));
|
ss::exclude_node.set(r, rest_bind(rest_exclude_node, ss));
|
||||||
@@ -1867,7 +1864,6 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
|||||||
ss::retrain_dict.set(r, rest_bind(rest_retrain_dict, ctx, ss, group0_client));
|
ss::retrain_dict.set(r, rest_bind(rest_retrain_dict, ctx, ss, group0_client));
|
||||||
ss::estimate_compression_ratios.set(r, rest_bind(rest_estimate_compression_ratios, ctx, ss));
|
ss::estimate_compression_ratios.set(r, rest_bind(rest_estimate_compression_ratios, ctx, ss));
|
||||||
ss::sstable_info.set(r, rest_bind(rest_sstable_info, ctx));
|
ss::sstable_info.set(r, rest_bind(rest_sstable_info, ctx));
|
||||||
ss::logstor_info.set(r, rest_bind(rest_logstor_info, ctx));
|
|
||||||
ss::reload_raft_topology_state.set(r, rest_bind(rest_reload_raft_topology_state, ss, group0_client));
|
ss::reload_raft_topology_state.set(r, rest_bind(rest_reload_raft_topology_state, ss, group0_client));
|
||||||
ss::upgrade_to_raft_topology.set(r, rest_bind(rest_upgrade_to_raft_topology, ss));
|
ss::upgrade_to_raft_topology.set(r, rest_bind(rest_upgrade_to_raft_topology, ss));
|
||||||
ss::raft_topology_upgrade_status.set(r, rest_bind(rest_raft_topology_upgrade_status, ss));
|
ss::raft_topology_upgrade_status.set(r, rest_bind(rest_raft_topology_upgrade_status, ss));
|
||||||
@@ -1884,6 +1880,7 @@ void set_storage_service(http_context& ctx, routes& r, sharded<service::storage_
|
|||||||
|
|
||||||
void unset_storage_service(http_context& ctx, routes& r) {
|
void unset_storage_service(http_context& ctx, routes& r) {
|
||||||
ss::get_token_endpoint.unset(r);
|
ss::get_token_endpoint.unset(r);
|
||||||
|
ss::toppartitions_generic.unset(r);
|
||||||
ss::get_release_version.unset(r);
|
ss::get_release_version.unset(r);
|
||||||
ss::get_scylla_release_version.unset(r);
|
ss::get_scylla_release_version.unset(r);
|
||||||
ss::get_schema_version.unset(r);
|
ss::get_schema_version.unset(r);
|
||||||
@@ -1897,8 +1894,6 @@ void unset_storage_service(http_context& ctx, routes& r) {
|
|||||||
ss::reset_cleanup_needed.unset(r);
|
ss::reset_cleanup_needed.unset(r);
|
||||||
ss::force_flush.unset(r);
|
ss::force_flush.unset(r);
|
||||||
ss::force_keyspace_flush.unset(r);
|
ss::force_keyspace_flush.unset(r);
|
||||||
ss::logstor_compaction.unset(r);
|
|
||||||
ss::logstor_flush.unset(r);
|
|
||||||
ss::decommission.unset(r);
|
ss::decommission.unset(r);
|
||||||
ss::move.unset(r);
|
ss::move.unset(r);
|
||||||
ss::remove_node.unset(r);
|
ss::remove_node.unset(r);
|
||||||
@@ -1946,7 +1941,6 @@ void unset_storage_service(http_context& ctx, routes& r) {
|
|||||||
ss::get_ownership.unset(r);
|
ss::get_ownership.unset(r);
|
||||||
ss::get_effective_ownership.unset(r);
|
ss::get_effective_ownership.unset(r);
|
||||||
ss::sstable_info.unset(r);
|
ss::sstable_info.unset(r);
|
||||||
ss::logstor_info.unset(r);
|
|
||||||
ss::reload_raft_topology_state.unset(r);
|
ss::reload_raft_topology_state.unset(r);
|
||||||
ss::upgrade_to_raft_topology.unset(r);
|
ss::upgrade_to_raft_topology.unset(r);
|
||||||
ss::raft_topology_upgrade_status.unset(r);
|
ss::raft_topology_upgrade_status.unset(r);
|
||||||
@@ -2026,16 +2020,12 @@ void set_snapshot(http_context& ctx, routes& r, sharded<db::snapshot_ctl>& snap_
|
|||||||
auto tag = req->get_query_param("tag");
|
auto tag = req->get_query_param("tag");
|
||||||
auto column_families = split(req->get_query_param("cf"), ",");
|
auto column_families = split(req->get_query_param("cf"), ",");
|
||||||
auto sfopt = req->get_query_param("sf");
|
auto sfopt = req->get_query_param("sf");
|
||||||
auto tcopt = req->get_query_param("tc");
|
auto sf = db::snapshot_ctl::skip_flush(strcasecmp(sfopt.c_str(), "true") == 0);
|
||||||
|
|
||||||
db::snapshot_options opts = {
|
|
||||||
.skip_flush = strcasecmp(sfopt.c_str(), "true") == 0,
|
|
||||||
};
|
|
||||||
|
|
||||||
std::vector<sstring> keynames = split(req->get_query_param("kn"), ",");
|
std::vector<sstring> keynames = split(req->get_query_param("kn"), ",");
|
||||||
try {
|
try {
|
||||||
if (column_families.empty()) {
|
if (column_families.empty()) {
|
||||||
co_await snap_ctl.local().take_snapshot(tag, keynames, opts);
|
co_await snap_ctl.local().take_snapshot(tag, keynames, sf);
|
||||||
} else {
|
} else {
|
||||||
if (keynames.empty()) {
|
if (keynames.empty()) {
|
||||||
throw httpd::bad_param_exception("The keyspace of column families must be specified");
|
throw httpd::bad_param_exception("The keyspace of column families must be specified");
|
||||||
@@ -2043,7 +2033,7 @@ void set_snapshot(http_context& ctx, routes& r, sharded<db::snapshot_ctl>& snap_
|
|||||||
if (keynames.size() > 1) {
|
if (keynames.size() > 1) {
|
||||||
throw httpd::bad_param_exception("Only one keyspace allowed when specifying a column family");
|
throw httpd::bad_param_exception("Only one keyspace allowed when specifying a column family");
|
||||||
}
|
}
|
||||||
co_await snap_ctl.local().take_column_family_snapshot(keynames[0], column_families, tag, opts);
|
co_await snap_ctl.local().take_column_family_snapshot(keynames[0], column_families, tag, sf);
|
||||||
}
|
}
|
||||||
co_return json_void();
|
co_return json_void();
|
||||||
} catch (...) {
|
} catch (...) {
|
||||||
@@ -2052,27 +2042,6 @@ void set_snapshot(http_context& ctx, routes& r, sharded<db::snapshot_ctl>& snap_
|
|||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
ss::take_cluster_snapshot.set(r, [&snap_ctl](std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
|
||||||
apilog.info("take_cluster_snapshot: {}", req->get_query_params());
|
|
||||||
auto tag = req->get_query_param("tag");
|
|
||||||
auto column_families = split(req->get_query_param("table"), ",");
|
|
||||||
// Note: not published/active. Retain as internal option, but...
|
|
||||||
auto sfopt = req->get_query_param("skip_flush");
|
|
||||||
|
|
||||||
db::snapshot_options opts = {
|
|
||||||
.skip_flush = strcasecmp(sfopt.c_str(), "true") == 0,
|
|
||||||
};
|
|
||||||
|
|
||||||
std::vector<sstring> keynames = split(req->get_query_param("keyspace"), ",");
|
|
||||||
try {
|
|
||||||
co_await snap_ctl.local().take_cluster_column_family_snapshot(keynames, column_families, tag, opts);
|
|
||||||
co_return json_void();
|
|
||||||
} catch (...) {
|
|
||||||
apilog.error("take_cluster_snapshot failed: {}", std::current_exception());
|
|
||||||
throw;
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
ss::del_snapshot.set(r, [&snap_ctl](std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
ss::del_snapshot.set(r, [&snap_ctl](std::unique_ptr<http::request> req) -> future<json::json_return_type> {
|
||||||
apilog.info("del_snapshot: {}", req->get_query_params());
|
apilog.info("del_snapshot: {}", req->get_query_params());
|
||||||
auto tag = req->get_query_param("tag");
|
auto tag = req->get_query_param("tag");
|
||||||
@@ -2099,8 +2068,7 @@ void set_snapshot(http_context& ctx, routes& r, sharded<db::snapshot_ctl>& snap_
|
|||||||
auto info = parse_scrub_options(ctx, std::move(req));
|
auto info = parse_scrub_options(ctx, std::move(req));
|
||||||
|
|
||||||
if (!info.snapshot_tag.empty()) {
|
if (!info.snapshot_tag.empty()) {
|
||||||
db::snapshot_options opts = {.skip_flush = false};
|
co_await snap_ctl.local().take_column_family_snapshot(info.keyspace, info.column_families, info.snapshot_tag, db::snapshot_ctl::skip_flush::no);
|
||||||
co_await snap_ctl.local().take_column_family_snapshot(info.keyspace, info.column_families, info.snapshot_tag, opts);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
compaction::compaction_stats stats;
|
compaction::compaction_stats stats;
|
||||||
@@ -2163,7 +2131,6 @@ void unset_snapshot(http_context& ctx, routes& r) {
|
|||||||
ss::start_backup.unset(r);
|
ss::start_backup.unset(r);
|
||||||
cf::get_true_snapshots_size.unset(r);
|
cf::get_true_snapshots_size.unset(r);
|
||||||
cf::get_all_true_snapshots_size.unset(r);
|
cf::get_all_true_snapshots_size.unset(r);
|
||||||
ss::decommission.unset(r);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -66,7 +66,7 @@ struct scrub_info {
|
|||||||
|
|
||||||
scrub_info parse_scrub_options(const http_context& ctx, std::unique_ptr<http::request> req);
|
scrub_info parse_scrub_options(const http_context& ctx, std::unique_ptr<http::request> req);
|
||||||
|
|
||||||
void set_storage_service(http_context& ctx, httpd::routes& r, sharded<service::storage_service>& ss, sharded<db::snapshot_ctl>&, service::raft_group0_client&);
|
void set_storage_service(http_context& ctx, httpd::routes& r, sharded<service::storage_service>& ss, service::raft_group0_client&);
|
||||||
void unset_storage_service(http_context& ctx, httpd::routes& r);
|
void unset_storage_service(http_context& ctx, httpd::routes& r);
|
||||||
void set_sstables_loader(http_context& ctx, httpd::routes& r, sharded<sstables_loader>& sst_loader);
|
void set_sstables_loader(http_context& ctx, httpd::routes& r, sharded<sstables_loader>& sst_loader);
|
||||||
void unset_sstables_loader(http_context& ctx, httpd::routes& r);
|
void unset_sstables_loader(http_context& ctx, httpd::routes& r);
|
||||||
|
|||||||
@@ -190,13 +190,6 @@ void set_system(http_context& ctx, routes& r) {
|
|||||||
return make_ready_future<json::json_return_type>(seastar::to_sstring(format));
|
return make_ready_future<json::json_return_type>(seastar::to_sstring(format));
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
hs::get_chosen_sstable_version.set(r, [&ctx] (std::unique_ptr<request> req) {
|
|
||||||
return smp::submit_to(0, [&ctx] {
|
|
||||||
auto format = ctx.db.local().get_user_sstables_manager().get_preferred_sstable_version();
|
|
||||||
return make_ready_future<json::json_return_type>(seastar::to_sstring(format));
|
|
||||||
});
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -9,7 +9,6 @@
|
|||||||
#include <seastar/core/chunked_fifo.hh>
|
#include <seastar/core/chunked_fifo.hh>
|
||||||
#include <seastar/core/coroutine.hh>
|
#include <seastar/core/coroutine.hh>
|
||||||
#include <seastar/coroutine/exception.hh>
|
#include <seastar/coroutine/exception.hh>
|
||||||
#include <seastar/coroutine/maybe_yield.hh>
|
|
||||||
#include <seastar/http/exception.hh>
|
#include <seastar/http/exception.hh>
|
||||||
|
|
||||||
#include "task_manager.hh"
|
#include "task_manager.hh"
|
||||||
@@ -56,6 +55,7 @@ tm::task_status make_status(tasks::task_status status, sharded<gms::gossiper>& g
|
|||||||
res.scope = status.scope;
|
res.scope = status.scope;
|
||||||
res.state = status.state;
|
res.state = status.state;
|
||||||
res.is_abortable = bool(status.is_abortable);
|
res.is_abortable = bool(status.is_abortable);
|
||||||
|
res.creation_time = get_time(status.creation_time);
|
||||||
res.start_time = get_time(status.start_time);
|
res.start_time = get_time(status.start_time);
|
||||||
res.end_time = get_time(status.end_time);
|
res.end_time = get_time(status.end_time);
|
||||||
res.error = status.error;
|
res.error = status.error;
|
||||||
@@ -84,6 +84,7 @@ tm::task_stats make_stats(tasks::task_stats stats) {
|
|||||||
res.table = stats.table;
|
res.table = stats.table;
|
||||||
res.entity = stats.entity;
|
res.entity = stats.entity;
|
||||||
res.shard = stats.shard;
|
res.shard = stats.shard;
|
||||||
|
res.creation_time = get_time(stats.creation_time);
|
||||||
res.start_time = get_time(stats.start_time);
|
res.start_time = get_time(stats.start_time);
|
||||||
res.end_time = get_time(stats.end_time);;
|
res.end_time = get_time(stats.end_time);;
|
||||||
return res;
|
return res;
|
||||||
@@ -265,7 +266,7 @@ void set_task_manager(http_context& ctx, routes& r, sharded<tasks::task_manager>
|
|||||||
if (id) {
|
if (id) {
|
||||||
module->unregister_task(id);
|
module->unregister_task(id);
|
||||||
}
|
}
|
||||||
co_await coroutine::maybe_yield();
|
co_await maybe_yield();
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
co_return json_void();
|
co_return json_void();
|
||||||
|
|||||||
@@ -146,8 +146,7 @@ void set_tasks_compaction_module(http_context& ctx, routes& r, sharded<service::
|
|||||||
auto info = parse_scrub_options(ctx, std::move(req));
|
auto info = parse_scrub_options(ctx, std::move(req));
|
||||||
|
|
||||||
if (!info.snapshot_tag.empty()) {
|
if (!info.snapshot_tag.empty()) {
|
||||||
db::snapshot_options opts = {.skip_flush = false};
|
co_await snap_ctl.local().take_column_family_snapshot(info.keyspace, info.column_families, info.snapshot_tag, db::snapshot_ctl::skip_flush::no);
|
||||||
co_await snap_ctl.local().take_column_family_snapshot(info.keyspace, info.column_families, info.snapshot_tag, opts);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
auto& compaction_module = db.local().get_compaction_manager().get_task_manager_module();
|
auto& compaction_module = db.local().get_compaction_manager().get_task_manager_module();
|
||||||
|
|||||||
@@ -209,11 +209,15 @@ future<> audit::stop_audit() {
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
audit_info_ptr audit::create_audit_info(statement_category cat, const sstring& keyspace, const sstring& table, bool batch) {
|
audit_info_ptr audit::create_audit_info(statement_category cat, const sstring& keyspace, const sstring& table) {
|
||||||
if (!audit_instance().local_is_initialized()) {
|
if (!audit_instance().local_is_initialized()) {
|
||||||
return nullptr;
|
return nullptr;
|
||||||
}
|
}
|
||||||
return std::make_unique<audit_info>(cat, keyspace, table, batch);
|
return std::make_unique<audit_info>(cat, keyspace, table);
|
||||||
|
}
|
||||||
|
|
||||||
|
audit_info_ptr audit::create_no_audit_info() {
|
||||||
|
return audit_info_ptr();
|
||||||
}
|
}
|
||||||
|
|
||||||
future<> audit::start(const db::config& cfg) {
|
future<> audit::start(const db::config& cfg) {
|
||||||
@@ -263,21 +267,18 @@ future<> audit::log_login(const sstring& username, socket_address client_ip, boo
|
|||||||
}
|
}
|
||||||
|
|
||||||
future<> inspect(shared_ptr<cql3::cql_statement> statement, service::query_state& query_state, const cql3::query_options& options, bool error) {
|
future<> inspect(shared_ptr<cql3::cql_statement> statement, service::query_state& query_state, const cql3::query_options& options, bool error) {
|
||||||
auto audit_info = statement->get_audit_info();
|
cql3::statements::batch_statement* batch = dynamic_cast<cql3::statements::batch_statement*>(statement.get());
|
||||||
if (!audit_info) {
|
if (batch != nullptr) {
|
||||||
return make_ready_future<>();
|
|
||||||
}
|
|
||||||
if (audit_info->batch()) {
|
|
||||||
cql3::statements::batch_statement* batch = static_cast<cql3::statements::batch_statement*>(statement.get());
|
|
||||||
return do_for_each(batch->statements().begin(), batch->statements().end(), [&query_state, &options, error] (auto&& m) {
|
return do_for_each(batch->statements().begin(), batch->statements().end(), [&query_state, &options, error] (auto&& m) {
|
||||||
return inspect(m.statement, query_state, options, error);
|
return inspect(m.statement, query_state, options, error);
|
||||||
});
|
});
|
||||||
} else {
|
} else {
|
||||||
if (audit::local_audit_instance().should_log(audit_info)) {
|
auto audit_info = statement->get_audit_info();
|
||||||
|
if (bool(audit_info) && audit::local_audit_instance().should_log(audit_info)) {
|
||||||
return audit::local_audit_instance().log(audit_info, query_state, options, error);
|
return audit::local_audit_instance().log(audit_info, query_state, options, error);
|
||||||
}
|
}
|
||||||
return make_ready_future<>();
|
|
||||||
}
|
}
|
||||||
|
return make_ready_future<>();
|
||||||
}
|
}
|
||||||
|
|
||||||
future<> inspect_login(const sstring& username, socket_address client_ip, bool error) {
|
future<> inspect_login(const sstring& username, socket_address client_ip, bool error) {
|
||||||
|
|||||||
@@ -75,13 +75,11 @@ class audit_info final {
|
|||||||
sstring _keyspace;
|
sstring _keyspace;
|
||||||
sstring _table;
|
sstring _table;
|
||||||
sstring _query;
|
sstring _query;
|
||||||
bool _batch;
|
|
||||||
public:
|
public:
|
||||||
audit_info(statement_category cat, sstring keyspace, sstring table, bool batch)
|
audit_info(statement_category cat, sstring keyspace, sstring table)
|
||||||
: _category(cat)
|
: _category(cat)
|
||||||
, _keyspace(std::move(keyspace))
|
, _keyspace(std::move(keyspace))
|
||||||
, _table(std::move(table))
|
, _table(std::move(table))
|
||||||
, _batch(batch)
|
|
||||||
{ }
|
{ }
|
||||||
void set_query_string(const std::string_view& query_string) {
|
void set_query_string(const std::string_view& query_string) {
|
||||||
_query = sstring(query_string);
|
_query = sstring(query_string);
|
||||||
@@ -91,7 +89,6 @@ public:
|
|||||||
const sstring& query() const { return _query; }
|
const sstring& query() const { return _query; }
|
||||||
sstring category_string() const;
|
sstring category_string() const;
|
||||||
statement_category category() const { return _category; }
|
statement_category category() const { return _category; }
|
||||||
bool batch() const { return _batch; }
|
|
||||||
};
|
};
|
||||||
|
|
||||||
using audit_info_ptr = std::unique_ptr<audit_info>;
|
using audit_info_ptr = std::unique_ptr<audit_info>;
|
||||||
@@ -129,7 +126,8 @@ public:
|
|||||||
}
|
}
|
||||||
static future<> start_audit(const db::config& cfg, sharded<locator::shared_token_metadata>& stm, sharded<cql3::query_processor>& qp, sharded<service::migration_manager>& mm);
|
static future<> start_audit(const db::config& cfg, sharded<locator::shared_token_metadata>& stm, sharded<cql3::query_processor>& qp, sharded<service::migration_manager>& mm);
|
||||||
static future<> stop_audit();
|
static future<> stop_audit();
|
||||||
static audit_info_ptr create_audit_info(statement_category cat, const sstring& keyspace, const sstring& table, bool batch = false);
|
static audit_info_ptr create_audit_info(statement_category cat, const sstring& keyspace, const sstring& table);
|
||||||
|
static audit_info_ptr create_no_audit_info();
|
||||||
audit(locator::shared_token_metadata& stm,
|
audit(locator::shared_token_metadata& stm,
|
||||||
cql3::query_processor& qp,
|
cql3::query_processor& qp,
|
||||||
service::migration_manager& mm,
|
service::migration_manager& mm,
|
||||||
|
|||||||
@@ -53,10 +53,10 @@ static std::string json_escape(std::string_view str) {
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
future<> audit_syslog_storage_helper::syslog_send_helper(temporary_buffer<char> msg) {
|
future<> audit_syslog_storage_helper::syslog_send_helper(const sstring& msg) {
|
||||||
try {
|
try {
|
||||||
auto lock = co_await get_units(_semaphore, 1, std::chrono::hours(1));
|
auto lock = co_await get_units(_semaphore, 1, std::chrono::hours(1));
|
||||||
co_await _sender.send(_syslog_address, std::span(&msg, 1));
|
co_await _sender.send(_syslog_address, net::packet{msg.data(), msg.size()});
|
||||||
}
|
}
|
||||||
catch (const std::exception& e) {
|
catch (const std::exception& e) {
|
||||||
auto error_msg = seastar::format(
|
auto error_msg = seastar::format(
|
||||||
@@ -90,7 +90,7 @@ future<> audit_syslog_storage_helper::start(const db::config& cfg) {
|
|||||||
co_return;
|
co_return;
|
||||||
}
|
}
|
||||||
|
|
||||||
co_await syslog_send_helper(temporary_buffer<char>::copy_of("Initializing syslog audit backend."));
|
co_await syslog_send_helper("Initializing syslog audit backend.");
|
||||||
}
|
}
|
||||||
|
|
||||||
future<> audit_syslog_storage_helper::stop() {
|
future<> audit_syslog_storage_helper::stop() {
|
||||||
@@ -120,7 +120,7 @@ future<> audit_syslog_storage_helper::write(const audit_info* audit_info,
|
|||||||
audit_info->table(),
|
audit_info->table(),
|
||||||
username);
|
username);
|
||||||
|
|
||||||
co_await syslog_send_helper(std::move(msg).release());
|
co_await syslog_send_helper(msg);
|
||||||
}
|
}
|
||||||
|
|
||||||
future<> audit_syslog_storage_helper::write_login(const sstring& username,
|
future<> audit_syslog_storage_helper::write_login(const sstring& username,
|
||||||
@@ -139,7 +139,7 @@ future<> audit_syslog_storage_helper::write_login(const sstring& username,
|
|||||||
client_ip,
|
client_ip,
|
||||||
username);
|
username);
|
||||||
|
|
||||||
co_await syslog_send_helper(std::move(msg).release());
|
co_await syslog_send_helper(msg.c_str());
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -26,7 +26,7 @@ class audit_syslog_storage_helper : public storage_helper {
|
|||||||
net::datagram_channel _sender;
|
net::datagram_channel _sender;
|
||||||
seastar::semaphore _semaphore;
|
seastar::semaphore _semaphore;
|
||||||
|
|
||||||
future<> syslog_send_helper(seastar::temporary_buffer<char> msg);
|
future<> syslog_send_helper(const sstring& msg);
|
||||||
public:
|
public:
|
||||||
explicit audit_syslog_storage_helper(cql3::query_processor&, service::migration_manager&);
|
explicit audit_syslog_storage_helper(cql3::query_processor&, service::migration_manager&);
|
||||||
virtual ~audit_syslog_storage_helper();
|
virtual ~audit_syslog_storage_helper();
|
||||||
|
|||||||
@@ -17,14 +17,15 @@ target_sources(scylla_auth
|
|||||||
password_authenticator.cc
|
password_authenticator.cc
|
||||||
passwords.cc
|
passwords.cc
|
||||||
permission.cc
|
permission.cc
|
||||||
|
permissions_cache.cc
|
||||||
resource.cc
|
resource.cc
|
||||||
role_or_anonymous.cc
|
role_or_anonymous.cc
|
||||||
|
roles-metadata.cc
|
||||||
sasl_challenge.cc
|
sasl_challenge.cc
|
||||||
saslauthd_authenticator.cc
|
saslauthd_authenticator.cc
|
||||||
service.cc
|
service.cc
|
||||||
standard_role_manager.cc
|
standard_role_manager.cc
|
||||||
transitional.cc
|
transitional.cc
|
||||||
maintenance_socket_authenticator.cc
|
|
||||||
maintenance_socket_role_manager.cc)
|
maintenance_socket_role_manager.cc)
|
||||||
target_include_directories(scylla_auth
|
target_include_directories(scylla_auth
|
||||||
PUBLIC
|
PUBLIC
|
||||||
@@ -48,4 +49,4 @@ if (Scylla_USE_PRECOMPILED_HEADER_USE)
|
|||||||
target_precompile_headers(scylla_auth REUSE_FROM scylla-precompiled-header)
|
target_precompile_headers(scylla_auth REUSE_FROM scylla-precompiled-header)
|
||||||
endif()
|
endif()
|
||||||
check_headers(check-headers scylla_auth
|
check_headers(check-headers scylla_auth
|
||||||
GLOB_RECURSE ${CMAKE_CURRENT_SOURCE_DIR}/*.hh)
|
GLOB_RECURSE ${CMAKE_CURRENT_SOURCE_DIR}/*.hh)
|
||||||
@@ -9,9 +9,21 @@
|
|||||||
#include "auth/allow_all_authenticator.hh"
|
#include "auth/allow_all_authenticator.hh"
|
||||||
|
|
||||||
#include "service/migration_manager.hh"
|
#include "service/migration_manager.hh"
|
||||||
|
#include "utils/alien_worker.hh"
|
||||||
|
#include "utils/class_registrator.hh"
|
||||||
|
|
||||||
namespace auth {
|
namespace auth {
|
||||||
|
|
||||||
constexpr std::string_view allow_all_authenticator_name("org.apache.cassandra.auth.AllowAllAuthenticator");
|
constexpr std::string_view allow_all_authenticator_name("org.apache.cassandra.auth.AllowAllAuthenticator");
|
||||||
|
|
||||||
|
// To ensure correct initialization order, we unfortunately need to use a string literal.
|
||||||
|
static const class_registrator<
|
||||||
|
authenticator,
|
||||||
|
allow_all_authenticator,
|
||||||
|
cql3::query_processor&,
|
||||||
|
::service::raft_group0_client&,
|
||||||
|
::service::migration_manager&,
|
||||||
|
cache&,
|
||||||
|
utils::alien_worker&> registration("org.apache.cassandra.auth.AllowAllAuthenticator");
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -14,6 +14,7 @@
|
|||||||
#include "auth/authenticator.hh"
|
#include "auth/authenticator.hh"
|
||||||
#include "auth/cache.hh"
|
#include "auth/cache.hh"
|
||||||
#include "auth/common.hh"
|
#include "auth/common.hh"
|
||||||
|
#include "utils/alien_worker.hh"
|
||||||
|
|
||||||
namespace cql3 {
|
namespace cql3 {
|
||||||
class query_processor;
|
class query_processor;
|
||||||
@@ -29,7 +30,7 @@ extern const std::string_view allow_all_authenticator_name;
|
|||||||
|
|
||||||
class allow_all_authenticator final : public authenticator {
|
class allow_all_authenticator final : public authenticator {
|
||||||
public:
|
public:
|
||||||
allow_all_authenticator(cql3::query_processor&, ::service::raft_group0_client&, ::service::migration_manager&, cache&) {
|
allow_all_authenticator(cql3::query_processor&, ::service::raft_group0_client&, ::service::migration_manager&, cache&, utils::alien_worker&) {
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual future<> start() override {
|
virtual future<> start() override {
|
||||||
|
|||||||
@@ -9,9 +9,18 @@
|
|||||||
#include "auth/allow_all_authorizer.hh"
|
#include "auth/allow_all_authorizer.hh"
|
||||||
|
|
||||||
#include "auth/common.hh"
|
#include "auth/common.hh"
|
||||||
|
#include "utils/class_registrator.hh"
|
||||||
|
|
||||||
namespace auth {
|
namespace auth {
|
||||||
|
|
||||||
constexpr std::string_view allow_all_authorizer_name("org.apache.cassandra.auth.AllowAllAuthorizer");
|
constexpr std::string_view allow_all_authorizer_name("org.apache.cassandra.auth.AllowAllAuthorizer");
|
||||||
|
|
||||||
|
// To ensure correct initialization order, we unfortunately need to use a string literal.
|
||||||
|
static const class_registrator<
|
||||||
|
authorizer,
|
||||||
|
allow_all_authorizer,
|
||||||
|
cql3::query_processor&,
|
||||||
|
::service::raft_group0_client&,
|
||||||
|
::service::migration_manager&> registration("org.apache.cassandra.auth.AllowAllAuthorizer");
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -26,7 +26,7 @@ extern const std::string_view allow_all_authorizer_name;
|
|||||||
|
|
||||||
class allow_all_authorizer final : public authorizer {
|
class allow_all_authorizer final : public authorizer {
|
||||||
public:
|
public:
|
||||||
allow_all_authorizer(cql3::query_processor&) {
|
allow_all_authorizer(cql3::query_processor&, ::service::raft_group0_client&, ::service::migration_manager&) {
|
||||||
}
|
}
|
||||||
|
|
||||||
virtual future<> start() override {
|
virtual future<> start() override {
|
||||||
|
|||||||
209
auth/cache.cc
209
auth/cache.cc
@@ -8,7 +8,6 @@
|
|||||||
|
|
||||||
#include "auth/cache.hh"
|
#include "auth/cache.hh"
|
||||||
#include "auth/common.hh"
|
#include "auth/common.hh"
|
||||||
#include "auth/role_or_anonymous.hh"
|
|
||||||
#include "auth/roles-metadata.hh"
|
#include "auth/roles-metadata.hh"
|
||||||
#include "cql3/query_processor.hh"
|
#include "cql3/query_processor.hh"
|
||||||
#include "cql3/untyped_result_set.hh"
|
#include "cql3/untyped_result_set.hh"
|
||||||
@@ -16,38 +15,19 @@
|
|||||||
#include "db/system_keyspace.hh"
|
#include "db/system_keyspace.hh"
|
||||||
#include "schema/schema.hh"
|
#include "schema/schema.hh"
|
||||||
#include <iterator>
|
#include <iterator>
|
||||||
#include <seastar/core/abort_source.hh>
|
|
||||||
#include <seastar/coroutine/maybe_yield.hh>
|
#include <seastar/coroutine/maybe_yield.hh>
|
||||||
#include <seastar/core/format.hh>
|
#include <seastar/core/format.hh>
|
||||||
#include <seastar/core/metrics.hh>
|
|
||||||
#include <seastar/core/do_with.hh>
|
|
||||||
|
|
||||||
namespace auth {
|
namespace auth {
|
||||||
|
|
||||||
logging::logger logger("auth-cache");
|
logging::logger logger("auth-cache");
|
||||||
|
|
||||||
cache::cache(cql3::query_processor& qp, abort_source& as) noexcept
|
cache::cache(cql3::query_processor& qp) noexcept
|
||||||
: _current_version(0)
|
: _current_version(0)
|
||||||
, _qp(qp)
|
, _qp(qp) {
|
||||||
, _loading_sem(1)
|
|
||||||
, _as(as)
|
|
||||||
, _permission_loader(nullptr)
|
|
||||||
, _permission_loader_sem(8) {
|
|
||||||
namespace sm = seastar::metrics;
|
|
||||||
_metrics.add_group("auth_cache", {
|
|
||||||
sm::make_gauge("roles", [this] { return _roles.size(); },
|
|
||||||
sm::description("Number of roles currently cached")),
|
|
||||||
sm::make_gauge("permissions", [this] {
|
|
||||||
return _cached_permissions_count;
|
|
||||||
}, sm::description("Total number of permission sets currently cached across all roles"))
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void cache::set_permission_loader(permission_loader_func loader) {
|
lw_shared_ptr<const cache::role_record> cache::get(const role_name_t& role) const noexcept {
|
||||||
_permission_loader = std::move(loader);
|
|
||||||
}
|
|
||||||
|
|
||||||
lw_shared_ptr<const cache::role_record> cache::get(std::string_view role) const noexcept {
|
|
||||||
auto it = _roles.find(role);
|
auto it = _roles.find(role);
|
||||||
if (it == _roles.end()) {
|
if (it == _roles.end()) {
|
||||||
return {};
|
return {};
|
||||||
@@ -55,93 +35,6 @@ lw_shared_ptr<const cache::role_record> cache::get(std::string_view role) const
|
|||||||
return it->second;
|
return it->second;
|
||||||
}
|
}
|
||||||
|
|
||||||
void cache::for_each_role(const std::function<void(const role_name_t&, const role_record&)>& func) const {
|
|
||||||
for (const auto& [name, record] : _roles) {
|
|
||||||
func(name, *record);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t cache::roles_count() const noexcept {
|
|
||||||
return _roles.size();
|
|
||||||
}
|
|
||||||
|
|
||||||
future<permission_set> cache::get_permissions(const role_or_anonymous& role, const resource& r) {
|
|
||||||
std::unordered_map<resource, permission_set>* perms_cache;
|
|
||||||
lw_shared_ptr<role_record> role_ptr;
|
|
||||||
|
|
||||||
if (is_anonymous(role)) {
|
|
||||||
perms_cache = &_anonymous_permissions;
|
|
||||||
} else {
|
|
||||||
const auto& role_name = *role.name;
|
|
||||||
auto role_it = _roles.find(role_name);
|
|
||||||
if (role_it == _roles.end()) {
|
|
||||||
// Role might have been deleted but there are some connections
|
|
||||||
// left which reference it. They should no longer have access to anything.
|
|
||||||
return make_ready_future<permission_set>(permissions::NONE);
|
|
||||||
}
|
|
||||||
role_ptr = role_it->second;
|
|
||||||
perms_cache = &role_ptr->cached_permissions;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (auto it = perms_cache->find(r); it != perms_cache->end()) {
|
|
||||||
return make_ready_future<permission_set>(it->second);
|
|
||||||
}
|
|
||||||
// keep alive role_ptr as it holds perms_cache (except anonymous)
|
|
||||||
return do_with(std::move(role_ptr), [this, &role, &r, perms_cache] (auto& role_ptr) {
|
|
||||||
return load_permissions(role, r, perms_cache);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
future<permission_set> cache::load_permissions(const role_or_anonymous& role, const resource& r, std::unordered_map<resource, permission_set>* perms_cache) {
|
|
||||||
SCYLLA_ASSERT(_permission_loader);
|
|
||||||
auto units = co_await get_units(_permission_loader_sem, 1, _as);
|
|
||||||
|
|
||||||
// Check again, perhaps we were blocked and other call loaded
|
|
||||||
// the permissions already. This is a protection against misses storm.
|
|
||||||
if (auto it = perms_cache->find(r); it != perms_cache->end()) {
|
|
||||||
co_return it->second;
|
|
||||||
}
|
|
||||||
auto perms = co_await _permission_loader(role, r);
|
|
||||||
add_permissions(*perms_cache, r, perms);
|
|
||||||
co_return perms;
|
|
||||||
}
|
|
||||||
|
|
||||||
future<> cache::prune(const resource& r) {
|
|
||||||
auto units = co_await get_units(_loading_sem, 1, _as);
|
|
||||||
_anonymous_permissions.erase(r);
|
|
||||||
for (auto& it : _roles) {
|
|
||||||
// Prunning can run concurrently with other functions but it
|
|
||||||
// can only cause cached_permissions extra reload via get_permissions.
|
|
||||||
remove_permissions(it.second->cached_permissions, r);
|
|
||||||
co_await coroutine::maybe_yield();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
future<> cache::reload_all_permissions() noexcept {
|
|
||||||
SCYLLA_ASSERT(_permission_loader);
|
|
||||||
auto units = co_await get_units(_loading_sem, 1, _as);
|
|
||||||
auto copy_keys = [] (const std::unordered_map<resource, permission_set>& m) {
|
|
||||||
std::vector<resource> keys;
|
|
||||||
keys.reserve(m.size());
|
|
||||||
for (const auto& [res, _] : m) {
|
|
||||||
keys.push_back(res);
|
|
||||||
}
|
|
||||||
return keys;
|
|
||||||
};
|
|
||||||
const role_or_anonymous anon;
|
|
||||||
for (const auto& res : copy_keys(_anonymous_permissions)) {
|
|
||||||
_anonymous_permissions[res] = co_await _permission_loader(anon, res);
|
|
||||||
}
|
|
||||||
for (auto& [role, entry] : _roles) {
|
|
||||||
auto& perms_cache = entry->cached_permissions;
|
|
||||||
auto r = role_or_anonymous(role);
|
|
||||||
for (const auto& res : copy_keys(perms_cache)) {
|
|
||||||
perms_cache[res] = co_await _permission_loader(r, res);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
logger.debug("Reloaded auth cache with {} entries", _roles.size());
|
|
||||||
}
|
|
||||||
|
|
||||||
future<lw_shared_ptr<cache::role_record>> cache::fetch_role(const role_name_t& role) const {
|
future<lw_shared_ptr<cache::role_record>> cache::fetch_role(const role_name_t& role) const {
|
||||||
auto rec = make_lw_shared<role_record>();
|
auto rec = make_lw_shared<role_record>();
|
||||||
rec->version = _current_version;
|
rec->version = _current_version;
|
||||||
@@ -209,7 +102,7 @@ future<lw_shared_ptr<cache::role_record>> cache::fetch_role(const role_name_t& r
|
|||||||
future<> cache::prune_all() noexcept {
|
future<> cache::prune_all() noexcept {
|
||||||
for (auto it = _roles.begin(); it != _roles.end(); ) {
|
for (auto it = _roles.begin(); it != _roles.end(); ) {
|
||||||
if (it->second->version != _current_version) {
|
if (it->second->version != _current_version) {
|
||||||
remove_role(it++);
|
_roles.erase(it++);
|
||||||
co_await coroutine::maybe_yield();
|
co_await coroutine::maybe_yield();
|
||||||
} else {
|
} else {
|
||||||
++it;
|
++it;
|
||||||
@@ -219,9 +112,10 @@ future<> cache::prune_all() noexcept {
|
|||||||
}
|
}
|
||||||
|
|
||||||
future<> cache::load_all() {
|
future<> cache::load_all() {
|
||||||
|
if (legacy_mode(_qp)) {
|
||||||
|
co_return;
|
||||||
|
}
|
||||||
SCYLLA_ASSERT(this_shard_id() == 0);
|
SCYLLA_ASSERT(this_shard_id() == 0);
|
||||||
auto units = co_await get_units(_loading_sem, 1, _as);
|
|
||||||
|
|
||||||
++_current_version;
|
++_current_version;
|
||||||
|
|
||||||
logger.info("Loading all roles");
|
logger.info("Loading all roles");
|
||||||
@@ -230,7 +124,7 @@ future<> cache::load_all() {
|
|||||||
const auto name = r.get_as<sstring>("role");
|
const auto name = r.get_as<sstring>("role");
|
||||||
auto role = co_await fetch_role(name);
|
auto role = co_await fetch_role(name);
|
||||||
if (role) {
|
if (role) {
|
||||||
add_role(name, role);
|
_roles[name] = role;
|
||||||
}
|
}
|
||||||
co_return stop_iteration::no;
|
co_return stop_iteration::no;
|
||||||
};
|
};
|
||||||
@@ -243,71 +137,36 @@ future<> cache::load_all() {
|
|||||||
co_await distribute_role(name, role);
|
co_await distribute_role(name, role);
|
||||||
}
|
}
|
||||||
co_await container().invoke_on_others([this](cache& c) -> future<> {
|
co_await container().invoke_on_others([this](cache& c) -> future<> {
|
||||||
auto units = co_await get_units(c._loading_sem, 1, c._as);
|
|
||||||
c._current_version = _current_version;
|
c._current_version = _current_version;
|
||||||
co_await c.prune_all();
|
co_await c.prune_all();
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
future<> cache::gather_inheriting_roles(std::unordered_set<role_name_t>& roles, lw_shared_ptr<cache::role_record> role, const role_name_t& name) {
|
future<> cache::load_roles(std::unordered_set<role_name_t> roles) {
|
||||||
if (!role) {
|
if (legacy_mode(_qp)) {
|
||||||
// Role might have been removed or not yet added, either way
|
|
||||||
// their members will be handled by another top call to this function.
|
|
||||||
co_return;
|
co_return;
|
||||||
}
|
}
|
||||||
for (const auto& member_name : role->members) {
|
|
||||||
bool is_new = roles.insert(member_name).second;
|
|
||||||
if (!is_new) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
lw_shared_ptr<cache::role_record> member_role;
|
|
||||||
auto r = _roles.find(member_name);
|
|
||||||
if (r != _roles.end()) {
|
|
||||||
member_role = r->second;
|
|
||||||
}
|
|
||||||
co_await gather_inheriting_roles(roles, member_role, member_name);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
future<> cache::load_roles(std::unordered_set<role_name_t> roles) {
|
|
||||||
SCYLLA_ASSERT(this_shard_id() == 0);
|
|
||||||
auto units = co_await get_units(_loading_sem, 1, _as);
|
|
||||||
|
|
||||||
std::unordered_set<role_name_t> roles_to_clear_perms;
|
|
||||||
for (const auto& name : roles) {
|
for (const auto& name : roles) {
|
||||||
logger.info("Loading role {}", name);
|
logger.info("Loading role {}", name);
|
||||||
auto role = co_await fetch_role(name);
|
auto role = co_await fetch_role(name);
|
||||||
if (role) {
|
if (role) {
|
||||||
add_role(name, role);
|
_roles[name] = role;
|
||||||
co_await gather_inheriting_roles(roles_to_clear_perms, role, name);
|
|
||||||
} else {
|
} else {
|
||||||
if (auto it = _roles.find(name); it != _roles.end()) {
|
_roles.erase(name);
|
||||||
auto old_role = it->second;
|
|
||||||
remove_role(it);
|
|
||||||
co_await gather_inheriting_roles(roles_to_clear_perms, old_role, name);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
co_await distribute_role(name, role);
|
co_await distribute_role(name, role);
|
||||||
}
|
}
|
||||||
|
|
||||||
co_await container().invoke_on_all([&roles_to_clear_perms] (cache& c) -> future<> {
|
|
||||||
for (const auto& name : roles_to_clear_perms) {
|
|
||||||
c.clear_role_permissions(name);
|
|
||||||
co_await coroutine::maybe_yield();
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
|
|
||||||
future<> cache::distribute_role(const role_name_t& name, lw_shared_ptr<role_record> role) {
|
future<> cache::distribute_role(const role_name_t& name, lw_shared_ptr<role_record> role) {
|
||||||
auto role_ptr = role.get();
|
auto role_ptr = role.get();
|
||||||
co_await container().invoke_on_others([&name, role_ptr](cache& c) -> future<> {
|
co_await container().invoke_on_others([&name, role_ptr](cache& c) {
|
||||||
auto units = co_await get_units(c._loading_sem, 1, c._as);
|
|
||||||
if (!role_ptr) {
|
if (!role_ptr) {
|
||||||
c.remove_role(name);
|
c._roles.erase(name);
|
||||||
co_return;
|
return;
|
||||||
}
|
}
|
||||||
auto role_copy = make_lw_shared<role_record>(*role_ptr);
|
auto role_copy = make_lw_shared<role_record>(*role_ptr);
|
||||||
c.add_role(name, std::move(role_copy));
|
c._roles[name] = std::move(role_copy);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -318,40 +177,4 @@ bool cache::includes_table(const table_id& id) noexcept {
|
|||||||
|| id == db::system_keyspace::role_permissions()->id();
|
|| id == db::system_keyspace::role_permissions()->id();
|
||||||
}
|
}
|
||||||
|
|
||||||
void cache::add_role(const role_name_t& name, lw_shared_ptr<role_record> role) {
|
|
||||||
if (auto it = _roles.find(name); it != _roles.end()) {
|
|
||||||
_cached_permissions_count -= it->second->cached_permissions.size();
|
|
||||||
}
|
|
||||||
_cached_permissions_count += role->cached_permissions.size();
|
|
||||||
_roles[name] = std::move(role);
|
|
||||||
}
|
|
||||||
|
|
||||||
void cache::remove_role(const role_name_t& name) {
|
|
||||||
if (auto it = _roles.find(name); it != _roles.end()) {
|
|
||||||
remove_role(it);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void cache::remove_role(roles_map::iterator it) {
|
|
||||||
_cached_permissions_count -= it->second->cached_permissions.size();
|
|
||||||
_roles.erase(it);
|
|
||||||
}
|
|
||||||
|
|
||||||
void cache::clear_role_permissions(const role_name_t& name) {
|
|
||||||
if (auto it = _roles.find(name); it != _roles.end()) {
|
|
||||||
_cached_permissions_count -= it->second->cached_permissions.size();
|
|
||||||
it->second->cached_permissions.clear();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void cache::add_permissions(std::unordered_map<resource, permission_set>& cache, const resource& r, permission_set perms) {
|
|
||||||
if (cache.emplace(r, perms).second) {
|
|
||||||
++_cached_permissions_count;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void cache::remove_permissions(std::unordered_map<resource, permission_set>& cache, const resource& r) {
|
|
||||||
_cached_permissions_count -= cache.erase(r);
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace auth
|
} // namespace auth
|
||||||
|
|||||||
@@ -8,8 +8,6 @@
|
|||||||
|
|
||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include <seastar/core/abort_source.hh>
|
|
||||||
#include <string_view>
|
|
||||||
#include <unordered_set>
|
#include <unordered_set>
|
||||||
#include <unordered_map>
|
#include <unordered_map>
|
||||||
|
|
||||||
@@ -17,15 +15,11 @@
|
|||||||
#include <seastar/core/future.hh>
|
#include <seastar/core/future.hh>
|
||||||
#include <seastar/core/sharded.hh>
|
#include <seastar/core/sharded.hh>
|
||||||
#include <seastar/core/shared_ptr.hh>
|
#include <seastar/core/shared_ptr.hh>
|
||||||
#include <seastar/core/semaphore.hh>
|
|
||||||
#include <seastar/core/metrics_registration.hh>
|
|
||||||
|
|
||||||
#include "absl-flat_hash_map.hh"
|
#include <absl/container/flat_hash_map.h>
|
||||||
|
|
||||||
#include "auth/permission.hh"
|
#include "auth/permission.hh"
|
||||||
#include "auth/common.hh"
|
#include "auth/common.hh"
|
||||||
#include "auth/resource.hh"
|
|
||||||
#include "auth/role_or_anonymous.hh"
|
|
||||||
|
|
||||||
namespace cql3 { class query_processor; }
|
namespace cql3 { class query_processor; }
|
||||||
|
|
||||||
@@ -35,7 +29,6 @@ class cache : public peering_sharded_service<cache> {
|
|||||||
public:
|
public:
|
||||||
using role_name_t = sstring;
|
using role_name_t = sstring;
|
||||||
using version_tag_t = char;
|
using version_tag_t = char;
|
||||||
using permission_loader_func = std::function<future<permission_set>(const role_or_anonymous&, const resource&)>;
|
|
||||||
|
|
||||||
struct role_record {
|
struct role_record {
|
||||||
bool can_login = false;
|
bool can_login = false;
|
||||||
@@ -43,60 +36,26 @@ public:
|
|||||||
std::unordered_set<role_name_t> member_of;
|
std::unordered_set<role_name_t> member_of;
|
||||||
std::unordered_set<role_name_t> members;
|
std::unordered_set<role_name_t> members;
|
||||||
sstring salted_hash;
|
sstring salted_hash;
|
||||||
std::unordered_map<sstring, sstring, sstring_hash, sstring_eq> attributes;
|
std::unordered_map<sstring, sstring> attributes;
|
||||||
std::unordered_map<sstring, permission_set, sstring_hash, sstring_eq> permissions;
|
std::unordered_map<sstring, permission_set> permissions;
|
||||||
private:
|
|
||||||
friend cache;
|
|
||||||
// cached permissions include effects of role's inheritance
|
|
||||||
std::unordered_map<resource, permission_set> cached_permissions;
|
|
||||||
version_tag_t version; // used for seamless cache reloads
|
version_tag_t version; // used for seamless cache reloads
|
||||||
};
|
};
|
||||||
|
|
||||||
explicit cache(cql3::query_processor& qp, abort_source& as) noexcept;
|
explicit cache(cql3::query_processor& qp) noexcept;
|
||||||
lw_shared_ptr<const role_record> get(std::string_view role) const noexcept;
|
lw_shared_ptr<const role_record> get(const role_name_t& role) const noexcept;
|
||||||
void set_permission_loader(permission_loader_func loader);
|
|
||||||
future<permission_set> get_permissions(const role_or_anonymous& role, const resource& r);
|
|
||||||
future<> prune(const resource& r);
|
|
||||||
future<> reload_all_permissions() noexcept;
|
|
||||||
future<> load_all();
|
future<> load_all();
|
||||||
future<> load_roles(std::unordered_set<role_name_t> roles);
|
future<> load_roles(std::unordered_set<role_name_t> roles);
|
||||||
static bool includes_table(const table_id&) noexcept;
|
static bool includes_table(const table_id&) noexcept;
|
||||||
|
|
||||||
// Returns the number of roles in the cache.
|
|
||||||
size_t roles_count() const noexcept;
|
|
||||||
|
|
||||||
// The callback doesn't suspend (no co_await) so it observes the state
|
|
||||||
// of the cache atomically.
|
|
||||||
void for_each_role(const std::function<void(const role_name_t&, const role_record&)>& func) const;
|
|
||||||
|
|
||||||
private:
|
private:
|
||||||
using roles_map = absl::flat_hash_map<role_name_t, lw_shared_ptr<role_record>, sstring_hash, sstring_eq>;
|
using roles_map = absl::flat_hash_map<role_name_t, lw_shared_ptr<role_record>>;
|
||||||
roles_map _roles;
|
roles_map _roles;
|
||||||
// anonymous permissions map exists mainly due to compatibility with
|
|
||||||
// higher layers which use role_or_anonymous to get permissions.
|
|
||||||
std::unordered_map<resource, permission_set> _anonymous_permissions;
|
|
||||||
version_tag_t _current_version;
|
version_tag_t _current_version;
|
||||||
cql3::query_processor& _qp;
|
cql3::query_processor& _qp;
|
||||||
semaphore _loading_sem; // protects iteration of _roles map
|
|
||||||
abort_source& _as;
|
|
||||||
permission_loader_func _permission_loader;
|
|
||||||
semaphore _permission_loader_sem; // protects against reload storms on a single role change
|
|
||||||
metrics::metric_groups _metrics;
|
|
||||||
size_t _cached_permissions_count = 0;
|
|
||||||
|
|
||||||
future<lw_shared_ptr<role_record>> fetch_role(const role_name_t& role) const;
|
future<lw_shared_ptr<role_record>> fetch_role(const role_name_t& role) const;
|
||||||
future<> prune_all() noexcept;
|
future<> prune_all() noexcept;
|
||||||
future<> distribute_role(const role_name_t& name, const lw_shared_ptr<role_record> role);
|
future<> distribute_role(const role_name_t& name, const lw_shared_ptr<role_record> role);
|
||||||
future<> gather_inheriting_roles(std::unordered_set<role_name_t>& roles, lw_shared_ptr<cache::role_record> role, const role_name_t& name);
|
|
||||||
|
|
||||||
void add_role(const role_name_t& name, lw_shared_ptr<role_record> role);
|
|
||||||
void remove_role(const role_name_t& name);
|
|
||||||
void remove_role(roles_map::iterator it);
|
|
||||||
void clear_role_permissions(const role_name_t& name);
|
|
||||||
void add_permissions(std::unordered_map<resource, permission_set>& cache, const resource& r, permission_set perms);
|
|
||||||
void remove_permissions(std::unordered_map<resource, permission_set>& cache, const resource& r);
|
|
||||||
|
|
||||||
future<permission_set> load_permissions(const role_or_anonymous& role, const resource& r, std::unordered_map<resource, permission_set>* perms_cache);
|
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace auth
|
} // namespace auth
|
||||||
|
|||||||
@@ -13,11 +13,14 @@
|
|||||||
#include <boost/regex.hpp>
|
#include <boost/regex.hpp>
|
||||||
#include <fmt/ranges.h>
|
#include <fmt/ranges.h>
|
||||||
|
|
||||||
|
#include "utils/class_registrator.hh"
|
||||||
#include "utils/to_string.hh"
|
#include "utils/to_string.hh"
|
||||||
#include "data_dictionary/data_dictionary.hh"
|
#include "data_dictionary/data_dictionary.hh"
|
||||||
#include "cql3/query_processor.hh"
|
#include "cql3/query_processor.hh"
|
||||||
#include "db/config.hh"
|
#include "db/config.hh"
|
||||||
|
|
||||||
|
static const auto CERT_AUTH_NAME = "com.scylladb.auth.CertificateAuthenticator";
|
||||||
|
const std::string_view auth::certificate_authenticator_name(CERT_AUTH_NAME);
|
||||||
|
|
||||||
static logging::logger clogger("certificate_authenticator");
|
static logging::logger clogger("certificate_authenticator");
|
||||||
|
|
||||||
@@ -27,11 +30,19 @@ static const std::string cfg_query_attr = "query";
|
|||||||
static const std::string cfg_source_subject = "SUBJECT";
|
static const std::string cfg_source_subject = "SUBJECT";
|
||||||
static const std::string cfg_source_altname = "ALTNAME";
|
static const std::string cfg_source_altname = "ALTNAME";
|
||||||
|
|
||||||
|
static const class_registrator<auth::authenticator
|
||||||
|
, auth::certificate_authenticator
|
||||||
|
, cql3::query_processor&
|
||||||
|
, ::service::raft_group0_client&
|
||||||
|
, ::service::migration_manager&
|
||||||
|
, auth::cache&
|
||||||
|
, utils::alien_worker&> cert_auth_reg(CERT_AUTH_NAME);
|
||||||
|
|
||||||
enum class auth::certificate_authenticator::query_source {
|
enum class auth::certificate_authenticator::query_source {
|
||||||
subject, altname
|
subject, altname
|
||||||
};
|
};
|
||||||
|
|
||||||
auth::certificate_authenticator::certificate_authenticator(cql3::query_processor& qp, ::service::raft_group0_client&, ::service::migration_manager&, auth::cache&)
|
auth::certificate_authenticator::certificate_authenticator(cql3::query_processor& qp, ::service::raft_group0_client&, ::service::migration_manager&, auth::cache&, utils::alien_worker&)
|
||||||
: _queries([&] {
|
: _queries([&] {
|
||||||
auto& conf = qp.db().get_config();
|
auto& conf = qp.db().get_config();
|
||||||
auto queries = conf.auth_certificate_role_queries();
|
auto queries = conf.auth_certificate_role_queries();
|
||||||
@@ -66,9 +77,9 @@ auth::certificate_authenticator::certificate_authenticator(cql3::query_processor
|
|||||||
throw std::invalid_argument(fmt::format("Invalid source: {}", map.at(cfg_source_attr)));
|
throw std::invalid_argument(fmt::format("Invalid source: {}", map.at(cfg_source_attr)));
|
||||||
}
|
}
|
||||||
continue;
|
continue;
|
||||||
} catch (const std::out_of_range&) {
|
} catch (std::out_of_range&) {
|
||||||
// just fallthrough
|
// just fallthrough
|
||||||
} catch (const boost::regex_error&) {
|
} catch (boost::regex_error&) {
|
||||||
std::throw_with_nested(std::invalid_argument(fmt::format("Invalid query expression: {}", map.at(cfg_query_attr))));
|
std::throw_with_nested(std::invalid_argument(fmt::format("Invalid query expression: {}", map.at(cfg_query_attr))));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -89,7 +100,7 @@ future<> auth::certificate_authenticator::stop() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
std::string_view auth::certificate_authenticator::qualified_java_name() const {
|
std::string_view auth::certificate_authenticator::qualified_java_name() const {
|
||||||
return "com.scylladb.auth.CertificateAuthenticator";
|
return certificate_authenticator_name;
|
||||||
}
|
}
|
||||||
|
|
||||||
bool auth::certificate_authenticator::require_authentication() const {
|
bool auth::certificate_authenticator::require_authentication() const {
|
||||||
|
|||||||
@@ -10,6 +10,7 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include "auth/authenticator.hh"
|
#include "auth/authenticator.hh"
|
||||||
|
#include "utils/alien_worker.hh"
|
||||||
#include <boost/regex_fwd.hpp> // IWYU pragma: keep
|
#include <boost/regex_fwd.hpp> // IWYU pragma: keep
|
||||||
|
|
||||||
namespace cql3 {
|
namespace cql3 {
|
||||||
@@ -27,11 +28,13 @@ namespace auth {
|
|||||||
|
|
||||||
class cache;
|
class cache;
|
||||||
|
|
||||||
|
extern const std::string_view certificate_authenticator_name;
|
||||||
|
|
||||||
class certificate_authenticator : public authenticator {
|
class certificate_authenticator : public authenticator {
|
||||||
enum class query_source;
|
enum class query_source;
|
||||||
std::vector<std::pair<query_source, boost::regex>> _queries;
|
std::vector<std::pair<query_source, boost::regex>> _queries;
|
||||||
public:
|
public:
|
||||||
certificate_authenticator(cql3::query_processor&, ::service::raft_group0_client&, ::service::migration_manager&, cache&);
|
certificate_authenticator(cql3::query_processor&, ::service::raft_group0_client&, ::service::migration_manager&, cache&, utils::alien_worker&);
|
||||||
~certificate_authenticator();
|
~certificate_authenticator();
|
||||||
|
|
||||||
future<> start() override;
|
future<> start() override;
|
||||||
|
|||||||
110
auth/common.cc
110
auth/common.cc
@@ -14,11 +14,18 @@
|
|||||||
#include <seastar/core/sharded.hh>
|
#include <seastar/core/sharded.hh>
|
||||||
|
|
||||||
#include "mutation/canonical_mutation.hh"
|
#include "mutation/canonical_mutation.hh"
|
||||||
|
#include "schema/schema_fwd.hh"
|
||||||
#include "mutation/timestamp.hh"
|
#include "mutation/timestamp.hh"
|
||||||
|
#include "utils/assert.hh"
|
||||||
#include "utils/exponential_backoff_retry.hh"
|
#include "utils/exponential_backoff_retry.hh"
|
||||||
#include "cql3/query_processor.hh"
|
#include "cql3/query_processor.hh"
|
||||||
|
#include "cql3/statements/create_table_statement.hh"
|
||||||
|
#include "schema/schema_builder.hh"
|
||||||
|
#include "service/migration_manager.hh"
|
||||||
#include "service/raft/group0_state_machine.hh"
|
#include "service/raft/group0_state_machine.hh"
|
||||||
#include "timeout_config.hh"
|
#include "timeout_config.hh"
|
||||||
|
#include "utils/error_injection.hh"
|
||||||
|
#include "db/system_keyspace.hh"
|
||||||
|
|
||||||
namespace auth {
|
namespace auth {
|
||||||
|
|
||||||
@@ -26,14 +33,22 @@ namespace meta {
|
|||||||
|
|
||||||
namespace legacy {
|
namespace legacy {
|
||||||
constinit const std::string_view AUTH_KS("system_auth");
|
constinit const std::string_view AUTH_KS("system_auth");
|
||||||
|
constinit const std::string_view USERS_CF("users");
|
||||||
} // namespace legacy
|
} // namespace legacy
|
||||||
constinit const std::string_view AUTH_PACKAGE_NAME("org.apache.cassandra.auth.");
|
constinit const std::string_view AUTH_PACKAGE_NAME("org.apache.cassandra.auth.");
|
||||||
} // namespace meta
|
} // namespace meta
|
||||||
|
|
||||||
static logging::logger auth_log("auth");
|
static logging::logger auth_log("auth");
|
||||||
|
|
||||||
std::string default_superuser(cql3::query_processor& qp) {
|
bool legacy_mode(cql3::query_processor& qp) {
|
||||||
return qp.db().get_config().auth_superuser_name();
|
return qp.auth_version < db::auth_version_t::v2;
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string_view get_auth_ks_name(cql3::query_processor& qp) {
|
||||||
|
if (legacy_mode(qp)) {
|
||||||
|
return meta::legacy::AUTH_KS;
|
||||||
|
}
|
||||||
|
return db::system_keyspace::NAME;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Func must support being invoked more than once.
|
// Func must support being invoked more than once.
|
||||||
@@ -50,6 +65,47 @@ future<> do_after_system_ready(seastar::abort_source& as, seastar::noncopyable_f
|
|||||||
}).discard_result();
|
}).discard_result();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static future<> create_legacy_metadata_table_if_missing_impl(
|
||||||
|
std::string_view table_name,
|
||||||
|
cql3::query_processor& qp,
|
||||||
|
std::string_view cql,
|
||||||
|
::service::migration_manager& mm) {
|
||||||
|
SCYLLA_ASSERT(this_shard_id() == 0); // once_among_shards makes sure a function is executed on shard 0 only
|
||||||
|
|
||||||
|
auto db = qp.db();
|
||||||
|
auto parsed_statement = cql3::query_processor::parse_statement(cql, cql3::dialect{});
|
||||||
|
auto& parsed_cf_statement = static_cast<cql3::statements::raw::cf_statement&>(*parsed_statement);
|
||||||
|
|
||||||
|
parsed_cf_statement.prepare_keyspace(meta::legacy::AUTH_KS);
|
||||||
|
|
||||||
|
auto statement = static_pointer_cast<cql3::statements::create_table_statement>(
|
||||||
|
parsed_cf_statement.prepare(db, qp.get_cql_stats())->statement);
|
||||||
|
|
||||||
|
const auto schema = statement->get_cf_meta_data(qp.db());
|
||||||
|
const auto uuid = generate_legacy_id(schema->ks_name(), schema->cf_name());
|
||||||
|
|
||||||
|
schema_builder b(schema);
|
||||||
|
b.set_uuid(uuid);
|
||||||
|
schema_ptr table = b.build();
|
||||||
|
|
||||||
|
if (!db.has_schema(table->ks_name(), table->cf_name())) {
|
||||||
|
auto group0_guard = co_await mm.start_group0_operation();
|
||||||
|
auto ts = group0_guard.write_timestamp();
|
||||||
|
try {
|
||||||
|
co_return co_await mm.announce(co_await ::service::prepare_new_column_family_announcement(qp.proxy(), table, ts),
|
||||||
|
std::move(group0_guard), format("auth: create {} metadata table", table->cf_name()));
|
||||||
|
} catch (exceptions::already_exists_exception&) {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
future<> create_legacy_metadata_table_if_missing(
|
||||||
|
std::string_view table_name,
|
||||||
|
cql3::query_processor& qp,
|
||||||
|
std::string_view cql,
|
||||||
|
::service::migration_manager& mm) noexcept {
|
||||||
|
return futurize_invoke(create_legacy_metadata_table_if_missing_impl, table_name, qp, cql, mm);
|
||||||
|
}
|
||||||
|
|
||||||
::service::query_state& internal_distributed_query_state() noexcept {
|
::service::query_state& internal_distributed_query_state() noexcept {
|
||||||
#ifdef DEBUG
|
#ifdef DEBUG
|
||||||
// Give the much slower debug tests more headroom for completing auth queries.
|
// Give the much slower debug tests more headroom for completing auth queries.
|
||||||
@@ -84,6 +140,56 @@ static future<> announce_mutations_with_guard(
|
|||||||
return group0_client.add_entry(std::move(group0_cmd), std::move(group0_guard), as, timeout);
|
return group0_client.add_entry(std::move(group0_cmd), std::move(group0_guard), as, timeout);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
future<> announce_mutations_with_batching(
|
||||||
|
::service::raft_group0_client& group0_client,
|
||||||
|
start_operation_func_t start_operation_func,
|
||||||
|
std::function<::service::mutations_generator(api::timestamp_type t)> gen,
|
||||||
|
seastar::abort_source& as,
|
||||||
|
std::optional<::service::raft_timeout> timeout) {
|
||||||
|
// account for command's overhead, it's better to use smaller threshold than constantly bounce off the limit
|
||||||
|
size_t memory_threshold = group0_client.max_command_size() * 0.75;
|
||||||
|
utils::get_local_injector().inject("auth_announce_mutations_command_max_size",
|
||||||
|
[&memory_threshold] {
|
||||||
|
memory_threshold = 1000;
|
||||||
|
});
|
||||||
|
|
||||||
|
size_t memory_usage = 0;
|
||||||
|
utils::chunked_vector<canonical_mutation> muts;
|
||||||
|
|
||||||
|
// guard has to be taken before we execute code in gen as
|
||||||
|
// it can do read-before-write and we want announce_mutations
|
||||||
|
// operation to be linearizable with other such calls,
|
||||||
|
// for instance if we do select and then delete in gen
|
||||||
|
// we want both to operate on the same data or fail
|
||||||
|
// if someone else modified it in the middle
|
||||||
|
std::optional<::service::group0_guard> group0_guard;
|
||||||
|
group0_guard = co_await start_operation_func(as);
|
||||||
|
auto timestamp = group0_guard->write_timestamp();
|
||||||
|
|
||||||
|
auto g = gen(timestamp);
|
||||||
|
while (auto mut = co_await g()) {
|
||||||
|
muts.push_back(canonical_mutation{*mut});
|
||||||
|
memory_usage += muts.back().representation().size();
|
||||||
|
if (memory_usage >= memory_threshold) {
|
||||||
|
if (!group0_guard) {
|
||||||
|
group0_guard = co_await start_operation_func(as);
|
||||||
|
timestamp = group0_guard->write_timestamp();
|
||||||
|
}
|
||||||
|
co_await announce_mutations_with_guard(group0_client, std::move(muts), std::move(*group0_guard), as, timeout);
|
||||||
|
group0_guard = std::nullopt;
|
||||||
|
memory_usage = 0;
|
||||||
|
muts = {};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (!muts.empty()) {
|
||||||
|
if (!group0_guard) {
|
||||||
|
group0_guard = co_await start_operation_func(as);
|
||||||
|
timestamp = group0_guard->write_timestamp();
|
||||||
|
}
|
||||||
|
co_await announce_mutations_with_guard(group0_client, std::move(muts), std::move(*group0_guard), as, timeout);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
future<> announce_mutations(
|
future<> announce_mutations(
|
||||||
cql3::query_processor& qp,
|
cql3::query_processor& qp,
|
||||||
::service::raft_group0_client& group0_client,
|
::service::raft_group0_client& group0_client,
|
||||||
|
|||||||
@@ -21,7 +21,12 @@
|
|||||||
|
|
||||||
using namespace std::chrono_literals;
|
using namespace std::chrono_literals;
|
||||||
|
|
||||||
|
namespace replica {
|
||||||
|
class database;
|
||||||
|
}
|
||||||
|
|
||||||
namespace service {
|
namespace service {
|
||||||
|
class migration_manager;
|
||||||
class query_state;
|
class query_state;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -35,8 +40,10 @@ namespace meta {
|
|||||||
|
|
||||||
namespace legacy {
|
namespace legacy {
|
||||||
extern constinit const std::string_view AUTH_KS;
|
extern constinit const std::string_view AUTH_KS;
|
||||||
|
extern constinit const std::string_view USERS_CF;
|
||||||
} // namespace legacy
|
} // namespace legacy
|
||||||
|
|
||||||
|
constexpr std::string_view DEFAULT_SUPERUSER_NAME("cassandra");
|
||||||
extern constinit const std::string_view AUTH_PACKAGE_NAME;
|
extern constinit const std::string_view AUTH_PACKAGE_NAME;
|
||||||
|
|
||||||
} // namespace meta
|
} // namespace meta
|
||||||
@@ -45,7 +52,12 @@ constexpr std::string_view PERMISSIONS_CF = "role_permissions";
|
|||||||
constexpr std::string_view ROLE_MEMBERS_CF = "role_members";
|
constexpr std::string_view ROLE_MEMBERS_CF = "role_members";
|
||||||
constexpr std::string_view ROLE_ATTRIBUTES_CF = "role_attributes";
|
constexpr std::string_view ROLE_ATTRIBUTES_CF = "role_attributes";
|
||||||
|
|
||||||
std::string default_superuser(cql3::query_processor& qp);
|
// This is a helper to check whether auth-v2 is on.
|
||||||
|
bool legacy_mode(cql3::query_processor& qp);
|
||||||
|
|
||||||
|
// We have legacy implementation using different keyspace
|
||||||
|
// and need to parametrize depending on runtime feature.
|
||||||
|
std::string_view get_auth_ks_name(cql3::query_processor& qp);
|
||||||
|
|
||||||
template <class Task>
|
template <class Task>
|
||||||
future<> once_among_shards(Task&& f) {
|
future<> once_among_shards(Task&& f) {
|
||||||
@@ -59,6 +71,12 @@ future<> once_among_shards(Task&& f) {
|
|||||||
// Func must support being invoked more than once.
|
// Func must support being invoked more than once.
|
||||||
future<> do_after_system_ready(seastar::abort_source& as, seastar::noncopyable_function<future<>()> func);
|
future<> do_after_system_ready(seastar::abort_source& as, seastar::noncopyable_function<future<>()> func);
|
||||||
|
|
||||||
|
future<> create_legacy_metadata_table_if_missing(
|
||||||
|
std::string_view table_name,
|
||||||
|
cql3::query_processor&,
|
||||||
|
std::string_view cql,
|
||||||
|
::service::migration_manager&) noexcept;
|
||||||
|
|
||||||
///
|
///
|
||||||
/// Time-outs for internal, non-local CQL queries.
|
/// Time-outs for internal, non-local CQL queries.
|
||||||
///
|
///
|
||||||
@@ -66,6 +84,20 @@ future<> do_after_system_ready(seastar::abort_source& as, seastar::noncopyable_f
|
|||||||
|
|
||||||
::service::raft_timeout get_raft_timeout() noexcept;
|
::service::raft_timeout get_raft_timeout() noexcept;
|
||||||
|
|
||||||
|
// Execute update query via group0 mechanism, mutations will be applied on all nodes.
|
||||||
|
// Use this function when need to perform read before write on a single guard or if
|
||||||
|
// you have more than one mutation and potentially exceed single command size limit.
|
||||||
|
using start_operation_func_t = std::function<future<::service::group0_guard>(abort_source&)>;
|
||||||
|
future<> announce_mutations_with_batching(
|
||||||
|
::service::raft_group0_client& group0_client,
|
||||||
|
// since we can operate also in topology coordinator context where we need stronger
|
||||||
|
// guarantees than start_operation from group0_client gives we allow to inject custom
|
||||||
|
// function here
|
||||||
|
start_operation_func_t start_operation_func,
|
||||||
|
std::function<::service::mutations_generator(api::timestamp_type t)> gen,
|
||||||
|
seastar::abort_source& as,
|
||||||
|
std::optional<::service::raft_timeout> timeout);
|
||||||
|
|
||||||
// Execute update query via group0 mechanism, mutations will be applied on all nodes.
|
// Execute update query via group0 mechanism, mutations will be applied on all nodes.
|
||||||
future<> announce_mutations(
|
future<> announce_mutations(
|
||||||
cql3::query_processor& qp,
|
cql3::query_processor& qp,
|
||||||
|
|||||||
@@ -26,6 +26,7 @@ extern "C" {
|
|||||||
#include "cql3/untyped_result_set.hh"
|
#include "cql3/untyped_result_set.hh"
|
||||||
#include "exceptions/exceptions.hh"
|
#include "exceptions/exceptions.hh"
|
||||||
#include "utils/log.hh"
|
#include "utils/log.hh"
|
||||||
|
#include "utils/class_registrator.hh"
|
||||||
|
|
||||||
namespace auth {
|
namespace auth {
|
||||||
|
|
||||||
@@ -39,14 +40,111 @@ static constexpr std::string_view PERMISSIONS_NAME = "permissions";
|
|||||||
|
|
||||||
static logging::logger alogger("default_authorizer");
|
static logging::logger alogger("default_authorizer");
|
||||||
|
|
||||||
default_authorizer::default_authorizer(cql3::query_processor& qp)
|
// To ensure correct initialization order, we unfortunately need to use a string literal.
|
||||||
: _qp(qp) {
|
static const class_registrator<
|
||||||
|
authorizer,
|
||||||
|
default_authorizer,
|
||||||
|
cql3::query_processor&,
|
||||||
|
::service::raft_group0_client&,
|
||||||
|
::service::migration_manager&> password_auth_reg("org.apache.cassandra.auth.CassandraAuthorizer");
|
||||||
|
|
||||||
|
default_authorizer::default_authorizer(cql3::query_processor& qp, ::service::raft_group0_client& g0, ::service::migration_manager& mm)
|
||||||
|
: _qp(qp)
|
||||||
|
, _migration_manager(mm) {
|
||||||
}
|
}
|
||||||
|
|
||||||
default_authorizer::~default_authorizer() {
|
default_authorizer::~default_authorizer() {
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static const sstring legacy_table_name{"permissions"};
|
||||||
|
|
||||||
|
bool default_authorizer::legacy_metadata_exists() const {
|
||||||
|
return _qp.db().has_schema(meta::legacy::AUTH_KS, legacy_table_name);
|
||||||
|
}
|
||||||
|
|
||||||
|
future<bool> default_authorizer::legacy_any_granted() const {
|
||||||
|
static const sstring query = seastar::format("SELECT * FROM {}.{} LIMIT 1", meta::legacy::AUTH_KS, PERMISSIONS_CF);
|
||||||
|
|
||||||
|
return _qp.execute_internal(
|
||||||
|
query,
|
||||||
|
db::consistency_level::LOCAL_ONE,
|
||||||
|
{},
|
||||||
|
cql3::query_processor::cache_internal::yes).then([](::shared_ptr<cql3::untyped_result_set> results) {
|
||||||
|
return !results->empty();
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
future<> default_authorizer::migrate_legacy_metadata() {
|
||||||
|
alogger.info("Starting migration of legacy permissions metadata.");
|
||||||
|
static const sstring query = seastar::format("SELECT * FROM {}.{}", meta::legacy::AUTH_KS, legacy_table_name);
|
||||||
|
|
||||||
|
return _qp.execute_internal(
|
||||||
|
query,
|
||||||
|
db::consistency_level::LOCAL_ONE,
|
||||||
|
cql3::query_processor::cache_internal::no).then([this](::shared_ptr<cql3::untyped_result_set> results) {
|
||||||
|
return do_for_each(*results, [this](const cql3::untyped_result_set_row& row) {
|
||||||
|
return do_with(
|
||||||
|
row.get_as<sstring>("username"),
|
||||||
|
parse_resource(row.get_as<sstring>(RESOURCE_NAME)),
|
||||||
|
::service::group0_batch::unused(),
|
||||||
|
[this, &row](const auto& username, const auto& r, auto& mc) {
|
||||||
|
const permission_set perms = permissions::from_strings(row.get_set<sstring>(PERMISSIONS_NAME));
|
||||||
|
return grant(username, perms, r, mc);
|
||||||
|
});
|
||||||
|
}).finally([results] {});
|
||||||
|
}).then([] {
|
||||||
|
alogger.info("Finished migrating legacy permissions metadata.");
|
||||||
|
}).handle_exception([](std::exception_ptr ep) {
|
||||||
|
alogger.error("Encountered an error during migration!");
|
||||||
|
std::rethrow_exception(ep);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
future<> default_authorizer::start_legacy() {
|
||||||
|
static const sstring create_table = fmt::format(
|
||||||
|
"CREATE TABLE {}.{} ("
|
||||||
|
"{} text,"
|
||||||
|
"{} text,"
|
||||||
|
"{} set<text>,"
|
||||||
|
"PRIMARY KEY({}, {})"
|
||||||
|
") WITH gc_grace_seconds={}",
|
||||||
|
meta::legacy::AUTH_KS,
|
||||||
|
PERMISSIONS_CF,
|
||||||
|
ROLE_NAME,
|
||||||
|
RESOURCE_NAME,
|
||||||
|
PERMISSIONS_NAME,
|
||||||
|
ROLE_NAME,
|
||||||
|
RESOURCE_NAME,
|
||||||
|
90 * 24 * 60 * 60); // 3 months.
|
||||||
|
|
||||||
|
return once_among_shards([this] {
|
||||||
|
return create_legacy_metadata_table_if_missing(
|
||||||
|
PERMISSIONS_CF,
|
||||||
|
_qp,
|
||||||
|
create_table,
|
||||||
|
_migration_manager).then([this] {
|
||||||
|
_finished = do_after_system_ready(_as, [this] {
|
||||||
|
return async([this] {
|
||||||
|
_migration_manager.wait_for_schema_agreement(_qp.db().real_database(), db::timeout_clock::time_point::max(), &_as).get();
|
||||||
|
|
||||||
|
if (legacy_metadata_exists()) {
|
||||||
|
if (!legacy_any_granted().get()) {
|
||||||
|
migrate_legacy_metadata().get();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
alogger.warn("Ignoring legacy permissions metadata since role permissions exist.");
|
||||||
|
}
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
future<> default_authorizer::start() {
|
future<> default_authorizer::start() {
|
||||||
|
if (legacy_mode(_qp)) {
|
||||||
|
return start_legacy();
|
||||||
|
}
|
||||||
return make_ready_future<>();
|
return make_ready_future<>();
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -63,7 +161,7 @@ default_authorizer::authorize(const role_or_anonymous& maybe_role, const resourc
|
|||||||
|
|
||||||
const sstring query = seastar::format("SELECT {} FROM {}.{} WHERE {} = ? AND {} = ?",
|
const sstring query = seastar::format("SELECT {} FROM {}.{} WHERE {} = ? AND {} = ?",
|
||||||
PERMISSIONS_NAME,
|
PERMISSIONS_NAME,
|
||||||
db::system_keyspace::NAME,
|
get_auth_ks_name(_qp),
|
||||||
PERMISSIONS_CF,
|
PERMISSIONS_CF,
|
||||||
ROLE_NAME,
|
ROLE_NAME,
|
||||||
RESOURCE_NAME);
|
RESOURCE_NAME);
|
||||||
@@ -87,13 +185,21 @@ default_authorizer::modify(
|
|||||||
std::string_view op,
|
std::string_view op,
|
||||||
::service::group0_batch& mc) {
|
::service::group0_batch& mc) {
|
||||||
const sstring query = seastar::format("UPDATE {}.{} SET {} = {} {} ? WHERE {} = ? AND {} = ?",
|
const sstring query = seastar::format("UPDATE {}.{} SET {} = {} {} ? WHERE {} = ? AND {} = ?",
|
||||||
db::system_keyspace::NAME,
|
get_auth_ks_name(_qp),
|
||||||
PERMISSIONS_CF,
|
PERMISSIONS_CF,
|
||||||
PERMISSIONS_NAME,
|
PERMISSIONS_NAME,
|
||||||
PERMISSIONS_NAME,
|
PERMISSIONS_NAME,
|
||||||
op,
|
op,
|
||||||
ROLE_NAME,
|
ROLE_NAME,
|
||||||
RESOURCE_NAME);
|
RESOURCE_NAME);
|
||||||
|
if (legacy_mode(_qp)) {
|
||||||
|
co_return co_await _qp.execute_internal(
|
||||||
|
query,
|
||||||
|
db::consistency_level::ONE,
|
||||||
|
internal_distributed_query_state(),
|
||||||
|
{permissions::to_strings(set), sstring(role_name), resource.name()},
|
||||||
|
cql3::query_processor::cache_internal::no).discard_result();
|
||||||
|
}
|
||||||
co_await collect_mutations(_qp, mc, query,
|
co_await collect_mutations(_qp, mc, query,
|
||||||
{permissions::to_strings(set), sstring(role_name), resource.name()});
|
{permissions::to_strings(set), sstring(role_name), resource.name()});
|
||||||
}
|
}
|
||||||
@@ -112,7 +218,7 @@ future<std::vector<permission_details>> default_authorizer::list_all() const {
|
|||||||
ROLE_NAME,
|
ROLE_NAME,
|
||||||
RESOURCE_NAME,
|
RESOURCE_NAME,
|
||||||
PERMISSIONS_NAME,
|
PERMISSIONS_NAME,
|
||||||
db::system_keyspace::NAME,
|
get_auth_ks_name(_qp),
|
||||||
PERMISSIONS_CF);
|
PERMISSIONS_CF);
|
||||||
|
|
||||||
const auto results = co_await _qp.execute_internal(
|
const auto results = co_await _qp.execute_internal(
|
||||||
@@ -137,16 +243,74 @@ future<std::vector<permission_details>> default_authorizer::list_all() const {
|
|||||||
future<> default_authorizer::revoke_all(std::string_view role_name, ::service::group0_batch& mc) {
|
future<> default_authorizer::revoke_all(std::string_view role_name, ::service::group0_batch& mc) {
|
||||||
try {
|
try {
|
||||||
const sstring query = seastar::format("DELETE FROM {}.{} WHERE {} = ?",
|
const sstring query = seastar::format("DELETE FROM {}.{} WHERE {} = ?",
|
||||||
db::system_keyspace::NAME,
|
get_auth_ks_name(_qp),
|
||||||
PERMISSIONS_CF,
|
PERMISSIONS_CF,
|
||||||
ROLE_NAME);
|
ROLE_NAME);
|
||||||
co_await collect_mutations(_qp, mc, query, {sstring(role_name)});
|
if (legacy_mode(_qp)) {
|
||||||
} catch (const exceptions::request_execution_exception& e) {
|
co_await _qp.execute_internal(
|
||||||
|
query,
|
||||||
|
db::consistency_level::ONE,
|
||||||
|
internal_distributed_query_state(),
|
||||||
|
{sstring(role_name)},
|
||||||
|
cql3::query_processor::cache_internal::no).discard_result();
|
||||||
|
} else {
|
||||||
|
co_await collect_mutations(_qp, mc, query, {sstring(role_name)});
|
||||||
|
}
|
||||||
|
} catch (exceptions::request_execution_exception& e) {
|
||||||
alogger.warn("CassandraAuthorizer failed to revoke all permissions of {}: {}", role_name, e);
|
alogger.warn("CassandraAuthorizer failed to revoke all permissions of {}: {}", role_name, e);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
future<> default_authorizer::revoke_all_legacy(const resource& resource) {
|
||||||
|
static const sstring query = seastar::format("SELECT {} FROM {}.{} WHERE {} = ? ALLOW FILTERING",
|
||||||
|
ROLE_NAME,
|
||||||
|
get_auth_ks_name(_qp),
|
||||||
|
PERMISSIONS_CF,
|
||||||
|
RESOURCE_NAME);
|
||||||
|
|
||||||
|
return _qp.execute_internal(
|
||||||
|
query,
|
||||||
|
db::consistency_level::LOCAL_ONE,
|
||||||
|
{resource.name()},
|
||||||
|
cql3::query_processor::cache_internal::no).then_wrapped([this, resource](future<::shared_ptr<cql3::untyped_result_set>> f) {
|
||||||
|
try {
|
||||||
|
auto res = f.get();
|
||||||
|
return parallel_for_each(
|
||||||
|
res->begin(),
|
||||||
|
res->end(),
|
||||||
|
[this, res, resource](const cql3::untyped_result_set::row& r) {
|
||||||
|
static const sstring query = seastar::format("DELETE FROM {}.{} WHERE {} = ? AND {} = ?",
|
||||||
|
get_auth_ks_name(_qp),
|
||||||
|
PERMISSIONS_CF,
|
||||||
|
ROLE_NAME,
|
||||||
|
RESOURCE_NAME);
|
||||||
|
|
||||||
|
return _qp.execute_internal(
|
||||||
|
query,
|
||||||
|
db::consistency_level::LOCAL_ONE,
|
||||||
|
{r.get_as<sstring>(ROLE_NAME), resource.name()},
|
||||||
|
cql3::query_processor::cache_internal::no).discard_result().handle_exception(
|
||||||
|
[resource](auto ep) {
|
||||||
|
try {
|
||||||
|
std::rethrow_exception(ep);
|
||||||
|
} catch (exceptions::request_execution_exception& e) {
|
||||||
|
alogger.warn("CassandraAuthorizer failed to revoke all permissions on {}: {}", resource, e);
|
||||||
|
}
|
||||||
|
|
||||||
|
});
|
||||||
|
});
|
||||||
|
} catch (exceptions::request_execution_exception& e) {
|
||||||
|
alogger.warn("CassandraAuthorizer failed to revoke all permissions on {}: {}", resource, e);
|
||||||
|
return make_ready_future();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
future<> default_authorizer::revoke_all(const resource& resource, ::service::group0_batch& mc) {
|
future<> default_authorizer::revoke_all(const resource& resource, ::service::group0_batch& mc) {
|
||||||
|
if (legacy_mode(_qp)) {
|
||||||
|
co_return co_await revoke_all_legacy(resource);
|
||||||
|
}
|
||||||
|
|
||||||
if (resource.kind() == resource_kind::data &&
|
if (resource.kind() == resource_kind::data &&
|
||||||
data_resource_view(resource).is_keyspace()) {
|
data_resource_view(resource).is_keyspace()) {
|
||||||
revoke_all_keyspace_resources(resource, mc);
|
revoke_all_keyspace_resources(resource, mc);
|
||||||
@@ -157,7 +321,7 @@ future<> default_authorizer::revoke_all(const resource& resource, ::service::gro
|
|||||||
auto gen = [this, name] (api::timestamp_type t) -> ::service::mutations_generator {
|
auto gen = [this, name] (api::timestamp_type t) -> ::service::mutations_generator {
|
||||||
const sstring query = seastar::format("SELECT {} FROM {}.{} WHERE {} = ? ALLOW FILTERING",
|
const sstring query = seastar::format("SELECT {} FROM {}.{} WHERE {} = ? ALLOW FILTERING",
|
||||||
ROLE_NAME,
|
ROLE_NAME,
|
||||||
db::system_keyspace::NAME,
|
get_auth_ks_name(_qp),
|
||||||
PERMISSIONS_CF,
|
PERMISSIONS_CF,
|
||||||
RESOURCE_NAME);
|
RESOURCE_NAME);
|
||||||
auto res = co_await _qp.execute_internal(
|
auto res = co_await _qp.execute_internal(
|
||||||
@@ -167,7 +331,7 @@ future<> default_authorizer::revoke_all(const resource& resource, ::service::gro
|
|||||||
cql3::query_processor::cache_internal::no);
|
cql3::query_processor::cache_internal::no);
|
||||||
for (const auto& r : *res) {
|
for (const auto& r : *res) {
|
||||||
const sstring query = seastar::format("DELETE FROM {}.{} WHERE {} = ? AND {} = ?",
|
const sstring query = seastar::format("DELETE FROM {}.{} WHERE {} = ? AND {} = ?",
|
||||||
db::system_keyspace::NAME,
|
get_auth_ks_name(_qp),
|
||||||
PERMISSIONS_CF,
|
PERMISSIONS_CF,
|
||||||
ROLE_NAME,
|
ROLE_NAME,
|
||||||
RESOURCE_NAME);
|
RESOURCE_NAME);
|
||||||
@@ -192,7 +356,7 @@ void default_authorizer::revoke_all_keyspace_resources(const resource& ks_resour
|
|||||||
const sstring query = seastar::format("SELECT {}, {} FROM {}.{}",
|
const sstring query = seastar::format("SELECT {}, {} FROM {}.{}",
|
||||||
ROLE_NAME,
|
ROLE_NAME,
|
||||||
RESOURCE_NAME,
|
RESOURCE_NAME,
|
||||||
db::system_keyspace::NAME,
|
get_auth_ks_name(_qp),
|
||||||
PERMISSIONS_CF);
|
PERMISSIONS_CF);
|
||||||
auto res = co_await _qp.execute_internal(
|
auto res = co_await _qp.execute_internal(
|
||||||
query,
|
query,
|
||||||
@@ -207,7 +371,7 @@ void default_authorizer::revoke_all_keyspace_resources(const resource& ks_resour
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
const sstring query = seastar::format("DELETE FROM {}.{} WHERE {} = ? AND {} = ?",
|
const sstring query = seastar::format("DELETE FROM {}.{} WHERE {} = ? AND {} = ?",
|
||||||
db::system_keyspace::NAME,
|
get_auth_ks_name(_qp),
|
||||||
PERMISSIONS_CF,
|
PERMISSIONS_CF,
|
||||||
ROLE_NAME,
|
ROLE_NAME,
|
||||||
RESOURCE_NAME);
|
RESOURCE_NAME);
|
||||||
|
|||||||
@@ -27,12 +27,14 @@ namespace auth {
|
|||||||
class default_authorizer : public authorizer {
|
class default_authorizer : public authorizer {
|
||||||
cql3::query_processor& _qp;
|
cql3::query_processor& _qp;
|
||||||
|
|
||||||
|
::service::migration_manager& _migration_manager;
|
||||||
|
|
||||||
abort_source _as{};
|
abort_source _as{};
|
||||||
|
|
||||||
future<> _finished{make_ready_future<>()};
|
future<> _finished{make_ready_future<>()};
|
||||||
|
|
||||||
public:
|
public:
|
||||||
default_authorizer(cql3::query_processor&);
|
default_authorizer(cql3::query_processor&, ::service::raft_group0_client&, ::service::migration_manager&);
|
||||||
|
|
||||||
~default_authorizer();
|
~default_authorizer();
|
||||||
|
|
||||||
@@ -57,6 +59,16 @@ public:
|
|||||||
virtual const resource_set& protected_resources() const override;
|
virtual const resource_set& protected_resources() const override;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
future<> start_legacy();
|
||||||
|
|
||||||
|
bool legacy_metadata_exists() const;
|
||||||
|
|
||||||
|
future<> revoke_all_legacy(const resource&);
|
||||||
|
|
||||||
|
future<bool> legacy_any_granted() const;
|
||||||
|
|
||||||
|
future<> migrate_legacy_metadata();
|
||||||
|
|
||||||
future<> modify(std::string_view, permission_set, const resource&, std::string_view, ::service::group0_batch&);
|
future<> modify(std::string_view, permission_set, const resource&, std::string_view, ::service::group0_batch&);
|
||||||
|
|
||||||
void revoke_all_keyspace_resources(const resource& ks_resource, ::service::group0_batch& mc);
|
void revoke_all_keyspace_resources(const resource& ks_resource, ::service::group0_batch& mc);
|
||||||
|
|||||||
@@ -24,6 +24,7 @@
|
|||||||
#include "exceptions/exceptions.hh"
|
#include "exceptions/exceptions.hh"
|
||||||
#include "seastarx.hh"
|
#include "seastarx.hh"
|
||||||
#include "service/raft/raft_group0_client.hh"
|
#include "service/raft/raft_group0_client.hh"
|
||||||
|
#include "utils/class_registrator.hh"
|
||||||
#include "db/config.hh"
|
#include "db/config.hh"
|
||||||
#include "utils/exponential_backoff_retry.hh"
|
#include "utils/exponential_backoff_retry.hh"
|
||||||
|
|
||||||
@@ -71,22 +72,26 @@ std::vector<sstring> get_attr_values(LDAP* ld, LDAPMessage* res, const char* att
|
|||||||
return values;
|
return values;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const char* ldap_role_manager_full_name = "com.scylladb.auth.LDAPRoleManager";
|
||||||
|
|
||||||
} // anonymous namespace
|
} // anonymous namespace
|
||||||
|
|
||||||
namespace auth {
|
namespace auth {
|
||||||
|
|
||||||
|
static const class_registrator<
|
||||||
|
role_manager,
|
||||||
|
ldap_role_manager,
|
||||||
|
cql3::query_processor&,
|
||||||
|
::service::raft_group0_client&,
|
||||||
|
::service::migration_manager&,
|
||||||
|
cache&> registration(ldap_role_manager_full_name);
|
||||||
|
|
||||||
ldap_role_manager::ldap_role_manager(
|
ldap_role_manager::ldap_role_manager(
|
||||||
std::string_view query_template, std::string_view target_attr, std::string_view bind_name, std::string_view bind_password,
|
std::string_view query_template, std::string_view target_attr, std::string_view bind_name, std::string_view bind_password,
|
||||||
uint32_t permissions_update_interval_in_ms,
|
|
||||||
utils::observer<uint32_t> permissions_update_interval_in_ms_observer,
|
|
||||||
cql3::query_processor& qp, ::service::raft_group0_client& rg0c, ::service::migration_manager& mm, cache& cache)
|
cql3::query_processor& qp, ::service::raft_group0_client& rg0c, ::service::migration_manager& mm, cache& cache)
|
||||||
: _std_mgr(qp, rg0c, mm, cache), _group0_client(rg0c), _query_template(query_template), _target_attr(target_attr), _bind_name(bind_name)
|
: _std_mgr(qp, rg0c, mm, cache), _group0_client(rg0c), _query_template(query_template), _target_attr(target_attr), _bind_name(bind_name)
|
||||||
, _bind_password(bind_password)
|
, _bind_password(bind_password)
|
||||||
, _permissions_update_interval_in_ms(permissions_update_interval_in_ms)
|
, _connection_factory(bind(std::mem_fn(&ldap_role_manager::reconnect), std::ref(*this))) {
|
||||||
, _permissions_update_interval_in_ms_observer(std::move(permissions_update_interval_in_ms_observer))
|
|
||||||
, _connection_factory(bind(std::mem_fn(&ldap_role_manager::reconnect), std::ref(*this)))
|
|
||||||
, _cache(cache)
|
|
||||||
, _cache_pruner(make_ready_future<>()) {
|
|
||||||
}
|
}
|
||||||
|
|
||||||
ldap_role_manager::ldap_role_manager(cql3::query_processor& qp, ::service::raft_group0_client& rg0c, ::service::migration_manager& mm, cache& cache)
|
ldap_role_manager::ldap_role_manager(cql3::query_processor& qp, ::service::raft_group0_client& rg0c, ::service::migration_manager& mm, cache& cache)
|
||||||
@@ -95,8 +100,6 @@ ldap_role_manager::ldap_role_manager(cql3::query_processor& qp, ::service::raft_
|
|||||||
qp.db().get_config().ldap_attr_role(),
|
qp.db().get_config().ldap_attr_role(),
|
||||||
qp.db().get_config().ldap_bind_dn(),
|
qp.db().get_config().ldap_bind_dn(),
|
||||||
qp.db().get_config().ldap_bind_passwd(),
|
qp.db().get_config().ldap_bind_passwd(),
|
||||||
qp.db().get_config().permissions_update_interval_in_ms(),
|
|
||||||
qp.db().get_config().permissions_update_interval_in_ms.observe([this] (const uint32_t& v) { _permissions_update_interval_in_ms = v; }),
|
|
||||||
qp,
|
qp,
|
||||||
rg0c,
|
rg0c,
|
||||||
mm,
|
mm,
|
||||||
@@ -104,7 +107,7 @@ ldap_role_manager::ldap_role_manager(cql3::query_processor& qp, ::service::raft_
|
|||||||
}
|
}
|
||||||
|
|
||||||
std::string_view ldap_role_manager::qualified_java_name() const noexcept {
|
std::string_view ldap_role_manager::qualified_java_name() const noexcept {
|
||||||
return "com.scylladb.auth.LDAPRoleManager";
|
return ldap_role_manager_full_name;
|
||||||
}
|
}
|
||||||
|
|
||||||
const resource_set& ldap_role_manager::protected_resources() const {
|
const resource_set& ldap_role_manager::protected_resources() const {
|
||||||
@@ -116,22 +119,6 @@ future<> ldap_role_manager::start() {
|
|||||||
return make_exception_future(
|
return make_exception_future(
|
||||||
std::runtime_error(fmt::format("error getting LDAP server address from template {}", _query_template)));
|
std::runtime_error(fmt::format("error getting LDAP server address from template {}", _query_template)));
|
||||||
}
|
}
|
||||||
_cache_pruner = futurize_invoke([this] () -> future<> {
|
|
||||||
while (true) {
|
|
||||||
try {
|
|
||||||
co_await seastar::sleep_abortable(std::chrono::milliseconds(_permissions_update_interval_in_ms), _as);
|
|
||||||
} catch (const seastar::sleep_aborted&) {
|
|
||||||
co_return; // ignore
|
|
||||||
}
|
|
||||||
co_await _cache.container().invoke_on_all([] (cache& c) -> future<> {
|
|
||||||
try {
|
|
||||||
co_await c.reload_all_permissions();
|
|
||||||
} catch (...) {
|
|
||||||
mylog.warn("Cache reload all permissions failed: {}", std::current_exception());
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
});
|
|
||||||
return _std_mgr.start();
|
return _std_mgr.start();
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -188,11 +175,7 @@ future<conn_ptr> ldap_role_manager::reconnect() {
|
|||||||
|
|
||||||
future<> ldap_role_manager::stop() {
|
future<> ldap_role_manager::stop() {
|
||||||
_as.request_abort();
|
_as.request_abort();
|
||||||
return std::move(_cache_pruner).then([this] {
|
return _std_mgr.stop().then([this] { return _connection_factory.stop(); });
|
||||||
return _std_mgr.stop();
|
|
||||||
}).then([this] {
|
|
||||||
return _connection_factory.stop();
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
|
|
||||||
future<> ldap_role_manager::create(std::string_view name, const role_config& config, ::service::group0_batch& mc) {
|
future<> ldap_role_manager::create(std::string_view name, const role_config& config, ::service::group0_batch& mc) {
|
||||||
|
|||||||
@@ -10,7 +10,6 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include <seastar/core/abort_source.hh>
|
#include <seastar/core/abort_source.hh>
|
||||||
#include <seastar/core/future.hh>
|
|
||||||
#include <stdexcept>
|
#include <stdexcept>
|
||||||
|
|
||||||
#include "ent/ldap/ldap_connection.hh"
|
#include "ent/ldap/ldap_connection.hh"
|
||||||
@@ -35,29 +34,22 @@ class ldap_role_manager : public role_manager {
|
|||||||
seastar::sstring _target_attr; ///< LDAP entry attribute containing the Scylla role name.
|
seastar::sstring _target_attr; ///< LDAP entry attribute containing the Scylla role name.
|
||||||
seastar::sstring _bind_name; ///< Username for LDAP simple bind.
|
seastar::sstring _bind_name; ///< Username for LDAP simple bind.
|
||||||
seastar::sstring _bind_password; ///< Password for LDAP simple bind.
|
seastar::sstring _bind_password; ///< Password for LDAP simple bind.
|
||||||
|
|
||||||
uint32_t _permissions_update_interval_in_ms;
|
|
||||||
utils::observer<uint32_t> _permissions_update_interval_in_ms_observer;
|
|
||||||
|
|
||||||
mutable ldap_reuser _connection_factory; // Potentially modified by query_granted().
|
mutable ldap_reuser _connection_factory; // Potentially modified by query_granted().
|
||||||
seastar::abort_source _as;
|
seastar::abort_source _as;
|
||||||
cache& _cache;
|
|
||||||
seastar::future<> _cache_pruner;
|
|
||||||
public:
|
public:
|
||||||
ldap_role_manager(
|
ldap_role_manager(
|
||||||
std::string_view query_template, ///< LDAP query template as described in Scylla documentation.
|
std::string_view query_template, ///< LDAP query template as described in Scylla documentation.
|
||||||
std::string_view target_attr, ///< LDAP entry attribute containing the Scylla role name.
|
std::string_view target_attr, ///< LDAP entry attribute containing the Scylla role name.
|
||||||
std::string_view bind_name, ///< LDAP bind credentials.
|
std::string_view bind_name, ///< LDAP bind credentials.
|
||||||
std::string_view bind_password, ///< LDAP bind credentials.
|
std::string_view bind_password, ///< LDAP bind credentials.
|
||||||
uint32_t permissions_update_interval_in_ms,
|
|
||||||
utils::observer<uint32_t> permissions_update_interval_in_ms_observer,
|
|
||||||
cql3::query_processor& qp, ///< Passed to standard_role_manager.
|
cql3::query_processor& qp, ///< Passed to standard_role_manager.
|
||||||
::service::raft_group0_client& rg0c, ///< Passed to standard_role_manager.
|
::service::raft_group0_client& rg0c, ///< Passed to standard_role_manager.
|
||||||
::service::migration_manager& mm, ///< Passed to standard_role_manager.
|
::service::migration_manager& mm, ///< Passed to standard_role_manager.
|
||||||
cache& cache ///< Passed to standard_role_manager.
|
cache& cache ///< Passed to standard_role_manager.
|
||||||
);
|
);
|
||||||
|
|
||||||
/// Retrieves LDAP configuration entries from qp and invokes the other constructor.
|
/// Retrieves LDAP configuration entries from qp and invokes the other constructor. Required by
|
||||||
|
/// class_registrator<role_manager>.
|
||||||
ldap_role_manager(cql3::query_processor& qp, ::service::raft_group0_client& rg0c, ::service::migration_manager& mm, cache& cache);
|
ldap_role_manager(cql3::query_processor& qp, ::service::raft_group0_client& rg0c, ::service::migration_manager& mm, cache& cache);
|
||||||
|
|
||||||
/// Thrown when query-template parsing fails.
|
/// Thrown when query-template parsing fails.
|
||||||
|
|||||||
@@ -1,31 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (C) 2026-present ScyllaDB
|
|
||||||
*
|
|
||||||
* Modified by ScyllaDB
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
* SPDX-License-Identifier: (LicenseRef-ScyllaDB-Source-Available-1.0 and Apache-2.0)
|
|
||||||
*/
|
|
||||||
|
|
||||||
#include "auth/maintenance_socket_authenticator.hh"
|
|
||||||
|
|
||||||
|
|
||||||
namespace auth {
|
|
||||||
|
|
||||||
maintenance_socket_authenticator::~maintenance_socket_authenticator() {
|
|
||||||
}
|
|
||||||
|
|
||||||
future<> maintenance_socket_authenticator::start() {
|
|
||||||
return make_ready_future<>();
|
|
||||||
}
|
|
||||||
|
|
||||||
future<> maintenance_socket_authenticator::ensure_superuser_is_created() const {
|
|
||||||
return make_ready_future<>();
|
|
||||||
}
|
|
||||||
|
|
||||||
bool maintenance_socket_authenticator::require_authentication() const {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace auth
|
|
||||||
@@ -1,36 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (C) 2026-present ScyllaDB
|
|
||||||
*
|
|
||||||
* Modified by ScyllaDB
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
* SPDX-License-Identifier: (LicenseRef-ScyllaDB-Source-Available-1.0 and Apache-2.0)
|
|
||||||
*/
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include <seastar/core/shared_future.hh>
|
|
||||||
|
|
||||||
#include "password_authenticator.hh"
|
|
||||||
|
|
||||||
namespace auth {
|
|
||||||
|
|
||||||
// maintenance_socket_authenticator is used for clients connecting to the
|
|
||||||
// maintenance socket. It does not require authentication,
|
|
||||||
// while still allowing the managing of roles and their credentials.
|
|
||||||
class maintenance_socket_authenticator : public password_authenticator {
|
|
||||||
public:
|
|
||||||
using password_authenticator::password_authenticator;
|
|
||||||
|
|
||||||
virtual ~maintenance_socket_authenticator();
|
|
||||||
|
|
||||||
virtual future<> start() override;
|
|
||||||
|
|
||||||
virtual future<> ensure_superuser_is_created() const override;
|
|
||||||
|
|
||||||
bool require_authentication() const override;
|
|
||||||
};
|
|
||||||
|
|
||||||
} // namespace auth
|
|
||||||
|
|
||||||
@@ -1,37 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (C) 2026-present ScyllaDB
|
|
||||||
*
|
|
||||||
* Modified by ScyllaDB
|
|
||||||
*/
|
|
||||||
|
|
||||||
/*
|
|
||||||
* SPDX-License-Identifier: (LicenseRef-ScyllaDB-Source-Available-1.0 and Apache-2.0)
|
|
||||||
*/
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include "auth/default_authorizer.hh"
|
|
||||||
#include "auth/permission.hh"
|
|
||||||
|
|
||||||
namespace auth {
|
|
||||||
|
|
||||||
// maintenance_socket_authorizer is used for clients connecting to the
|
|
||||||
// maintenance socket. It grants all permissions unconditionally (like
|
|
||||||
// AllowAllAuthorizer) while still supporting grant/revoke operations
|
|
||||||
// (delegated to the underlying CassandraAuthorizer / default_authorizer).
|
|
||||||
class maintenance_socket_authorizer : public default_authorizer {
|
|
||||||
public:
|
|
||||||
using default_authorizer::default_authorizer;
|
|
||||||
|
|
||||||
~maintenance_socket_authorizer() override = default;
|
|
||||||
|
|
||||||
future<> start() override {
|
|
||||||
return make_ready_future<>();
|
|
||||||
}
|
|
||||||
|
|
||||||
future<permission_set> authorize(const role_or_anonymous&, const resource&) const override {
|
|
||||||
return make_ready_future<permission_set>(permissions::ALL);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
} // namespace auth
|
|
||||||
@@ -13,48 +13,23 @@
|
|||||||
#include <string_view>
|
#include <string_view>
|
||||||
#include "auth/cache.hh"
|
#include "auth/cache.hh"
|
||||||
#include "cql3/description.hh"
|
#include "cql3/description.hh"
|
||||||
#include "utils/log.hh"
|
#include "utils/class_registrator.hh"
|
||||||
#include "utils/on_internal_error.hh"
|
|
||||||
|
|
||||||
namespace auth {
|
namespace auth {
|
||||||
|
|
||||||
static logging::logger log("maintenance_socket_role_manager");
|
constexpr std::string_view maintenance_socket_role_manager_name = "com.scylladb.auth.MaintenanceSocketRoleManager";
|
||||||
|
|
||||||
future<> maintenance_socket_role_manager::ensure_role_operations_are_enabled() {
|
static const class_registrator<
|
||||||
if (_is_maintenance_mode) {
|
role_manager,
|
||||||
on_internal_error(log, "enabling role operations not allowed in maintenance mode");
|
maintenance_socket_role_manager,
|
||||||
}
|
cql3::query_processor&,
|
||||||
|
::service::raft_group0_client&,
|
||||||
|
::service::migration_manager&,
|
||||||
|
cache&> registration(sstring{maintenance_socket_role_manager_name});
|
||||||
|
|
||||||
if (_std_mgr.has_value()) {
|
|
||||||
on_internal_error(log, "role operations are already enabled");
|
|
||||||
}
|
|
||||||
|
|
||||||
_std_mgr.emplace(_qp, _group0_client, _migration_manager, _cache);
|
|
||||||
return _std_mgr->start();
|
|
||||||
}
|
|
||||||
|
|
||||||
void maintenance_socket_role_manager::set_maintenance_mode() {
|
|
||||||
if (_std_mgr.has_value()) {
|
|
||||||
on_internal_error(log, "cannot enter maintenance mode after role operations have been enabled");
|
|
||||||
}
|
|
||||||
_is_maintenance_mode = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
maintenance_socket_role_manager::maintenance_socket_role_manager(
|
|
||||||
cql3::query_processor& qp,
|
|
||||||
::service::raft_group0_client& rg0c,
|
|
||||||
::service::migration_manager& mm,
|
|
||||||
cache& c)
|
|
||||||
: _qp(qp)
|
|
||||||
, _group0_client(rg0c)
|
|
||||||
, _migration_manager(mm)
|
|
||||||
, _cache(c)
|
|
||||||
, _std_mgr(std::nullopt)
|
|
||||||
, _is_maintenance_mode(false) {
|
|
||||||
}
|
|
||||||
|
|
||||||
std::string_view maintenance_socket_role_manager::qualified_java_name() const noexcept {
|
std::string_view maintenance_socket_role_manager::qualified_java_name() const noexcept {
|
||||||
return "com.scylladb.auth.MaintenanceSocketRoleManager";
|
return maintenance_socket_role_manager_name;
|
||||||
}
|
}
|
||||||
|
|
||||||
const resource_set& maintenance_socket_role_manager::protected_resources() const {
|
const resource_set& maintenance_socket_role_manager::protected_resources() const {
|
||||||
@@ -68,161 +43,81 @@ future<> maintenance_socket_role_manager::start() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
future<> maintenance_socket_role_manager::stop() {
|
future<> maintenance_socket_role_manager::stop() {
|
||||||
return _std_mgr ? _std_mgr->stop() : make_ready_future<>();
|
|
||||||
}
|
|
||||||
|
|
||||||
future<> maintenance_socket_role_manager::ensure_superuser_is_created() {
|
|
||||||
return _std_mgr ? _std_mgr->ensure_superuser_is_created() : make_ready_future<>();
|
|
||||||
}
|
|
||||||
|
|
||||||
template<typename T = void>
|
|
||||||
future<T> operation_not_available_in_maintenance_mode_exception(std::string_view operation) {
|
|
||||||
return make_exception_future<T>(
|
|
||||||
std::runtime_error(fmt::format("role manager: {} operation not available through maintenance socket in maintenance mode", operation)));
|
|
||||||
}
|
|
||||||
|
|
||||||
template<typename T = void>
|
|
||||||
future<T> manager_not_ready_exception(std::string_view operation) {
|
|
||||||
return make_exception_future<T>(
|
|
||||||
std::runtime_error(fmt::format("role manager: {} operation not available because manager not ready yet (role operations not enabled)", operation)));
|
|
||||||
}
|
|
||||||
|
|
||||||
future<> maintenance_socket_role_manager::validate_operation(std::string_view name) const {
|
|
||||||
if (_is_maintenance_mode) {
|
|
||||||
return operation_not_available_in_maintenance_mode_exception(name);
|
|
||||||
}
|
|
||||||
if (!_std_mgr) {
|
|
||||||
return manager_not_ready_exception(name);
|
|
||||||
}
|
|
||||||
return make_ready_future<>();
|
return make_ready_future<>();
|
||||||
}
|
}
|
||||||
|
|
||||||
future<> maintenance_socket_role_manager::create(std::string_view role_name, const role_config& c, ::service::group0_batch& mc) {
|
future<> maintenance_socket_role_manager::ensure_superuser_is_created() {
|
||||||
auto f = validate_operation("CREATE");
|
return make_ready_future<>();
|
||||||
if (f.failed()) {
|
}
|
||||||
return f;
|
|
||||||
}
|
template<typename T = void>
|
||||||
return _std_mgr->create(role_name, c, mc);
|
future<T> operation_not_supported_exception(std::string_view operation) {
|
||||||
|
return make_exception_future<T>(
|
||||||
|
std::runtime_error(fmt::format("role manager: {} operation not supported through maintenance socket", operation)));
|
||||||
|
}
|
||||||
|
|
||||||
|
future<> maintenance_socket_role_manager::create(std::string_view role_name, const role_config&, ::service::group0_batch&) {
|
||||||
|
return operation_not_supported_exception("CREATE");
|
||||||
}
|
}
|
||||||
|
|
||||||
future<> maintenance_socket_role_manager::drop(std::string_view role_name, ::service::group0_batch& mc) {
|
future<> maintenance_socket_role_manager::drop(std::string_view role_name, ::service::group0_batch& mc) {
|
||||||
auto f = validate_operation("DROP");
|
return operation_not_supported_exception("DROP");
|
||||||
if (f.failed()) {
|
|
||||||
return f;
|
|
||||||
}
|
|
||||||
return _std_mgr->drop(role_name, mc);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
future<> maintenance_socket_role_manager::alter(std::string_view role_name, const role_config_update& u, ::service::group0_batch& mc) {
|
future<> maintenance_socket_role_manager::alter(std::string_view role_name, const role_config_update&, ::service::group0_batch&) {
|
||||||
auto f = validate_operation("ALTER");
|
return operation_not_supported_exception("ALTER");
|
||||||
if (f.failed()) {
|
|
||||||
return f;
|
|
||||||
}
|
|
||||||
return _std_mgr->alter(role_name, u, mc);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
future<> maintenance_socket_role_manager::grant(std::string_view grantee_name, std::string_view role_name, ::service::group0_batch& mc) {
|
future<> maintenance_socket_role_manager::grant(std::string_view grantee_name, std::string_view role_name, ::service::group0_batch& mc) {
|
||||||
auto f = validate_operation("GRANT");
|
return operation_not_supported_exception("GRANT");
|
||||||
if (f.failed()) {
|
|
||||||
return f;
|
|
||||||
}
|
|
||||||
return _std_mgr->grant(grantee_name, role_name, mc);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
future<> maintenance_socket_role_manager::revoke(std::string_view revokee_name, std::string_view role_name, ::service::group0_batch& mc) {
|
future<> maintenance_socket_role_manager::revoke(std::string_view revokee_name, std::string_view role_name, ::service::group0_batch& mc) {
|
||||||
auto f = validate_operation("REVOKE");
|
return operation_not_supported_exception("REVOKE");
|
||||||
if (f.failed()) {
|
|
||||||
return f;
|
|
||||||
}
|
|
||||||
return _std_mgr->revoke(revokee_name, role_name, mc);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
future<role_set> maintenance_socket_role_manager::query_granted(std::string_view grantee_name, recursive_role_query m) {
|
future<role_set> maintenance_socket_role_manager::query_granted(std::string_view grantee_name, recursive_role_query) {
|
||||||
auto f = validate_operation("QUERY GRANTED");
|
return operation_not_supported_exception<role_set>("QUERY GRANTED");
|
||||||
if (f.failed()) {
|
|
||||||
return make_exception_future<role_set>(f.get_exception());
|
|
||||||
}
|
|
||||||
return _std_mgr->query_granted(grantee_name, m);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
future<role_to_directly_granted_map> maintenance_socket_role_manager::query_all_directly_granted(::service::query_state& qs) {
|
future<role_to_directly_granted_map> maintenance_socket_role_manager::query_all_directly_granted(::service::query_state&) {
|
||||||
auto f = validate_operation("QUERY ALL DIRECTLY GRANTED");
|
return operation_not_supported_exception<role_to_directly_granted_map>("QUERY ALL DIRECTLY GRANTED");
|
||||||
if (f.failed()) {
|
|
||||||
return make_exception_future<role_to_directly_granted_map>(f.get_exception());
|
|
||||||
}
|
|
||||||
return _std_mgr->query_all_directly_granted(qs);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
future<role_set> maintenance_socket_role_manager::query_all(::service::query_state& qs) {
|
future<role_set> maintenance_socket_role_manager::query_all(::service::query_state&) {
|
||||||
auto f = validate_operation("QUERY ALL");
|
return operation_not_supported_exception<role_set>("QUERY ALL");
|
||||||
if (f.failed()) {
|
|
||||||
return make_exception_future<role_set>(f.get_exception());
|
|
||||||
}
|
|
||||||
return _std_mgr->query_all(qs);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
future<bool> maintenance_socket_role_manager::exists(std::string_view role_name) {
|
future<bool> maintenance_socket_role_manager::exists(std::string_view role_name) {
|
||||||
auto f = validate_operation("EXISTS");
|
return operation_not_supported_exception<bool>("EXISTS");
|
||||||
if (f.failed()) {
|
|
||||||
return make_exception_future<bool>(f.get_exception());
|
|
||||||
}
|
|
||||||
return _std_mgr->exists(role_name);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
future<bool> maintenance_socket_role_manager::is_superuser(std::string_view role_name) {
|
future<bool> maintenance_socket_role_manager::is_superuser(std::string_view role_name) {
|
||||||
auto f = validate_operation("IS SUPERUSER");
|
return make_ready_future<bool>(true);
|
||||||
if (f.failed()) {
|
|
||||||
return make_exception_future<bool>(f.get_exception());
|
|
||||||
}
|
|
||||||
return _std_mgr->is_superuser(role_name);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
future<bool> maintenance_socket_role_manager::can_login(std::string_view role_name) {
|
future<bool> maintenance_socket_role_manager::can_login(std::string_view role_name) {
|
||||||
auto f = validate_operation("CAN LOGIN");
|
return make_ready_future<bool>(true);
|
||||||
if (f.failed()) {
|
|
||||||
return make_exception_future<bool>(f.get_exception());
|
|
||||||
}
|
|
||||||
return _std_mgr->can_login(role_name);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
future<std::optional<sstring>> maintenance_socket_role_manager::get_attribute(std::string_view role_name, std::string_view attribute_name, ::service::query_state& qs) {
|
future<std::optional<sstring>> maintenance_socket_role_manager::get_attribute(std::string_view role_name, std::string_view attribute_name, ::service::query_state&) {
|
||||||
auto f = validate_operation("GET ATTRIBUTE");
|
return operation_not_supported_exception<std::optional<sstring>>("GET ATTRIBUTE");
|
||||||
if (f.failed()) {
|
|
||||||
return make_exception_future<std::optional<sstring>>(f.get_exception());
|
|
||||||
}
|
|
||||||
return _std_mgr->get_attribute(role_name, attribute_name, qs);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
future<role_manager::attribute_vals> maintenance_socket_role_manager::query_attribute_for_all(std::string_view attribute_name, ::service::query_state& qs) {
|
future<role_manager::attribute_vals> maintenance_socket_role_manager::query_attribute_for_all(std::string_view attribute_name, ::service::query_state&) {
|
||||||
auto f = validate_operation("QUERY ATTRIBUTE FOR ALL");
|
return operation_not_supported_exception<role_manager::attribute_vals>("QUERY ATTRIBUTE");
|
||||||
if (f.failed()) {
|
|
||||||
return make_exception_future<role_manager::attribute_vals>(f.get_exception());
|
|
||||||
}
|
|
||||||
return _std_mgr->query_attribute_for_all(attribute_name, qs);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
future<> maintenance_socket_role_manager::set_attribute(std::string_view role_name, std::string_view attribute_name, std::string_view attribute_value, ::service::group0_batch& mc) {
|
future<> maintenance_socket_role_manager::set_attribute(std::string_view role_name, std::string_view attribute_name, std::string_view attribute_value, ::service::group0_batch& mc) {
|
||||||
auto f = validate_operation("SET ATTRIBUTE");
|
return operation_not_supported_exception("SET ATTRIBUTE");
|
||||||
if (f.failed()) {
|
|
||||||
return f;
|
|
||||||
}
|
|
||||||
return _std_mgr->set_attribute(role_name, attribute_name, attribute_value, mc);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
future<> maintenance_socket_role_manager::remove_attribute(std::string_view role_name, std::string_view attribute_name, ::service::group0_batch& mc) {
|
future<> maintenance_socket_role_manager::remove_attribute(std::string_view role_name, std::string_view attribute_name, ::service::group0_batch& mc) {
|
||||||
auto f = validate_operation("REMOVE ATTRIBUTE");
|
return operation_not_supported_exception("REMOVE ATTRIBUTE");
|
||||||
if (f.failed()) {
|
|
||||||
return f;
|
|
||||||
}
|
|
||||||
return _std_mgr->remove_attribute(role_name, attribute_name, mc);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
future<std::vector<cql3::description>> maintenance_socket_role_manager::describe_role_grants() {
|
future<std::vector<cql3::description>> maintenance_socket_role_manager::describe_role_grants() {
|
||||||
auto f = validate_operation("DESCRIBE ROLE GRANTS");
|
return operation_not_supported_exception<std::vector<cql3::description>>("DESCRIBE SCHEMA WITH INTERNALS");
|
||||||
if (f.failed()) {
|
|
||||||
return make_exception_future<std::vector<cql3::description>>(f.get_exception());
|
|
||||||
}
|
|
||||||
return _std_mgr->describe_role_grants();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace auth
|
} // namespace auth
|
||||||
|
|||||||
@@ -11,7 +11,6 @@
|
|||||||
#include "auth/cache.hh"
|
#include "auth/cache.hh"
|
||||||
#include "auth/resource.hh"
|
#include "auth/resource.hh"
|
||||||
#include "auth/role_manager.hh"
|
#include "auth/role_manager.hh"
|
||||||
#include "auth/standard_role_manager.hh"
|
|
||||||
#include <seastar/core/future.hh>
|
#include <seastar/core/future.hh>
|
||||||
|
|
||||||
namespace cql3 {
|
namespace cql3 {
|
||||||
@@ -25,26 +24,13 @@ class raft_group0_client;
|
|||||||
|
|
||||||
namespace auth {
|
namespace auth {
|
||||||
|
|
||||||
// This role manager is used by the maintenance socket. It has disabled all role management operations
|
extern const std::string_view maintenance_socket_role_manager_name;
|
||||||
// in maintenance mode. In normal mode it delegates all operations to a standard_role_manager,
|
|
||||||
// which is created on demand when the node joins the cluster.
|
// This role manager is used by the maintenance socket. It has disabled all role management operations to not depend on
|
||||||
|
// system_auth keyspace, which may be not yet created when the maintenance socket starts listening.
|
||||||
class maintenance_socket_role_manager final : public role_manager {
|
class maintenance_socket_role_manager final : public role_manager {
|
||||||
cql3::query_processor& _qp;
|
|
||||||
::service::raft_group0_client& _group0_client;
|
|
||||||
::service::migration_manager& _migration_manager;
|
|
||||||
cache& _cache;
|
|
||||||
std::optional<standard_role_manager> _std_mgr;
|
|
||||||
bool _is_maintenance_mode;
|
|
||||||
|
|
||||||
public:
|
public:
|
||||||
void set_maintenance_mode() override;
|
maintenance_socket_role_manager(cql3::query_processor&, ::service::raft_group0_client&, ::service::migration_manager&, cache&) {}
|
||||||
|
|
||||||
// Ensures role management operations are enabled.
|
|
||||||
// It must be called once the node has joined the cluster.
|
|
||||||
// In the meantime all role management operations will fail.
|
|
||||||
future<> ensure_role_operations_are_enabled() override;
|
|
||||||
|
|
||||||
maintenance_socket_role_manager(cql3::query_processor&, ::service::raft_group0_client&, ::service::migration_manager&, cache&);
|
|
||||||
|
|
||||||
virtual std::string_view qualified_java_name() const noexcept override;
|
virtual std::string_view qualified_java_name() const noexcept override;
|
||||||
|
|
||||||
@@ -56,21 +42,21 @@ public:
|
|||||||
|
|
||||||
virtual future<> ensure_superuser_is_created() override;
|
virtual future<> ensure_superuser_is_created() override;
|
||||||
|
|
||||||
virtual future<> create(std::string_view role_name, const role_config& c, ::service::group0_batch& mc) override;
|
virtual future<> create(std::string_view role_name, const role_config&, ::service::group0_batch&) override;
|
||||||
|
|
||||||
virtual future<> drop(std::string_view role_name, ::service::group0_batch& mc) override;
|
virtual future<> drop(std::string_view role_name, ::service::group0_batch& mc) override;
|
||||||
|
|
||||||
virtual future<> alter(std::string_view role_name, const role_config_update& u, ::service::group0_batch& mc) override;
|
virtual future<> alter(std::string_view role_name, const role_config_update&, ::service::group0_batch&) override;
|
||||||
|
|
||||||
virtual future<> grant(std::string_view grantee_name, std::string_view role_name, ::service::group0_batch& mc) override;
|
virtual future<> grant(std::string_view grantee_name, std::string_view role_name, ::service::group0_batch& mc) override;
|
||||||
|
|
||||||
virtual future<> revoke(std::string_view revokee_name, std::string_view role_name, ::service::group0_batch& mc) override;
|
virtual future<> revoke(std::string_view revokee_name, std::string_view role_name, ::service::group0_batch& mc) override;
|
||||||
|
|
||||||
virtual future<role_set> query_granted(std::string_view grantee_name, recursive_role_query m) override;
|
virtual future<role_set> query_granted(std::string_view grantee_name, recursive_role_query) override;
|
||||||
|
|
||||||
virtual future<role_to_directly_granted_map> query_all_directly_granted(::service::query_state& qs) override;
|
virtual future<role_to_directly_granted_map> query_all_directly_granted(::service::query_state&) override;
|
||||||
|
|
||||||
virtual future<role_set> query_all(::service::query_state& qs) override;
|
virtual future<role_set> query_all(::service::query_state&) override;
|
||||||
|
|
||||||
virtual future<bool> exists(std::string_view role_name) override;
|
virtual future<bool> exists(std::string_view role_name) override;
|
||||||
|
|
||||||
@@ -78,19 +64,15 @@ public:
|
|||||||
|
|
||||||
virtual future<bool> can_login(std::string_view role_name) override;
|
virtual future<bool> can_login(std::string_view role_name) override;
|
||||||
|
|
||||||
virtual future<std::optional<sstring>> get_attribute(std::string_view role_name, std::string_view attribute_name, ::service::query_state& qs) override;
|
virtual future<std::optional<sstring>> get_attribute(std::string_view role_name, std::string_view attribute_name, ::service::query_state&) override;
|
||||||
|
|
||||||
virtual future<role_manager::attribute_vals> query_attribute_for_all(std::string_view attribute_name, ::service::query_state& qs) override;
|
virtual future<role_manager::attribute_vals> query_attribute_for_all(std::string_view attribute_name, ::service::query_state&) override;
|
||||||
|
|
||||||
virtual future<> set_attribute(std::string_view role_name, std::string_view attribute_name, std::string_view attribute_value, ::service::group0_batch& mc) override;
|
virtual future<> set_attribute(std::string_view role_name, std::string_view attribute_name, std::string_view attribute_value, ::service::group0_batch& mc) override;
|
||||||
|
|
||||||
virtual future<> remove_attribute(std::string_view role_name, std::string_view attribute_name, ::service::group0_batch& mc) override;
|
virtual future<> remove_attribute(std::string_view role_name, std::string_view attribute_name, ::service::group0_batch& mc) override;
|
||||||
|
|
||||||
virtual future<std::vector<cql3::description>> describe_role_grants() override;
|
virtual future<std::vector<cql3::description>> describe_role_grants() override;
|
||||||
|
|
||||||
private:
|
|
||||||
future<> validate_operation(std::string_view name) const;
|
|
||||||
|
|
||||||
};
|
};
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -26,9 +26,10 @@
|
|||||||
#include "cql3/untyped_result_set.hh"
|
#include "cql3/untyped_result_set.hh"
|
||||||
#include "utils/log.hh"
|
#include "utils/log.hh"
|
||||||
#include "service/migration_manager.hh"
|
#include "service/migration_manager.hh"
|
||||||
|
#include "utils/class_registrator.hh"
|
||||||
|
#include "replica/database.hh"
|
||||||
#include "cql3/query_processor.hh"
|
#include "cql3/query_processor.hh"
|
||||||
#include "db/config.hh"
|
#include "db/config.hh"
|
||||||
#include "db/system_keyspace.hh"
|
|
||||||
|
|
||||||
namespace auth {
|
namespace auth {
|
||||||
|
|
||||||
@@ -36,19 +37,41 @@ constexpr std::string_view password_authenticator_name("org.apache.cassandra.aut
|
|||||||
|
|
||||||
// name of the hash column.
|
// name of the hash column.
|
||||||
static constexpr std::string_view SALTED_HASH = "salted_hash";
|
static constexpr std::string_view SALTED_HASH = "salted_hash";
|
||||||
|
static constexpr std::string_view DEFAULT_USER_NAME = meta::DEFAULT_SUPERUSER_NAME;
|
||||||
|
static const sstring DEFAULT_USER_PASSWORD = sstring(meta::DEFAULT_SUPERUSER_NAME);
|
||||||
|
|
||||||
static logging::logger plogger("password_authenticator");
|
static logging::logger plogger("password_authenticator");
|
||||||
|
|
||||||
|
// To ensure correct initialization order, we unfortunately need to use a string literal.
|
||||||
|
static const class_registrator<
|
||||||
|
authenticator,
|
||||||
|
password_authenticator,
|
||||||
|
cql3::query_processor&,
|
||||||
|
::service::raft_group0_client&,
|
||||||
|
::service::migration_manager&,
|
||||||
|
cache&,
|
||||||
|
utils::alien_worker&> password_auth_reg("org.apache.cassandra.auth.PasswordAuthenticator");
|
||||||
|
|
||||||
static thread_local auto rng_for_salt = std::default_random_engine(std::random_device{}());
|
static thread_local auto rng_for_salt = std::default_random_engine(std::random_device{}());
|
||||||
|
|
||||||
|
static std::string_view get_config_value(std::string_view value, std::string_view def) {
|
||||||
|
return value.empty() ? def : value;
|
||||||
|
}
|
||||||
|
std::string password_authenticator::default_superuser(const db::config& cfg) {
|
||||||
|
return std::string(get_config_value(cfg.auth_superuser_name(), DEFAULT_USER_NAME));
|
||||||
|
}
|
||||||
|
|
||||||
password_authenticator::~password_authenticator() {
|
password_authenticator::~password_authenticator() {
|
||||||
}
|
}
|
||||||
|
|
||||||
password_authenticator::password_authenticator(cql3::query_processor& qp, ::service::raft_group0_client& g0, ::service::migration_manager& mm, cache& cache)
|
password_authenticator::password_authenticator(cql3::query_processor& qp, ::service::raft_group0_client& g0, ::service::migration_manager& mm, cache& cache, utils::alien_worker& hashing_worker)
|
||||||
: _qp(qp)
|
: _qp(qp)
|
||||||
, _group0_client(g0)
|
, _group0_client(g0)
|
||||||
, _migration_manager(mm)
|
, _migration_manager(mm)
|
||||||
, _cache(cache)
|
, _cache(cache)
|
||||||
, _stopped(make_ready_future<>())
|
, _stopped(make_ready_future<>())
|
||||||
|
, _superuser(default_superuser(qp.db().get_config()))
|
||||||
|
, _hashing_worker(hashing_worker)
|
||||||
{}
|
{}
|
||||||
|
|
||||||
static bool has_salted_hash(const cql3::untyped_result_set_row& row) {
|
static bool has_salted_hash(const cql3::untyped_result_set_row& row) {
|
||||||
@@ -57,18 +80,76 @@ static bool has_salted_hash(const cql3::untyped_result_set_row& row) {
|
|||||||
|
|
||||||
sstring password_authenticator::update_row_query() const {
|
sstring password_authenticator::update_row_query() const {
|
||||||
return seastar::format("UPDATE {}.{} SET {} = ? WHERE {} = ?",
|
return seastar::format("UPDATE {}.{} SET {} = ? WHERE {} = ?",
|
||||||
db::system_keyspace::NAME,
|
get_auth_ks_name(_qp),
|
||||||
meta::roles_table::name,
|
meta::roles_table::name,
|
||||||
SALTED_HASH,
|
SALTED_HASH,
|
||||||
meta::roles_table::role_col_name);
|
meta::roles_table::role_col_name);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static const sstring legacy_table_name{"credentials"};
|
||||||
|
|
||||||
|
bool password_authenticator::legacy_metadata_exists() const {
|
||||||
|
return _qp.db().has_schema(meta::legacy::AUTH_KS, legacy_table_name);
|
||||||
|
}
|
||||||
|
|
||||||
|
future<> password_authenticator::migrate_legacy_metadata() const {
|
||||||
|
plogger.info("Starting migration of legacy authentication metadata.");
|
||||||
|
static const sstring query = seastar::format("SELECT * FROM {}.{}", meta::legacy::AUTH_KS, legacy_table_name);
|
||||||
|
|
||||||
|
return _qp.execute_internal(
|
||||||
|
query,
|
||||||
|
db::consistency_level::QUORUM,
|
||||||
|
internal_distributed_query_state(),
|
||||||
|
cql3::query_processor::cache_internal::no).then([this](::shared_ptr<cql3::untyped_result_set> results) {
|
||||||
|
return do_for_each(*results, [this](const cql3::untyped_result_set_row& row) {
|
||||||
|
auto username = row.get_as<sstring>("username");
|
||||||
|
auto salted_hash = row.get_as<sstring>(SALTED_HASH);
|
||||||
|
static const auto query = seastar::format("UPDATE {}.{} SET {} = ? WHERE {} = ?",
|
||||||
|
meta::legacy::AUTH_KS,
|
||||||
|
meta::roles_table::name,
|
||||||
|
SALTED_HASH,
|
||||||
|
meta::roles_table::role_col_name);
|
||||||
|
return _qp.execute_internal(
|
||||||
|
query,
|
||||||
|
consistency_for_user(username),
|
||||||
|
internal_distributed_query_state(),
|
||||||
|
{std::move(salted_hash), username},
|
||||||
|
cql3::query_processor::cache_internal::no).discard_result();
|
||||||
|
}).finally([results] {});
|
||||||
|
}).then([] {
|
||||||
|
plogger.info("Finished migrating legacy authentication metadata.");
|
||||||
|
}).handle_exception([](std::exception_ptr ep) {
|
||||||
|
plogger.error("Encountered an error during migration!");
|
||||||
|
std::rethrow_exception(ep);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
future<> password_authenticator::legacy_create_default_if_missing() {
|
||||||
|
const auto exists = co_await legacy::default_role_row_satisfies(_qp, &has_salted_hash, _superuser);
|
||||||
|
if (exists) {
|
||||||
|
co_return;
|
||||||
|
}
|
||||||
|
std::string salted_pwd(get_config_value(_qp.db().get_config().auth_superuser_salted_password(), ""));
|
||||||
|
if (salted_pwd.empty()) {
|
||||||
|
salted_pwd = passwords::hash(DEFAULT_USER_PASSWORD, rng_for_salt, _scheme);
|
||||||
|
}
|
||||||
|
const auto query = seastar::format("UPDATE {}.{} SET {} = ? WHERE {} = ?",
|
||||||
|
meta::legacy::AUTH_KS,
|
||||||
|
meta::roles_table::name,
|
||||||
|
SALTED_HASH,
|
||||||
|
meta::roles_table::role_col_name);
|
||||||
|
co_await _qp.execute_internal(
|
||||||
|
query,
|
||||||
|
db::consistency_level::QUORUM,
|
||||||
|
internal_distributed_query_state(),
|
||||||
|
{salted_pwd, _superuser},
|
||||||
|
cql3::query_processor::cache_internal::no);
|
||||||
|
plogger.info("Created default superuser authentication record.");
|
||||||
|
}
|
||||||
|
|
||||||
future<> password_authenticator::maybe_create_default_password() {
|
future<> password_authenticator::maybe_create_default_password() {
|
||||||
auto needs_password = [this] () -> future<bool> {
|
auto needs_password = [this] () -> future<bool> {
|
||||||
if (default_superuser(_qp).empty()) {
|
const sstring query = seastar::format("SELECT * FROM {}.{} WHERE is_superuser = true ALLOW FILTERING", get_auth_ks_name(_qp), meta::roles_table::name);
|
||||||
co_return false;
|
|
||||||
}
|
|
||||||
const sstring query = seastar::format("SELECT * FROM {}.{} WHERE is_superuser = true ALLOW FILTERING", db::system_keyspace::NAME, meta::roles_table::name);
|
|
||||||
auto results = co_await _qp.execute_internal(query,
|
auto results = co_await _qp.execute_internal(query,
|
||||||
db::consistency_level::LOCAL_ONE,
|
db::consistency_level::LOCAL_ONE,
|
||||||
internal_distributed_query_state(), cql3::query_processor::cache_internal::yes);
|
internal_distributed_query_state(), cql3::query_processor::cache_internal::yes);
|
||||||
@@ -78,7 +159,7 @@ future<> password_authenticator::maybe_create_default_password() {
|
|||||||
bool has_default = false;
|
bool has_default = false;
|
||||||
bool has_superuser_with_password = false;
|
bool has_superuser_with_password = false;
|
||||||
for (auto& result : *results) {
|
for (auto& result : *results) {
|
||||||
if (result.get_as<sstring>(meta::roles_table::role_col_name) == default_superuser(_qp)) {
|
if (result.get_as<sstring>(meta::roles_table::role_col_name) == _superuser) {
|
||||||
has_default = true;
|
has_default = true;
|
||||||
}
|
}
|
||||||
if (has_salted_hash(result)) {
|
if (has_salted_hash(result)) {
|
||||||
@@ -99,12 +180,12 @@ future<> password_authenticator::maybe_create_default_password() {
|
|||||||
co_return;
|
co_return;
|
||||||
}
|
}
|
||||||
// Set default superuser's password.
|
// Set default superuser's password.
|
||||||
std::string salted_pwd(_qp.db().get_config().auth_superuser_salted_password());
|
std::string salted_pwd(get_config_value(_qp.db().get_config().auth_superuser_salted_password(), ""));
|
||||||
if (salted_pwd.empty()) {
|
if (salted_pwd.empty()) {
|
||||||
co_return;
|
salted_pwd = passwords::hash(DEFAULT_USER_PASSWORD, rng_for_salt, _scheme);
|
||||||
}
|
}
|
||||||
const auto update_query = update_row_query();
|
const auto update_query = update_row_query();
|
||||||
co_await collect_mutations(_qp, batch, update_query, {salted_pwd, default_superuser(_qp)});
|
co_await collect_mutations(_qp, batch, update_query, {salted_pwd, _superuser});
|
||||||
co_await std::move(batch).commit(_group0_client, _as, get_raft_timeout());
|
co_await std::move(batch).commit(_group0_client, _as, get_raft_timeout());
|
||||||
plogger.info("Created default superuser authentication record.");
|
plogger.info("Created default superuser authentication record.");
|
||||||
}
|
}
|
||||||
@@ -137,14 +218,58 @@ future<> password_authenticator::start() {
|
|||||||
|
|
||||||
_stopped = do_after_system_ready(_as, [this] {
|
_stopped = do_after_system_ready(_as, [this] {
|
||||||
return async([this] {
|
return async([this] {
|
||||||
|
if (legacy_mode(_qp)) {
|
||||||
|
if (!_superuser_created_promise.available()) {
|
||||||
|
// Counterintuitively, we mark promise as ready before any startup work
|
||||||
|
// because wait_for_schema_agreement() below will block indefinitely
|
||||||
|
// without cluster majority. In that case, blocking node startup
|
||||||
|
// would lead to a cluster deadlock.
|
||||||
|
_superuser_created_promise.set_value();
|
||||||
|
}
|
||||||
|
_migration_manager.wait_for_schema_agreement(_qp.db().real_database(), db::timeout_clock::time_point::max(), &_as).get();
|
||||||
|
|
||||||
|
if (legacy::any_nondefault_role_row_satisfies(_qp, &has_salted_hash, _superuser).get()) {
|
||||||
|
if (legacy_metadata_exists()) {
|
||||||
|
plogger.warn("Ignoring legacy authentication metadata since nondefault data already exist.");
|
||||||
|
}
|
||||||
|
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (legacy_metadata_exists()) {
|
||||||
|
migrate_legacy_metadata().get();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
legacy_create_default_if_missing().get();
|
||||||
|
}
|
||||||
utils::get_local_injector().inject("password_authenticator_start_pause", utils::wait_for_message(5min)).get();
|
utils::get_local_injector().inject("password_authenticator_start_pause", utils::wait_for_message(5min)).get();
|
||||||
maybe_create_default_password_with_retries().get();
|
if (!legacy_mode(_qp)) {
|
||||||
if (!_superuser_created_promise.available()) {
|
maybe_create_default_password_with_retries().get();
|
||||||
_superuser_created_promise.set_value();
|
if (!_superuser_created_promise.available()) {
|
||||||
|
_superuser_created_promise.set_value();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
|
if (legacy_mode(_qp)) {
|
||||||
|
static const sstring create_roles_query = fmt::format(
|
||||||
|
"CREATE TABLE {}.{} ("
|
||||||
|
" {} text PRIMARY KEY,"
|
||||||
|
" can_login boolean,"
|
||||||
|
" is_superuser boolean,"
|
||||||
|
" member_of set<text>,"
|
||||||
|
" salted_hash text"
|
||||||
|
")",
|
||||||
|
meta::legacy::AUTH_KS,
|
||||||
|
meta::roles_table::name,
|
||||||
|
meta::roles_table::role_col_name);
|
||||||
|
return create_legacy_metadata_table_if_missing(
|
||||||
|
meta::roles_table::name,
|
||||||
|
_qp,
|
||||||
|
create_roles_query,
|
||||||
|
_migration_manager);
|
||||||
|
}
|
||||||
return make_ready_future<>();
|
return make_ready_future<>();
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
@@ -154,6 +279,15 @@ future<> password_authenticator::stop() {
|
|||||||
return _stopped.handle_exception_type([] (const sleep_aborted&) { }).handle_exception_type([](const abort_requested_exception&) {});
|
return _stopped.handle_exception_type([] (const sleep_aborted&) { }).handle_exception_type([](const abort_requested_exception&) {});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
db::consistency_level password_authenticator::consistency_for_user(std::string_view role_name) {
|
||||||
|
// TODO: this is plain dung. Why treat hardcoded default special, but for example a user-created
|
||||||
|
// super user uses plain LOCAL_ONE?
|
||||||
|
if (role_name == DEFAULT_USER_NAME) {
|
||||||
|
return db::consistency_level::QUORUM;
|
||||||
|
}
|
||||||
|
return db::consistency_level::LOCAL_ONE;
|
||||||
|
}
|
||||||
|
|
||||||
std::string_view password_authenticator::qualified_java_name() const {
|
std::string_view password_authenticator::qualified_java_name() const {
|
||||||
return password_authenticator_name;
|
return password_authenticator_name;
|
||||||
}
|
}
|
||||||
@@ -183,23 +317,33 @@ future<authenticated_user> password_authenticator::authenticate(
|
|||||||
const sstring password = credentials.at(PASSWORD_KEY);
|
const sstring password = credentials.at(PASSWORD_KEY);
|
||||||
|
|
||||||
try {
|
try {
|
||||||
auto role = _cache.get(username);
|
std::optional<sstring> salted_hash;
|
||||||
if (!role || role->salted_hash.empty()) {
|
if (legacy_mode(_qp)) {
|
||||||
throw exceptions::authentication_exception("Username and/or password are incorrect");
|
salted_hash = co_await get_password_hash(username);
|
||||||
|
if (!salted_hash) {
|
||||||
|
throw exceptions::authentication_exception("Username and/or password are incorrect");
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
auto role = _cache.get(username);
|
||||||
|
if (!role || role->salted_hash.empty()) {
|
||||||
|
throw exceptions::authentication_exception("Username and/or password are incorrect");
|
||||||
|
}
|
||||||
|
salted_hash = role->salted_hash;
|
||||||
}
|
}
|
||||||
const auto& salted_hash = role->salted_hash;
|
const bool password_match = co_await _hashing_worker.submit<bool>([password = std::move(password), salted_hash] {
|
||||||
const bool password_match = co_await passwords::check(password, salted_hash);
|
return passwords::check(password, *salted_hash);
|
||||||
|
});
|
||||||
if (!password_match) {
|
if (!password_match) {
|
||||||
throw exceptions::authentication_exception("Username and/or password are incorrect");
|
throw exceptions::authentication_exception("Username and/or password are incorrect");
|
||||||
}
|
}
|
||||||
co_return username;
|
co_return username;
|
||||||
} catch (const std::system_error &) {
|
} catch (std::system_error &) {
|
||||||
std::throw_with_nested(exceptions::authentication_exception("Could not verify password"));
|
std::throw_with_nested(exceptions::authentication_exception("Could not verify password"));
|
||||||
} catch (const exceptions::request_execution_exception& e) {
|
} catch (exceptions::request_execution_exception& e) {
|
||||||
std::throw_with_nested(exceptions::authentication_exception(e.what()));
|
std::throw_with_nested(exceptions::authentication_exception(e.what()));
|
||||||
} catch (const exceptions::authentication_exception& e) {
|
} catch (exceptions::authentication_exception& e) {
|
||||||
std::throw_with_nested(e);
|
std::throw_with_nested(e);
|
||||||
} catch (const exceptions::unavailable_exception& e) {
|
} catch (exceptions::unavailable_exception& e) {
|
||||||
std::throw_with_nested(exceptions::authentication_exception(e.get_message()));
|
std::throw_with_nested(exceptions::authentication_exception(e.get_message()));
|
||||||
} catch (...) {
|
} catch (...) {
|
||||||
std::throw_with_nested(exceptions::authentication_exception("authentication failed"));
|
std::throw_with_nested(exceptions::authentication_exception("authentication failed"));
|
||||||
@@ -227,7 +371,16 @@ future<> password_authenticator::create(std::string_view role_name, const authen
|
|||||||
}
|
}
|
||||||
|
|
||||||
const auto query = update_row_query();
|
const auto query = update_row_query();
|
||||||
co_await collect_mutations(_qp, mc, query, {std::move(*maybe_hash), sstring(role_name)});
|
if (legacy_mode(_qp)) {
|
||||||
|
co_await _qp.execute_internal(
|
||||||
|
query,
|
||||||
|
consistency_for_user(role_name),
|
||||||
|
internal_distributed_query_state(),
|
||||||
|
{std::move(*maybe_hash), sstring(role_name)},
|
||||||
|
cql3::query_processor::cache_internal::no).discard_result();
|
||||||
|
} else {
|
||||||
|
co_await collect_mutations(_qp, mc, query, {std::move(*maybe_hash), sstring(role_name)});
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
future<> password_authenticator::alter(std::string_view role_name, const authentication_options& options, ::service::group0_batch& mc) {
|
future<> password_authenticator::alter(std::string_view role_name, const authentication_options& options, ::service::group0_batch& mc) {
|
||||||
@@ -238,21 +391,38 @@ future<> password_authenticator::alter(std::string_view role_name, const authent
|
|||||||
const auto password = std::get<password_option>(*options.credentials).password;
|
const auto password = std::get<password_option>(*options.credentials).password;
|
||||||
|
|
||||||
const sstring query = seastar::format("UPDATE {}.{} SET {} = ? WHERE {} = ?",
|
const sstring query = seastar::format("UPDATE {}.{} SET {} = ? WHERE {} = ?",
|
||||||
db::system_keyspace::NAME,
|
get_auth_ks_name(_qp),
|
||||||
meta::roles_table::name,
|
meta::roles_table::name,
|
||||||
SALTED_HASH,
|
SALTED_HASH,
|
||||||
meta::roles_table::role_col_name);
|
meta::roles_table::role_col_name);
|
||||||
co_await collect_mutations(_qp, mc, query,
|
if (legacy_mode(_qp)) {
|
||||||
{passwords::hash(password, rng_for_salt, _scheme), sstring(role_name)});
|
co_await _qp.execute_internal(
|
||||||
|
query,
|
||||||
|
consistency_for_user(role_name),
|
||||||
|
internal_distributed_query_state(),
|
||||||
|
{passwords::hash(password, rng_for_salt, _scheme), sstring(role_name)},
|
||||||
|
cql3::query_processor::cache_internal::no).discard_result();
|
||||||
|
} else {
|
||||||
|
co_await collect_mutations(_qp, mc, query,
|
||||||
|
{passwords::hash(password, rng_for_salt, _scheme), sstring(role_name)});
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
future<> password_authenticator::drop(std::string_view name, ::service::group0_batch& mc) {
|
future<> password_authenticator::drop(std::string_view name, ::service::group0_batch& mc) {
|
||||||
const sstring query = seastar::format("DELETE {} FROM {}.{} WHERE {} = ?",
|
const sstring query = seastar::format("DELETE {} FROM {}.{} WHERE {} = ?",
|
||||||
SALTED_HASH,
|
SALTED_HASH,
|
||||||
db::system_keyspace::NAME,
|
get_auth_ks_name(_qp),
|
||||||
meta::roles_table::name,
|
meta::roles_table::name,
|
||||||
meta::roles_table::role_col_name);
|
meta::roles_table::role_col_name);
|
||||||
co_await collect_mutations(_qp, mc, query, {sstring(name)});
|
if (legacy_mode(_qp)) {
|
||||||
|
co_await _qp.execute_internal(
|
||||||
|
query, consistency_for_user(name),
|
||||||
|
internal_distributed_query_state(),
|
||||||
|
{sstring(name)},
|
||||||
|
cql3::query_processor::cache_internal::no).discard_result();
|
||||||
|
} else {
|
||||||
|
co_await collect_mutations(_qp, mc, query, {sstring(name)});
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
future<custom_options> password_authenticator::query_custom_options(std::string_view role_name) const {
|
future<custom_options> password_authenticator::query_custom_options(std::string_view role_name) const {
|
||||||
@@ -271,13 +441,13 @@ future<std::optional<sstring>> password_authenticator::get_password_hash(std::st
|
|||||||
// that a map lookup string->statement is not gonna kill us much.
|
// that a map lookup string->statement is not gonna kill us much.
|
||||||
const sstring query = seastar::format("SELECT {} FROM {}.{} WHERE {} = ?",
|
const sstring query = seastar::format("SELECT {} FROM {}.{} WHERE {} = ?",
|
||||||
SALTED_HASH,
|
SALTED_HASH,
|
||||||
db::system_keyspace::NAME,
|
get_auth_ks_name(_qp),
|
||||||
meta::roles_table::name,
|
meta::roles_table::name,
|
||||||
meta::roles_table::role_col_name);
|
meta::roles_table::role_col_name);
|
||||||
|
|
||||||
const auto res = co_await _qp.execute_internal(
|
const auto res = co_await _qp.execute_internal(
|
||||||
query,
|
query,
|
||||||
db::consistency_level::LOCAL_ONE,
|
consistency_for_user(role_name),
|
||||||
internal_distributed_query_state(),
|
internal_distributed_query_state(),
|
||||||
{role_name},
|
{role_name},
|
||||||
cql3::query_processor::cache_internal::yes);
|
cql3::query_processor::cache_internal::yes);
|
||||||
|
|||||||
@@ -13,10 +13,12 @@
|
|||||||
#include <seastar/core/abort_source.hh>
|
#include <seastar/core/abort_source.hh>
|
||||||
#include <seastar/core/shared_future.hh>
|
#include <seastar/core/shared_future.hh>
|
||||||
|
|
||||||
|
#include "db/consistency_level_type.hh"
|
||||||
#include "auth/authenticator.hh"
|
#include "auth/authenticator.hh"
|
||||||
#include "auth/passwords.hh"
|
#include "auth/passwords.hh"
|
||||||
#include "auth/cache.hh"
|
#include "auth/cache.hh"
|
||||||
#include "service/raft/raft_group0_client.hh"
|
#include "service/raft/raft_group0_client.hh"
|
||||||
|
#include "utils/alien_worker.hh"
|
||||||
|
|
||||||
namespace db {
|
namespace db {
|
||||||
class config;
|
class config;
|
||||||
@@ -43,12 +45,17 @@ class password_authenticator : public authenticator {
|
|||||||
cache& _cache;
|
cache& _cache;
|
||||||
future<> _stopped;
|
future<> _stopped;
|
||||||
abort_source _as;
|
abort_source _as;
|
||||||
|
std::string _superuser; // default superuser name from the config (may or may not be present in roles table)
|
||||||
shared_promise<> _superuser_created_promise;
|
shared_promise<> _superuser_created_promise;
|
||||||
// We used to also support bcrypt, SHA-256, and MD5 (ref. scylladb#24524).
|
// We used to also support bcrypt, SHA-256, and MD5 (ref. scylladb#24524).
|
||||||
constexpr static auth::passwords::scheme _scheme = passwords::scheme::sha_512;
|
constexpr static auth::passwords::scheme _scheme = passwords::scheme::sha_512;
|
||||||
|
utils::alien_worker& _hashing_worker;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
password_authenticator(cql3::query_processor&, ::service::raft_group0_client&, ::service::migration_manager&, cache&);
|
static db::consistency_level consistency_for_user(std::string_view role_name);
|
||||||
|
static std::string default_superuser(const db::config&);
|
||||||
|
|
||||||
|
password_authenticator(cql3::query_processor&, ::service::raft_group0_client&, ::service::migration_manager&, cache&, utils::alien_worker&);
|
||||||
|
|
||||||
~password_authenticator();
|
~password_authenticator();
|
||||||
|
|
||||||
@@ -85,6 +92,12 @@ public:
|
|||||||
virtual future<> ensure_superuser_is_created() const override;
|
virtual future<> ensure_superuser_is_created() const override;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
bool legacy_metadata_exists() const;
|
||||||
|
|
||||||
|
future<> migrate_legacy_metadata() const;
|
||||||
|
|
||||||
|
future<> legacy_create_default_if_missing();
|
||||||
|
|
||||||
future<> maybe_create_default_password();
|
future<> maybe_create_default_password();
|
||||||
future<> maybe_create_default_password_with_retries();
|
future<> maybe_create_default_password_with_retries();
|
||||||
|
|
||||||
|
|||||||
@@ -7,8 +7,6 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
#include "auth/passwords.hh"
|
#include "auth/passwords.hh"
|
||||||
#include "utils/crypt_sha512.hh"
|
|
||||||
#include <seastar/core/coroutine.hh>
|
|
||||||
|
|
||||||
#include <cerrno>
|
#include <cerrno>
|
||||||
|
|
||||||
@@ -23,46 +21,25 @@ static thread_local crypt_data tlcrypt = {};
|
|||||||
|
|
||||||
namespace detail {
|
namespace detail {
|
||||||
|
|
||||||
void verify_hashing_output(const char * res) {
|
|
||||||
if (!res || (res[0] == '*')) {
|
|
||||||
throw std::system_error(errno, std::system_category());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void verify_scheme(scheme scheme) {
|
void verify_scheme(scheme scheme) {
|
||||||
const sstring random_part_of_salt = "aaaabbbbccccdddd";
|
const sstring random_part_of_salt = "aaaabbbbccccdddd";
|
||||||
|
|
||||||
const sstring salt = sstring(prefix_for_scheme(scheme)) + random_part_of_salt;
|
const sstring salt = sstring(prefix_for_scheme(scheme)) + random_part_of_salt;
|
||||||
const char* e = crypt_r("fisk", salt.c_str(), &tlcrypt);
|
const char* e = crypt_r("fisk", salt.c_str(), &tlcrypt);
|
||||||
try {
|
|
||||||
verify_hashing_output(e);
|
if (e && (e[0] != '*')) {
|
||||||
} catch (const std::system_error& ex) {
|
return;
|
||||||
throw no_supported_schemes();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
throw no_supported_schemes();
|
||||||
}
|
}
|
||||||
|
|
||||||
sstring hash_with_salt(const sstring& pass, const sstring& salt) {
|
sstring hash_with_salt(const sstring& pass, const sstring& salt) {
|
||||||
auto res = crypt_r(pass.c_str(), salt.c_str(), &tlcrypt);
|
auto res = crypt_r(pass.c_str(), salt.c_str(), &tlcrypt);
|
||||||
verify_hashing_output(res);
|
if (!res || (res[0] == '*')) {
|
||||||
return res;
|
throw std::system_error(errno, std::system_category());
|
||||||
}
|
|
||||||
|
|
||||||
seastar::future<sstring> hash_with_salt_async(const sstring& pass, const sstring& salt) {
|
|
||||||
sstring res;
|
|
||||||
// Only SHA-512 hashes for passphrases shorter than 256 bytes can be computed using
|
|
||||||
// the __crypt_sha512 method. For other computations, we fall back to the
|
|
||||||
// crypt_r implementation from `<crypt.h>`, which can stall.
|
|
||||||
if (salt.starts_with(prefix_for_scheme(scheme::sha_512)) && pass.size() <= 255) {
|
|
||||||
char buf[128];
|
|
||||||
const char * output_ptr = co_await __crypt_sha512(pass.c_str(), salt.c_str(), buf);
|
|
||||||
verify_hashing_output(output_ptr);
|
|
||||||
res = output_ptr;
|
|
||||||
} else {
|
|
||||||
const char * output_ptr = crypt_r(pass.c_str(), salt.c_str(), &tlcrypt);
|
|
||||||
verify_hashing_output(output_ptr);
|
|
||||||
res = output_ptr;
|
|
||||||
}
|
}
|
||||||
co_return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::string_view prefix_for_scheme(scheme c) noexcept {
|
std::string_view prefix_for_scheme(scheme c) noexcept {
|
||||||
@@ -81,9 +58,8 @@ no_supported_schemes::no_supported_schemes()
|
|||||||
: std::runtime_error("No allowed hashing schemes are supported on this system") {
|
: std::runtime_error("No allowed hashing schemes are supported on this system") {
|
||||||
}
|
}
|
||||||
|
|
||||||
seastar::future<bool> check(const sstring& pass, const sstring& salted_hash) {
|
bool check(const sstring& pass, const sstring& salted_hash) {
|
||||||
const auto pwd_hash = co_await detail::hash_with_salt_async(pass, salted_hash);
|
return detail::hash_with_salt(pass, salted_hash) == salted_hash;
|
||||||
co_return pwd_hash == salted_hash;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace auth::passwords
|
} // namespace auth::passwords
|
||||||
|
|||||||
@@ -11,7 +11,6 @@
|
|||||||
#include <random>
|
#include <random>
|
||||||
#include <stdexcept>
|
#include <stdexcept>
|
||||||
|
|
||||||
#include <seastar/core/future.hh>
|
|
||||||
#include <seastar/core/sstring.hh>
|
#include <seastar/core/sstring.hh>
|
||||||
|
|
||||||
#include "seastarx.hh"
|
#include "seastarx.hh"
|
||||||
@@ -76,23 +75,11 @@ sstring generate_salt(RandomNumberEngine& g, scheme scheme) {
|
|||||||
|
|
||||||
///
|
///
|
||||||
/// Hash a password combined with an implementation-specific salt string.
|
/// Hash a password combined with an implementation-specific salt string.
|
||||||
/// Deprecated in favor of `hash_with_salt_async`. This function is still used
|
|
||||||
/// when generating password hashes for storage to ensure that
|
|
||||||
/// `hash_with_salt` and `hash_with_salt_async` produce identical results,
|
|
||||||
/// preserving backward compatibility.
|
|
||||||
///
|
///
|
||||||
/// \throws \ref std::system_error when an unexpected implementation-specific error occurs.
|
/// \throws \ref std::system_error when an unexpected implementation-specific error occurs.
|
||||||
///
|
///
|
||||||
sstring hash_with_salt(const sstring& pass, const sstring& salt);
|
sstring hash_with_salt(const sstring& pass, const sstring& salt);
|
||||||
|
|
||||||
///
|
|
||||||
/// Async version of `hash_with_salt` that returns a future.
|
|
||||||
/// If possible, hashing uses `coroutine::maybe_yield` to prevent reactor stalls.
|
|
||||||
///
|
|
||||||
/// \throws \ref std::system_error when an unexpected implementation-specific error occurs.
|
|
||||||
///
|
|
||||||
seastar::future<sstring> hash_with_salt_async(const sstring& pass, const sstring& salt);
|
|
||||||
|
|
||||||
} // namespace detail
|
} // namespace detail
|
||||||
|
|
||||||
///
|
///
|
||||||
@@ -120,6 +107,6 @@ sstring hash(const sstring& pass, RandomNumberEngine& g, scheme scheme) {
|
|||||||
///
|
///
|
||||||
/// \throws \ref std::system_error when an unexpected implementation-specific error occurs.
|
/// \throws \ref std::system_error when an unexpected implementation-specific error occurs.
|
||||||
///
|
///
|
||||||
seastar::future<bool> check(const sstring& pass, const sstring& salted_hash);
|
bool check(const sstring& pass, const sstring& salted_hash);
|
||||||
|
|
||||||
} // namespace auth::passwords
|
} // namespace auth::passwords
|
||||||
|
|||||||
38
auth/permissions_cache.cc
Normal file
38
auth/permissions_cache.cc
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (C) 2017-present ScyllaDB
|
||||||
|
*/
|
||||||
|
|
||||||
|
/*
|
||||||
|
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "auth/permissions_cache.hh"
|
||||||
|
|
||||||
|
#include <fmt/ranges.h>
|
||||||
|
#include "auth/authorizer.hh"
|
||||||
|
#include "auth/service.hh"
|
||||||
|
|
||||||
|
namespace auth {
|
||||||
|
|
||||||
|
permissions_cache::permissions_cache(const utils::loading_cache_config& c, service& ser, logging::logger& log)
|
||||||
|
: _cache(c, log, [&ser, &log](const key_type& k) {
|
||||||
|
log.debug("Refreshing permissions for {}", k.first);
|
||||||
|
return ser.get_uncached_permissions(k.first, k.second);
|
||||||
|
}) {
|
||||||
|
}
|
||||||
|
|
||||||
|
bool permissions_cache::update_config(utils::loading_cache_config c) {
|
||||||
|
return _cache.update_config(std::move(c));
|
||||||
|
}
|
||||||
|
|
||||||
|
void permissions_cache::reset() {
|
||||||
|
_cache.reset();
|
||||||
|
}
|
||||||
|
|
||||||
|
future<permission_set> permissions_cache::get(const role_or_anonymous& maybe_role, const resource& r) {
|
||||||
|
return do_with(key_type(maybe_role, r), [this](const auto& k) {
|
||||||
|
return _cache.get(k);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
66
auth/permissions_cache.hh
Normal file
66
auth/permissions_cache.hh
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (C) 2017-present ScyllaDB
|
||||||
|
*/
|
||||||
|
|
||||||
|
/*
|
||||||
|
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||||
|
*/
|
||||||
|
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <iostream>
|
||||||
|
#include <utility>
|
||||||
|
|
||||||
|
#include <fmt/core.h>
|
||||||
|
#include <seastar/core/future.hh>
|
||||||
|
|
||||||
|
#include "auth/permission.hh"
|
||||||
|
#include "auth/resource.hh"
|
||||||
|
#include "auth/role_or_anonymous.hh"
|
||||||
|
#include "utils/log.hh"
|
||||||
|
#include "utils/hash.hh"
|
||||||
|
#include "utils/loading_cache.hh"
|
||||||
|
|
||||||
|
namespace std {
|
||||||
|
|
||||||
|
inline std::ostream& operator<<(std::ostream& os, const pair<auth::role_or_anonymous, auth::resource>& p) {
|
||||||
|
fmt::print(os, "{{role: {}, resource: {}}}", p.first, p.second);
|
||||||
|
return os;
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
namespace db {
|
||||||
|
class config;
|
||||||
|
}
|
||||||
|
|
||||||
|
namespace auth {
|
||||||
|
|
||||||
|
class service;
|
||||||
|
|
||||||
|
class permissions_cache final {
|
||||||
|
using cache_type = utils::loading_cache<
|
||||||
|
std::pair<role_or_anonymous, resource>,
|
||||||
|
permission_set,
|
||||||
|
1,
|
||||||
|
utils::loading_cache_reload_enabled::yes,
|
||||||
|
utils::simple_entry_size<permission_set>,
|
||||||
|
utils::tuple_hash>;
|
||||||
|
|
||||||
|
using key_type = typename cache_type::key_type;
|
||||||
|
|
||||||
|
cache_type _cache;
|
||||||
|
|
||||||
|
public:
|
||||||
|
explicit permissions_cache(const utils::loading_cache_config&, service&, logging::logger&);
|
||||||
|
|
||||||
|
future <> stop() {
|
||||||
|
return _cache.stop();
|
||||||
|
}
|
||||||
|
|
||||||
|
bool update_config(utils::loading_cache_config);
|
||||||
|
void reset();
|
||||||
|
future<permission_set> get(const role_or_anonymous&, const resource&);
|
||||||
|
};
|
||||||
|
|
||||||
|
}
|
||||||
@@ -112,11 +112,6 @@ public:
|
|||||||
|
|
||||||
virtual future<> stop() = 0;
|
virtual future<> stop() = 0;
|
||||||
|
|
||||||
///
|
|
||||||
/// Notify that the maintenance mode is starting.
|
|
||||||
///
|
|
||||||
virtual void set_maintenance_mode() {}
|
|
||||||
|
|
||||||
///
|
///
|
||||||
/// Ensure that superuser role exists.
|
/// Ensure that superuser role exists.
|
||||||
///
|
///
|
||||||
@@ -124,11 +119,6 @@ public:
|
|||||||
///
|
///
|
||||||
virtual future<> ensure_superuser_is_created() = 0;
|
virtual future<> ensure_superuser_is_created() = 0;
|
||||||
|
|
||||||
///
|
|
||||||
/// Ensure role management operations are enabled. Some role managers may defer initialization.
|
|
||||||
///
|
|
||||||
virtual future<> ensure_role_operations_are_enabled() { return make_ready_future<>(); }
|
|
||||||
|
|
||||||
///
|
///
|
||||||
/// \returns an exceptional future with \ref role_already_exists for a role that has previously been created.
|
/// \returns an exceptional future with \ref role_already_exists for a role that has previously been created.
|
||||||
///
|
///
|
||||||
|
|||||||
68
auth/roles-metadata.cc
Normal file
68
auth/roles-metadata.cc
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (C) 2018-present ScyllaDB
|
||||||
|
*/
|
||||||
|
|
||||||
|
/*
|
||||||
|
* SPDX-License-Identifier: LicenseRef-ScyllaDB-Source-Available-1.0
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include "auth/roles-metadata.hh"
|
||||||
|
|
||||||
|
#include <seastar/core/format.hh>
|
||||||
|
#include <seastar/core/shared_ptr.hh>
|
||||||
|
#include <seastar/core/sstring.hh>
|
||||||
|
|
||||||
|
#include "auth/common.hh"
|
||||||
|
#include "cql3/query_processor.hh"
|
||||||
|
#include "cql3/untyped_result_set.hh"
|
||||||
|
|
||||||
|
namespace auth {
|
||||||
|
|
||||||
|
namespace legacy {
|
||||||
|
|
||||||
|
future<bool> default_role_row_satisfies(
|
||||||
|
cql3::query_processor& qp,
|
||||||
|
std::function<bool(const cql3::untyped_result_set_row&)> p,
|
||||||
|
std::optional<std::string> rolename) {
|
||||||
|
const sstring query = seastar::format("SELECT * FROM {}.{} WHERE {} = ?",
|
||||||
|
auth::meta::legacy::AUTH_KS,
|
||||||
|
meta::roles_table::name,
|
||||||
|
meta::roles_table::role_col_name);
|
||||||
|
|
||||||
|
for (auto cl : { db::consistency_level::ONE, db::consistency_level::QUORUM }) {
|
||||||
|
auto results = co_await qp.execute_internal(query, cl
|
||||||
|
, internal_distributed_query_state()
|
||||||
|
, {rolename.value_or(std::string(auth::meta::DEFAULT_SUPERUSER_NAME))}
|
||||||
|
, cql3::query_processor::cache_internal::yes
|
||||||
|
);
|
||||||
|
if (!results->empty()) {
|
||||||
|
co_return p(results->one());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
co_return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
future<bool> any_nondefault_role_row_satisfies(
|
||||||
|
cql3::query_processor& qp,
|
||||||
|
std::function<bool(const cql3::untyped_result_set_row&)> p,
|
||||||
|
std::optional<std::string> rolename) {
|
||||||
|
const sstring query = seastar::format("SELECT * FROM {}.{}", auth::meta::legacy::AUTH_KS, meta::roles_table::name);
|
||||||
|
|
||||||
|
auto results = co_await qp.execute_internal(query, db::consistency_level::QUORUM
|
||||||
|
, internal_distributed_query_state(), cql3::query_processor::cache_internal::no
|
||||||
|
);
|
||||||
|
if (results->empty()) {
|
||||||
|
co_return false;
|
||||||
|
}
|
||||||
|
static const sstring col_name = sstring(meta::roles_table::role_col_name);
|
||||||
|
|
||||||
|
co_return std::ranges::any_of(*results, [&](const cql3::untyped_result_set_row& row) {
|
||||||
|
auto superuser = rolename ? std::string_view(*rolename) : meta::DEFAULT_SUPERUSER_NAME;
|
||||||
|
const bool is_nondefault = row.get_as<sstring>(col_name) != superuser;
|
||||||
|
return is_nondefault && p(row);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace legacy
|
||||||
|
|
||||||
|
} // namespace auth
|
||||||
@@ -8,7 +8,18 @@
|
|||||||
|
|
||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
|
#include <optional>
|
||||||
#include <string_view>
|
#include <string_view>
|
||||||
|
#include <functional>
|
||||||
|
|
||||||
|
#include <seastar/core/future.hh>
|
||||||
|
|
||||||
|
#include "seastarx.hh"
|
||||||
|
|
||||||
|
namespace cql3 {
|
||||||
|
class query_processor;
|
||||||
|
class untyped_result_set_row;
|
||||||
|
}
|
||||||
|
|
||||||
namespace auth {
|
namespace auth {
|
||||||
|
|
||||||
@@ -24,4 +35,26 @@ constexpr std::string_view role_col_name{"role", 4};
|
|||||||
|
|
||||||
} // namespace meta
|
} // namespace meta
|
||||||
|
|
||||||
|
namespace legacy {
|
||||||
|
|
||||||
|
///
|
||||||
|
/// Check that the default role satisfies a predicate, or `false` if the default role does not exist.
|
||||||
|
///
|
||||||
|
future<bool> default_role_row_satisfies(
|
||||||
|
cql3::query_processor&,
|
||||||
|
std::function<bool(const cql3::untyped_result_set_row&)>,
|
||||||
|
std::optional<std::string> rolename = {}
|
||||||
|
);
|
||||||
|
|
||||||
|
///
|
||||||
|
/// Check that any nondefault role satisfies a predicate. `false` if no nondefault roles exist.
|
||||||
|
///
|
||||||
|
future<bool> any_nondefault_role_row_satisfies(
|
||||||
|
cql3::query_processor&,
|
||||||
|
std::function<bool(const cql3::untyped_result_set_row&)>,
|
||||||
|
std::optional<std::string> rolename = {}
|
||||||
|
);
|
||||||
|
|
||||||
|
} // namespace legacy
|
||||||
|
|
||||||
} // namespace auth
|
} // namespace auth
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user