Compare commits

..

2 Commits

Author SHA1 Message Date
Nadav Har'El
f23ba66351 Fix for Unused local variable
Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
2025-12-15 16:27:38 +02:00
Jenkins Promoter
d5641398f5 Update pgo profiles - aarch64 2025-12-15 05:16:31 +02:00
4 changed files with 4 additions and 59 deletions

View File

@@ -304,6 +304,8 @@ future<coordinator_result<>> batch_statement::execute_without_conditions(
}
}));
#endif
verify_batch_size(qp, mutations);
bool mutate_atomic = true;
if (_type != type::LOGGED) {
_stats.batches_pure_unlogged += 1;
@@ -311,7 +313,6 @@ future<coordinator_result<>> batch_statement::execute_without_conditions(
} else {
if (mutations.size() > 1) {
_stats.batches_pure_logged += 1;
verify_batch_size(qp, mutations);
} else {
_stats.batches_unlogged_from_logged += 1;
mutate_atomic = false;

View File

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:80a47fe93866989aaf7e949168fcd308e95841e78c976a61f9eac20bfdd34d96
size 6448960
oid sha256:3cbe2dd05945f8fb76ebce2ea70864063d2b282c4d5080af1f290ead43321ab3
size 6444732

View File

@@ -182,7 +182,6 @@ def cleanup_all():
sys.stdout.flush()
shutil.copyfileobj(f, sys.stdout.buffer)
f.close()
scylla_set = set()
print(summary)
# We run the cleanup_all() function on exit for any reason - successful finish

View File

@@ -74,58 +74,3 @@ def test_batch_with_error(cql, table1):
# exceptions::exception_code::SERVER_ERROR, it gets converted to NoHostAvailable by the driver
with pytest.raises(NoHostAvailable, match="Value too large"):
cql.execute(generate_big_batch(table1, 100) + injection_key)
def test_unlogged_batch_size_not_checked(cql, test_keyspace):
    """An UNLOGGED batch must NOT be rejected for exceeding the batch size limit.

    Each statement in an unlogged batch is applied as an independent mutation
    and never touches the system.batchlog table, so the batch's collective
    size is irrelevant. A batch bigger than the fail threshold (1024 KB)
    should therefore execute successfully.
    """
    with new_test_table(cql, test_keyspace, "k int primary key, t text") as table:
        # Assemble ~1100 * ~800-byte inserts -- well past the 1024 KB threshold
        # that would reject a logged multi-partition batch.
        inserts = "\n".join(
            f"INSERT INTO {table} (k, t) VALUES ({i}, '{'x' * 743}')"
            for i in range(1100)
        )
        batch_cql = f"BEGIN UNLOGGED BATCH\n{inserts}\n APPLY BATCH\n"
        # Must not raise despite the oversized batch.
        cql.execute(batch_cql)
        # Confirm every row actually landed.
        rows = cql.execute(f"SELECT COUNT(*) FROM {table}")
        assert rows.one()[0] == 1100
def test_logged_multi_partition_batch_size_checked(cql, test_keyspace):
    """A LOGGED batch spanning multiple partitions IS subject to the size limit.

    Logged multi-partition batches are written to system.batchlog first, so
    their collective size matters; one exceeding the fail threshold (1024 KB)
    must be rejected with a "Batch too large" error.
    """
    with new_test_table(cql, test_keyspace, "k int primary key, t text") as table:
        # ~1100 distinct partitions, ~800 bytes each -- over the 1024 KB limit.
        inserts = "\n".join(
            f"INSERT INTO {table} (k, t) VALUES ({i}, '{'x' * 743}')"
            for i in range(1100)
        )
        batch_cql = f"BEGIN BATCH\n{inserts}\n APPLY BATCH\n"
        # The oversized logged batch must be refused.
        with pytest.raises(InvalidRequest, match="Batch too large"):
            cql.execute(batch_cql)
def test_logged_single_partition_batch_size_not_checked(cql, test_keyspace):
    """A LOGGED batch confined to one partition is NOT subject to the size limit.

    When every statement targets the same partition, a logged batch skips
    system.batchlog entirely, so its collective size is irrelevant and a
    batch bigger than the fail threshold (1024 KB) should still succeed.
    """
    with new_test_table(cql, test_keyspace, "k int, c int, t text, primary key (k, c)") as table:
        # All rows share partition key k=1; only the clustering column varies.
        inserts = "\n".join(
            f"INSERT INTO {table} (k, c, t) VALUES (1, {i}, '{'x' * 743}')"
            for i in range(1100)
        )
        batch_cql = f"BEGIN BATCH\n{inserts}\n APPLY BATCH\n"
        # Must not raise: single-partition logged batches bypass the size check.
        cql.execute(batch_cql)
        # Confirm all rows were written to partition k=1.
        rows = cql.execute(f"SELECT COUNT(*) FROM {table} WHERE k = 1")
        assert rows.one()[0] == 1100