mirror of
https://github.com/scylladb/scylladb.git
synced 2026-04-19 16:15:07 +00:00
test: Add test_describe_cluster_sanity for DESCRIBE CLUSTER validation
Add parametrized integration test that verifies DESCRIBE CLUSTER returns correct values in both normal and maintenance modes: The parametrization keeps the validation logic (CQL queries and assertions) identical for both modes, while the setup phase is mode-specific. This ensures the same assertions apply to both cluster states: - partitioner is org.apache.cassandra.dht.Murmur3Partitioner - snitch is org.apache.cassandra.locator.SimpleSnitch - cluster name matches system.local cluster_name Signed-off-by: Pavel Emelyanov <xemul@scylladb.com> Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
This commit is contained in:
@@ -8,6 +8,12 @@ import asyncio
|
||||
import pytest
|
||||
from test.cluster.util import new_test_keyspace, new_test_table
|
||||
from test.pylib.manager_client import ManagerClient
|
||||
from test.pylib.util import wait_for
|
||||
from cassandra.connection import UnixSocketEndPoint
|
||||
from cassandra.policies import WhiteListRoundRobinPolicy
|
||||
from test.cluster.conftest import cluster_con
|
||||
from time import time
|
||||
import os
|
||||
|
||||
# The following test verifies that Scylla avoids making an oversized allocation
|
||||
# when generating a large create statement when performing a DESCRIBE statement.
|
||||
@@ -46,3 +52,41 @@ async def test_large_create_statement(manager: ManagerClient):
|
||||
|
||||
matches = await log.grep("oversized allocation", from_mark=marker)
|
||||
assert len(matches) == 0
|
||||
|
||||
@pytest.mark.parametrize("mode", ["normal", "maintenance"])
@pytest.mark.asyncio
async def test_describe_cluster_sanity(manager: ManagerClient, mode: str):
    """
    Parametrized test that DESCRIBE CLUSTER returns correct cluster information
    in both normal and maintenance modes.

    This test verifies that cluster metadata from gossiper is properly initialized
    and the cluster name is consistent with system.local in both:
    - normal mode: standard cluster operation
    - maintenance mode: node isolated from the cluster
    """

    if mode == "normal":
        # Regular server start; the manager's driver connection is reused.
        await manager.server_add()
        cql = manager.get_cql()
    else:  # maintenance mode
        # Start the node isolated and talk to it over its maintenance unix socket.
        srv = await manager.server_add(config={"maintenance_mode": True}, connect_driver=False)
        socket_path = await manager.server_get_maintenance_socket_path(srv.server_id)

        async def socket_ready():
            # wait_for() treats None as "keep waiting"; any non-None value stops it.
            if os.path.exists(socket_path):
                return True
            return None

        await wait_for(socket_ready, time() + 30)
        endpoint = UnixSocketEndPoint(socket_path)
        cluster = cluster_con([endpoint], load_balancing_policy=WhiteListRoundRobinPolicy([endpoint]))
        cql = cluster.connect()

    try:
        local_rows = await cql.run_async("SELECT cluster_name FROM system.local")
        assert local_rows[0].cluster_name != ""  # sanity check

        described = await cql.run_async("DESCRIBE CLUSTER")
        row = described[0]
        assert row.partitioner == 'org.apache.cassandra.dht.Murmur3Partitioner'
        assert row.snitch == 'org.apache.cassandra.locator.SimpleSnitch'
        assert row.cluster == local_rows[0].cluster_name
    finally:
        # Only the maintenance path opened its own driver cluster; close it here.
        if mode == "maintenance":
            cluster.shutdown()
|
||||
|
||||
Reference in New Issue
Block a user