diff --git a/test/cluster/test_describe.py b/test/cluster/test_describe.py
index 46a693efe7..ed56869b1a 100644
--- a/test/cluster/test_describe.py
+++ b/test/cluster/test_describe.py
@@ -8,6 +8,12 @@ import asyncio
 import pytest
 from test.cluster.util import new_test_keyspace, new_test_table
 from test.pylib.manager_client import ManagerClient
+from test.pylib.util import wait_for
+from cassandra.connection import UnixSocketEndPoint
+from cassandra.policies import WhiteListRoundRobinPolicy
+from test.cluster.conftest import cluster_con
+from time import time
+import os
 
 # The following test verifies that Scylla avoids making an oversized allocation
 # when generating a large create statement when performing a DESCRIBE statement.
@@ -46,3 +52,41 @@
 
     matches = await log.grep("oversized allocation", from_mark=marker)
     assert len(matches) == 0
+
+@pytest.mark.parametrize("mode", ["normal", "maintenance"])
+@pytest.mark.asyncio
+async def test_describe_cluster_sanity(manager: ManagerClient, mode: str):
+    """
+    Parametrized test that DESCRIBE CLUSTER returns correct cluster information
+    in both normal and maintenance modes.
+
+    This test verifies that cluster metadata from gossiper is properly initialized
+    and the cluster name is consistent with system.local in both:
+    - normal mode: standard cluster operation
+    - maintenance mode: node isolated from the cluster
+    """
+
+    if mode == "normal":
+        await manager.server_add()
+        cql = manager.get_cql()
+    else:  # maintenance mode
+        srv = await manager.server_add(config={"maintenance_mode": True}, connect_driver=False)
+        maintenance_socket_path = await manager.server_get_maintenance_socket_path(srv.server_id)
+        async def socket_exists():
+            return True if os.path.exists(maintenance_socket_path) else None
+        await wait_for(socket_exists, time() + 30)
+        socket_endpoint = UnixSocketEndPoint(maintenance_socket_path)
+        cluster = cluster_con([socket_endpoint], load_balancing_policy=WhiteListRoundRobinPolicy([socket_endpoint]))
+        cql = cluster.connect()
+
+    try:
+        system_local_results = await cql.run_async("SELECT cluster_name FROM system.local")
+        assert system_local_results[0].cluster_name != ""  # sanity check
+
+        describe_results = await cql.run_async("DESCRIBE CLUSTER")
+        assert describe_results[0].partitioner == 'org.apache.cassandra.dht.Murmur3Partitioner'
+        assert describe_results[0].snitch == 'org.apache.cassandra.locator.SimpleSnitch'
+        assert describe_results[0].cluster == system_local_results[0].cluster_name
+    finally:
+        if mode == "maintenance":
+            cluster.shutdown()