Compare commits

...

5 Commits

Author SHA1 Message Date
copilot-swe-agent[bot]
2295e692d1 Remove more redundant wait_for_cql_and_get_hosts calls
Removed redundant waits from:
- test_change_replication_factor_1_to_0.py
- test_read_repair.py
- test_topology_ops.py
- test_topology_ops_encrypted.py
- test_writes_to_previous_cdc_generations.py (2 occurrences)
- test_zero_token_nodes_topology_ops.py

All removed waits were immediately after server_add/servers_add calls
where the driver now automatically waits for new hosts.

Co-authored-by: tgrabiec <283695+tgrabiec@users.noreply.github.com>
2026-01-07 18:58:28 +00:00
copilot-swe-agent[bot]
0f9f138683 Remove redundant wait_for_cql_and_get_hosts calls after server_add/servers_add
Since manager_client.server_add() and servers_add() now automatically wait
for the driver to see new hosts, manual waits immediately after adding servers
are no longer needed when the return value is not used.

Removed redundant waits from:
- test_cdc_generation_publishing.py
- test_commitlog.py
- test_encryption.py (3 occurrences)
- test_sstable_set.py
- test_tablets.py
- test_tablets2.py (6 occurrences)

Kept waits where:
- Return value (hosts) is used for subsequent operations
- Wait is for ALL servers (old + new), not just newly added ones
- Wait occurs after server_start/restart (which don't auto-wait)

Co-authored-by: tgrabiec <283695+tgrabiec@users.noreply.github.com>
2026-01-07 18:55:11 +00:00
copilot-swe-agent[bot]
4dd337f9b6 Also wait for driver after initial connection in server_add/servers_add
Co-authored-by: tgrabiec <283695+tgrabiec@users.noreply.github.com>
2026-01-07 18:36:53 +00:00
copilot-swe-agent[bot]
76eaed0f61 Add driver wait in server_add and servers_add methods
Co-authored-by: tgrabiec <283695+tgrabiec@users.noreply.github.com>
2026-01-07 18:33:22 +00:00
copilot-swe-agent[bot]
633f94273f Initial plan 2026-01-07 18:28:32 +00:00
14 changed files with 15 additions and 23 deletions

View File

@@ -95,7 +95,6 @@ async def test_multiple_unpublished_cdc_generations(request, manager: ManagerCli
servers += await manager.servers_add(3)
cql = manager.get_cql()
await wait_for_cql_and_get_hosts(cql, servers, time.time() + 60)
gen_timestamps = set[datetime]()

View File

@@ -39,7 +39,6 @@ async def test_change_replication_factor_1_to_0(request: pytest.FixtureRequest,
await cql.run_async(f"create table {ks}.t (pk int primary key)")
srvs = await manager.running_servers()
await wait_for_cql_and_get_hosts(cql, srvs, time.time() + 60)
stmt = cql.prepare(f"SELECT * FROM {ks}.t where pk = ?")
stmt.consistency_level = ConsistencyLevel.LOCAL_QUORUM

View File

@@ -36,7 +36,6 @@ async def test_reboot(request, manager: ManagerClient):
'commitlog_sync': 'batch'
})
cql = manager.cql
await wait_for_cql_and_get_hosts(cql, [server_info], time.time() + 60)
logger.info("Node started")
random_tables = RandomTables(request.node.name, manager, "ks", 1)

View File

@@ -55,7 +55,6 @@ async def test_file_streaming_respects_encryption(manager: ManagerClient, workdi
await manager.api.disable_tablet_balancing(servers[0].ip_addr)
cql = manager.cql
await wait_for_cql_and_get_hosts(cql, servers, time.time() + 60)
cql.execute("CREATE KEYSPACE ks WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor': 1} AND tablets = {'initial': 1};")
cql.execute(f"""CREATE TABLE ks.t(pk text primary key) WITH scylla_encryption_options = {{
'cipher_algorithm' : 'AES/ECB/PKCS5Padding',
@@ -151,7 +150,6 @@ async def _smoke_test(manager: ManagerClient, key_provider: KeyProviderFactory,
servers: list[ServerInfo] = await manager.servers_add(servers_num = num_servers, config=cfg)
cql = manager.cql
await wait_for_cql_and_get_hosts(cql, servers, time.time() + 60)
async with await create_ks(manager) as ks:
# to reduce test time, create one cf for every alg/len combo we test.
@@ -436,7 +434,6 @@ async def test_system_auth_encryption(manager: ManagerClient, tmpdir):
servers: list[ServerInfo] = await manager.servers_add(servers_num = 1, config=cfg,
driver_connect_opts={'auth_provider': PlainTextAuthProvider(username='cassandra', password='cassandra')})
cql = manager.cql
await wait_for_cql_and_get_hosts(cql, servers, time.time() + 60)
async def grep_database_files(pattern: str, path: str, files: str, expect:bool):
pattern_found_counter = 0

View File

@@ -5,10 +5,9 @@
#
import pytest
import logging
import time
from test.pylib.manager_client import ManagerClient
from test.pylib.util import unique_name, wait_for_cql_and_get_hosts
from test.pylib.util import unique_name
from test.cluster.util import new_test_keyspace
@@ -52,8 +51,6 @@ async def test_not_enough_token_owners(manager: ManagerClient):
cql = manager.get_cql()
await wait_for_cql_and_get_hosts(cql, [server_a], time.time() + 60)
async with new_test_keyspace(manager, "WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor': 2} AND tablets = { 'enabled': true }") as ks_name:
await cql.run_async(f'CREATE TABLE {ks_name}.tbl (pk int PRIMARY KEY, v int)')
await cql.run_async(f'INSERT INTO {ks_name}.tbl (pk, v) VALUES (1, 1)')

View File

@@ -320,7 +320,6 @@ async def test_read_repair_with_trace_logging(request, manager):
cql = manager.get_cql()
srvs = await manager.running_servers()
await wait_for_cql_and_get_hosts(cql, srvs, time.time() + 60)
async with new_test_keyspace(manager, "WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor': 2};") as ks:
await cql.run_async(f"CREATE TABLE {ks}.t (pk bigint, ck bigint, c int, PRIMARY KEY (pk, ck));")

View File

@@ -27,7 +27,6 @@ async def test_partitioned_sstable_set(manager: ManagerClient, mode):
await manager.api.disable_tablet_balancing(server.ip_addr)
cql = manager.get_cql()
await wait_for_cql_and_get_hosts(cql, [server], time.time() + 60)
if mode == 'tablet':
ks = await create_new_test_keyspace(cql, "WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor': 1} AND tablets = {'initial': 4};")

View File

@@ -1212,7 +1212,6 @@ async def test_drop_keyspace_while_split(manager: ManagerClient):
s0_log = await manager.server_open_log(servers[0].server_id)
cql = manager.get_cql()
await wait_for_cql_and_get_hosts(cql, [servers[0]], time.time() + 60)
await manager.api.disable_tablet_balancing(servers[0].ip_addr)

View File

@@ -481,7 +481,6 @@ async def test_tablet_cleanup(manager: ManagerClient):
cql = manager.get_cql()
n_tablets = 32
n_partitions = 1000
await wait_for_cql_and_get_hosts(cql, servers, time.time() + 60)
await manager.servers_see_each_other(servers)
async with new_test_keyspace(manager, f"WITH replication = {{'class': 'NetworkTopologyStrategy', 'replication_factor': 1}} AND tablets = {{'initial': {n_tablets}}}") as ks:
await cql.run_async(f"CREATE TABLE {ks}.test (pk int PRIMARY KEY);")
@@ -554,7 +553,6 @@ async def test_tablet_cleanup_failure(manager: ManagerClient):
cql = manager.get_cql()
n_tablets = 1
n_partitions = 1000
await wait_for_cql_and_get_hosts(cql, servers, time.time() + 60)
await manager.servers_see_each_other(servers)
async with new_test_keyspace(manager, f"WITH replication = {{'class': 'NetworkTopologyStrategy', 'replication_factor': 1}} AND tablets = {{'initial': {n_tablets}}}") as ks:
await cql.run_async(f"CREATE TABLE {ks}.test (pk int PRIMARY KEY);")
@@ -1133,7 +1131,6 @@ async def test_tablet_storage_freeing(manager: ManagerClient):
servers = [await manager.server_add()]
await manager.api.disable_tablet_balancing(servers[0].ip_addr)
cql = manager.get_cql()
await wait_for_cql_and_get_hosts(cql, servers, time.time() + 60)
logger.info("Create a table with two tablets and populate it with a moderate amount of data.")
n_tablets = 2
@@ -1175,7 +1172,6 @@ async def test_schema_change_during_cleanup(manager: ManagerClient):
servers = [await manager.server_add()]
await manager.api.disable_tablet_balancing(servers[0].ip_addr)
cql = manager.get_cql()
await wait_for_cql_and_get_hosts(cql, servers, time.time() + 60)
cql = manager.get_cql()
async with new_test_keyspace(manager, "WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor': 1} AND tablets = {'initial': 1}") as ks:
@@ -1510,7 +1506,6 @@ async def test_tablet_cleanup_vs_snapshot_race(manager: ManagerClient):
cql = manager.get_cql()
n_tablets = 1
n_partitions = 1000
await wait_for_cql_and_get_hosts(cql, servers, time.time() + 60)
await manager.servers_see_each_other(servers)
async with new_test_keyspace(manager, f"WITH replication = {{'class': 'NetworkTopologyStrategy', 'replication_factor': 1}} AND tablets = {{'initial': {n_tablets}}}") as ks:
await cql.run_async(f"CREATE TABLE {ks}.test (pk int PRIMARY KEY);")

View File

@@ -39,7 +39,6 @@ async def test_topology_ops(request, manager: ManagerClient, tablets_enabled: bo
logger.info("Bootstrapping other nodes")
servers += await manager.servers_add(num_nodes, config=cfg)
await wait_for_cql_and_get_hosts(manager.cql, servers, time.time() + 60)
cql = await reconnect_driver(manager)
finish_writes = await start_writes(cql, rf, ConsistencyLevel.ONE)

View File

@@ -46,7 +46,6 @@ async def test_topology_ops_encrypted(request, manager: ManagerClient, tablets_e
logger.info("Bootstrapping other nodes")
servers += await manager.servers_add(num_nodes, config=cfg)
await wait_for_cql_and_get_hosts(manager.cql, servers, time.time() + 60)
cql = await reconnect_driver(manager)
finish_writes = await start_writes(cql, rf, ConsistencyLevel.ONE)

View File

@@ -67,7 +67,6 @@ async def test_writes_to_recent_previous_cdc_generations(request, manager: Manag
cql = manager.get_cql()
logger.info("Waiting for driver")
await wait_for_cql_and_get_hosts(cql, servers, time.time() + 60)
gen_timestamps = await wait_for_publishing_generations(cql, servers)
@@ -117,7 +116,6 @@ async def test_writes_to_old_previous_cdc_generation(request, manager: ManagerCl
cql = manager.get_cql()
logger.info("Waiting for driver")
await wait_for_cql_and_get_hosts(cql, servers, time.time() + 60)
gen_timestamps = await wait_for_publishing_generations(cql, servers)

View File

@@ -47,7 +47,6 @@ async def test_zero_token_nodes_topology_ops(manager: ManagerClient, tablets_ena
logging.info('Adding the third server')
server_c = await manager.server_add(config=normal_cfg, property_file=get_pf("r2"))
await wait_for_cql_and_get_hosts(manager.cql, [server_a, server_c], time.time() + 60)
finish_writes = await start_writes(manager.cql, 2, ConsistencyLevel.TWO)
logging.info('Adding the fourth server as zero-token')

View File

@@ -535,8 +535,15 @@ class ManagerClient:
if expected_error is None and connect_driver:
if self.cql:
self._driver_update()
# Wait for the driver to see the new host to avoid race conditions
# where CQL operations fail because the driver pool hasn't discovered
# the new host yet. Refs: https://github.com/scylladb/scylladb/pull/28040
await wait_for_cql_and_get_hosts(self.cql, [s_info], time() + 60)
elif start:
await self.driver_connect()
            # Wait for the driver to see the new host after the initial connection
            # Refs: https://github.com/scylladb/scylladb/pull/28040
await wait_for_cql_and_get_hosts(self.cql, [s_info], time() + 60)
return s_info
async def servers_add(self, servers_num: int = 1,
@@ -585,8 +592,15 @@ class ManagerClient:
if expected_error is None:
if self.cql:
self._driver_update()
# Wait for the driver to see the new hosts to avoid race conditions
# where CQL operations fail because the driver pool hasn't discovered
# the new hosts yet. Refs: https://github.com/scylladb/scylladb/pull/28040
await wait_for_cql_and_get_hosts(self.cql, s_infos, time() + 60)
elif start:
await self.driver_connect(**driver_connect_opts)
            # Wait for the driver to see the new hosts after the initial connection
            # Refs: https://github.com/scylladb/scylladb/pull/28040
await wait_for_cql_and_get_hosts(self.cql, s_infos, time() + 60)
return s_infos
async def remove_node(self, initiator_id: ServerNum, server_id: ServerNum,