mirror of
https://github.com/scylladb/scylladb.git
synced 2026-05-12 19:02:12 +00:00
test/cluster/mv: Adjust test to RF-rack-valid keyspaces
We adjust the test in the directory so that all of the used keyspaces are RF-rack-valid throughout their execution. Refs scylladb/scylladb#23428 Closes scylladb/scylladb#23490
This commit is contained in:
committed by
Piotr Dulikowski
parent
df64985a4e
commit
10589e966f
@@ -30,7 +30,12 @@ logger = logging.getLogger(__name__)
|
||||
@skip_mode('release', 'error injections are not supported in release mode')
|
||||
async def test_mv_tablets_empty_ip(manager: ManagerClient):
|
||||
cfg = {'tablets_mode_for_new_keyspaces': 'enabled'}
|
||||
servers = await manager.servers_add(4, config = cfg)
|
||||
servers = await manager.servers_add(4, config = cfg, property_file=[
|
||||
{"dc": "dc1", "rack": "r1"},
|
||||
{"dc": "dc1", "rack": "r1"},
|
||||
{"dc": "dc1", "rack": "r2"},
|
||||
{"dc": "dc1", "rack": "r3"}
|
||||
])
|
||||
|
||||
cql = manager.get_cql()
|
||||
async with new_test_keyspace(manager, "WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor': 3}") as ks:
|
||||
@@ -62,11 +67,13 @@ async def test_mv_tablets_empty_ip(manager: ManagerClient):
|
||||
tasks = [asyncio.create_task(do_writes(i)) for i in range(concurrency)]
|
||||
|
||||
logger.info("Stopping the last node")
|
||||
await manager.server_stop_gracefully(servers[-1].server_id)
|
||||
replace_cfg = ReplaceConfig(replaced_id = servers[-1].server_id, reuse_ip_addr = False, use_host_id = True)
|
||||
replaced_node = servers[-1]
|
||||
await manager.server_stop_gracefully(replaced_node.server_id)
|
||||
replace_cfg = ReplaceConfig(replaced_id = replaced_node.server_id, reuse_ip_addr = False, use_host_id = True)
|
||||
|
||||
logger.info("Replacing the last node")
|
||||
await manager.server_add(replace_cfg=replace_cfg, config = cfg)
|
||||
await manager.server_add(replace_cfg=replace_cfg, config = cfg,
|
||||
property_file={"dc": replaced_node.datacenter, "rack": replaced_node.rack})
|
||||
|
||||
logger.info("Stopping writes")
|
||||
stop_event.set()
|
||||
|
||||
@@ -33,7 +33,12 @@ async def test_tablet_mv_replica_pairing_during_replace(manager: ManagerClient):
|
||||
the pairing would be shifted during replace.
|
||||
"""
|
||||
|
||||
servers = await manager.servers_add(4)
|
||||
servers = await manager.servers_add(4, property_file=[
|
||||
{"dc": "dc1", "rack": "r1"},
|
||||
{"dc": "dc1", "rack": "r1"},
|
||||
{"dc": "dc1", "rack": "r2"},
|
||||
{"dc": "dc1", "rack": "r2"}
|
||||
])
|
||||
cql = manager.get_cql()
|
||||
async with new_test_keyspace(manager, "WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor': 2} AND tablets = {'initial': 1}") as ks:
|
||||
await cql.run_async(f"CREATE TABLE {ks}.test (pk int PRIMARY KEY, c int)")
|
||||
@@ -62,7 +67,10 @@ async def test_tablet_mv_replica_pairing_during_replace(manager: ManagerClient):
|
||||
|
||||
logger.info('Replacing the node')
|
||||
replace_cfg = ReplaceConfig(replaced_id = server_to_replace.server_id, reuse_ip_addr = False, use_host_id = True)
|
||||
replace_task = asyncio.create_task(manager.server_add(replace_cfg))
|
||||
replace_task = asyncio.create_task(manager.server_add(replace_cfg, property_file={
|
||||
"dc": server_to_replace.datacenter,
|
||||
"rack": server_to_replace.rack
|
||||
}))
|
||||
|
||||
await coord_log.wait_for('tablet_transition_updates: waiting', from_mark=coord_mark)
|
||||
|
||||
|
||||
@@ -67,8 +67,10 @@ async def test_mv_admission_control_exception(manager: ManagerClient) -> None:
|
||||
@skip_mode('release', "error injections aren't enabled in release mode")
|
||||
async def test_mv_retried_writes_reach_all_replicas(manager: ManagerClient) -> None:
|
||||
node_count = 4
|
||||
servers = await manager.servers_add(node_count - 1, config={'error_injections_at_startup': ['update_backlog_immediately'], 'tablets_mode_for_new_keyspaces': 'enabled'})
|
||||
server = await manager.server_add(config={'error_injections_at_startup': ['view_update_limit', 'delay_before_remote_view_update', 'update_backlog_immediately'], 'tablets_mode_for_new_keyspaces': 'enabled'})
|
||||
cfg = {'error_injections_at_startup': ['update_backlog_immediately'], 'tablets_mode_for_new_keyspaces': 'enabled'}
|
||||
cfg_slow = {'error_injections_at_startup': ['view_update_limit', 'delay_before_remote_view_update', 'update_backlog_immediately'], 'tablets_mode_for_new_keyspaces': 'enabled'}
|
||||
servers = await manager.servers_add(node_count - 1, config=cfg, auto_rack_dc="dc1")
|
||||
server = await manager.server_add(config=cfg_slow, property_file={"dc": servers[0].datacenter, "rack": servers[0].rack})
|
||||
|
||||
cql, hosts = await manager.get_ready_cql(servers)
|
||||
async with new_test_keyspace(manager, "WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor': 3} AND tablets = {'initial': 1}") as ks:
|
||||
|
||||
@@ -177,7 +177,12 @@ async def test_mv_update_on_pending_replica(manager: ManagerClient, intranode):
|
||||
# during this time.
|
||||
@pytest.mark.asyncio
|
||||
async def test_mv_write_to_dead_node(manager: ManagerClient):
|
||||
servers = await manager.servers_add(4)
|
||||
servers = await manager.servers_add(4, property_file=[
|
||||
{"dc": "dc1", "rack": "r1"},
|
||||
{"dc": "dc1", "rack": "r2"},
|
||||
{"dc": "dc1", "rack": "r3"},
|
||||
{"dc": "dc1", "rack": "r3"}
|
||||
])
|
||||
|
||||
cql = manager.get_cql()
|
||||
async with new_test_keyspace(manager, "WITH replication = {'class': 'NetworkTopologyStrategy', 'replication_factor': 3}") as ks:
|
||||
|
||||
Reference in New Issue
Block a user