test.py: fix warnings
changes in this commit: 1) rename class from 'TestContext' to 'Context' so pytest will not consider this class as a test 2) extend pytest filterwarnings list to ignore warnings from external libs 3) use datetime.datetime.now(datetime.UTC) instead of datetime.datetime.utcnow() 4) use ResultSet.one() instead of ResultSet[0] Fixes SCYLLADB-904 Fixes SCYLLADB-908 Related SCYLLADB-902 Closes scylladb/scylladb#28956
This commit is contained in:
committed by
Nadav Har'El
parent
d8b283e1fb
commit
755d528135
@@ -61,7 +61,7 @@ async def test_startup_no_auth_response(manager: ManagerClient, build_mode):
|
||||
session = c.connect()
|
||||
logging.info("Performing SELECT(*) FROM system.clients")
|
||||
res = session.execute("SELECT COUNT(*) FROM system.clients WHERE connection_stage = 'AUTHENTICATING' ALLOW FILTERING;")
|
||||
count = res[0][0]
|
||||
count = res.one()[0]
|
||||
logging.info(f"Observed {count} AUTHENTICATING connections...")
|
||||
if count >= num_connections/2:
|
||||
connections_observed = True
|
||||
|
||||
@@ -858,7 +858,8 @@ async def test_backup_broken_streaming(manager: ManagerClient, s3_storage):
|
||||
|
||||
res = cql.execute(f"SELECT COUNT(*) FROM {keyspace}.{table} BYPASS CACHE USING TIMEOUT 600s;")
|
||||
|
||||
assert res[0].count == expected_rows, f"number of rows after restore is incorrect: {res[0].count}"
|
||||
row = res.one()
|
||||
assert row.count == expected_rows, f"number of rows after restore is incorrect: {row.count}"
|
||||
log = await manager.server_open_log(server.server_id)
|
||||
await log.wait_for("fully contained SSTables to local node from object storage", timeout=10)
|
||||
# just make sure we had partially contained sstables as well
|
||||
|
||||
@@ -182,7 +182,7 @@ async def test_tombstone_gc_insert_flush(manager: ManagerClient):
|
||||
async def test_tablet_manual_repair_all_tokens(manager: ManagerClient):
|
||||
servers, cql, hosts, ks, table_id = await create_table_insert_data_for_repair(manager, fast_stats_refresh=False, disable_flush_cache_time=True)
|
||||
token = "all"
|
||||
now = datetime.datetime.utcnow()
|
||||
now = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
|
||||
map1 = await load_tablet_repair_time(cql, hosts[0:1], table_id)
|
||||
|
||||
await guarantee_repair_time_next_second()
|
||||
|
||||
@@ -1324,7 +1324,7 @@ async def create_cluster(manager: ManagerClient, num_dcs: int, num_racks: int, n
|
||||
return servers
|
||||
|
||||
|
||||
class TestContext:
|
||||
class Context:
|
||||
def __init__(self, ks: str, table: str, rf: int, initial_tablets: int, num_keys: int):
|
||||
self.ks = ks
|
||||
self.table = table
|
||||
@@ -1347,7 +1347,7 @@ async def create_and_populate_table(manager: ManagerClient, rf: int = 3, initial
|
||||
ks = await create_new_test_keyspace(cql, f"WITH replication = {{'class': 'NetworkTopologyStrategy', 'replication_factor': {rf}}} AND tablets = {{'initial': {initial_tablets}}}")
|
||||
await cql.run_async(f"CREATE TABLE {ks}.{table} (pk int PRIMARY KEY, c int)")
|
||||
await asyncio.gather(*[cql.run_async(f"INSERT INTO {ks}.{table} (pk, c) VALUES ({k}, 1);") for k in range(num_keys)])
|
||||
yield TestContext(ks, table, rf, initial_tablets, num_keys)
|
||||
yield Context(ks, table, rf, initial_tablets, num_keys)
|
||||
finally:
|
||||
await cql.run_async(f"DROP KEYSPACE {ks}")
|
||||
|
||||
|
||||
@@ -53,6 +53,11 @@ filterwarnings =
|
||||
ignore::DeprecationWarning:importlib._bootstrap
|
||||
ignore::DeprecationWarning:botocore
|
||||
ignore::DeprecationWarning:pytest_elk_reporter
|
||||
ignore::sqlalchemy.exc.MovedIn20Warning:kmip.pie.sqltypes
|
||||
ignore::cryptography.utils.CryptographyDeprecationWarning:kmip
|
||||
ignore:TypeDecorator .* will not produce a cache key:sqlalchemy.exc.SAWarning:kmip
|
||||
ignore:Exception in thread[\s\S]*kmip[\s\S]*shutdown:pytest.PytestUnhandledThreadExceptionWarning
|
||||
|
||||
|
||||
tmp_path_retention_count = 1
|
||||
tmp_path_retention_policy = failed
|
||||
|
||||
Reference in New Issue
Block a user