Mirror of https://github.com/dragonflydb/dragonfly.git, synced 2025-05-10 18:05:44 +02:00.
refactor: remove serialization_max_chunk_size for cluster tests (#4316)
This commit is contained in:
parent
af04b558db
commit
8237d8fa81
1 changed file with 5 additions and 5 deletions
|
@@ -1433,7 +1433,7 @@ async def test_migration_with_key_ttl(df_factory):
     assert await nodes[1].client.execute_command("stick k_sticky") == 0


-@dfly_args({"proactor_threads": 4, "cluster_mode": "yes", "serialization_max_chunk_size": 0})
+@dfly_args({"proactor_threads": 4, "cluster_mode": "yes"})
 async def test_network_disconnect_during_migration(df_factory):
     instances = [
         df_factory.create(port=next(next_port), admin_port=next(next_port)) for i in range(2)
@@ -1472,7 +1472,7 @@ async def test_network_disconnect_during_migration(df_factory):

     await proxy.start()

-    await wait_for_status(nodes[0].admin_client, nodes[1].id, "FINISHED", 60)
+    await wait_for_status(nodes[0].admin_client, nodes[1].id, "FINISHED", 300)
     nodes[0].migrations = []
     nodes[0].slots = []
     nodes[1].slots = [(0, 16383)]
@@ -1970,7 +1970,7 @@ async def test_cluster_migration_cancel(df_factory: DflyInstanceFactory):
         assert str(i) == await nodes[1].client.get(f"{{key50}}:{i}")


-@dfly_args({"proactor_threads": 2, "cluster_mode": "yes", "serialization_max_chunk_size": 0})
+@dfly_args({"proactor_threads": 2, "cluster_mode": "yes"})
 @pytest.mark.asyncio
 async def test_cluster_migration_huge_container(df_factory: DflyInstanceFactory):
     instances = [
@@ -2003,7 +2003,7 @@ async def test_cluster_migration_huge_container(df_factory: DflyInstanceFactory)
     await push_config(json.dumps(generate_config(nodes)), [node.admin_client for node in nodes])

     logging.debug("Waiting for migration to finish")
-    await wait_for_status(nodes[0].admin_client, nodes[1].id, "FINISHED")
+    await wait_for_status(nodes[0].admin_client, nodes[1].id, "FINISHED", 30)

     target_data = await StaticSeeder.capture(nodes[1].client)
     assert source_data == target_data
@@ -2433,7 +2433,7 @@ async def test_cluster_memory_consumption_migration(df_factory: DflyInstanceFact

 @pytest.mark.asyncio
-@dfly_args({"proactor_threads": 4, "cluster_mode": "yes", "serialization_max_chunk_size": 0})
+@dfly_args({"proactor_threads": 4, "cluster_mode": "yes"})
 async def test_migration_timeout_on_sync(df_factory: DflyInstanceFactory, df_seeder_factory):
     # Timeout set to 3 seconds because we must first saturate the socket before we get the timeout
     instances = [
|
Loading…
Add table
Add a link
Reference in a new issue