From 92c3749c8c4055b1ab5560a3a8b5a85c39e4d08d Mon Sep 17 00:00:00 2001
From: adiholden
Date: Sun, 5 Jan 2025 13:18:13 +0200
Subject: [PATCH] fix(tests): check cluster big snapshot in unit test (#4403)

fix tests: check cluster big snapshot in unit test

Signed-off-by: adi_holden
---
 src/server/cluster/cluster_family_test.cc | 12 +++++
 tests/dragonfly/cluster_test.py           | 59 -----------------------
 2 files changed, 12 insertions(+), 59 deletions(-)

diff --git a/src/server/cluster/cluster_family_test.cc b/src/server/cluster/cluster_family_test.cc
index 074ba2f5a..8c609a952 100644
--- a/src/server/cluster/cluster_family_test.cc
+++ b/src/server/cluster/cluster_family_test.cc
@@ -619,6 +619,18 @@ TEST_F(ClusterFamilyTest, ClusterFirstConfigCallDropsEntriesNotOwnedByNode) {
   ExpectConditionWithinTimeout([&]() { return CheckedInt({"dbsize"}) == 0; });
 }
 
+TEST_F(ClusterFamilyTest, SnapshotBiggerThanMaxMemory) {
+  InitWithDbFilename();
+  ConfigSingleNodeCluster(GetMyId());
+
+  Run({"debug", "populate", "50000"});
+  EXPECT_EQ(Run({"save", "df"}), "OK");
+
+  max_memory_limit = 10000;
+  auto save_info = service_->server_family().GetLastSaveInfo();
+  EXPECT_EQ(Run({"dfly", "load", save_info.file_name}), "OK");
+}
+
 TEST_F(ClusterFamilyTest, Keyslot) {
   // Example from Redis' command reference: https://redis.io/commands/cluster-keyslot/
   EXPECT_THAT(Run({"cluster", "keyslot", "somekey"}), IntArg(11'058));
diff --git a/tests/dragonfly/cluster_test.py b/tests/dragonfly/cluster_test.py
index 5d9f9e6a9..b90d48665 100644
--- a/tests/dragonfly/cluster_test.py
+++ b/tests/dragonfly/cluster_test.py
@@ -2499,62 +2499,3 @@ async def test_migration_timeout_on_sync(df_factory: DflyInstanceFactory, df_see
     await push_config(json.dumps(generate_config(nodes)), [node.admin_client for node in nodes])
 
     assert (await StaticSeeder.capture(nodes[1].client)) == start_capture
-
-
-@pytest.mark.slow
-@dfly_args({"proactor_threads": 4, "cluster_mode": "yes"})
-async def test_snapshot_bigger_than_maxmemory(df_factory: DflyInstanceFactory, df_seeder_factory):
-    """
-    Test load snapshot that is bigger than max_memory, but contains more slots and should be load without OOM:
-
-    1) Create snapshot
-    2) split slots between 2 instances and reduce maxmemory
-    3) load snapshot to both instances
-
-    The result should be the same: instances contain all the data that was in snapshot
-    """
-    dbfilename = f"dump_{tmp_file_name()}"
-    instances = [
-        df_factory.create(
-            port=next(next_port), admin_port=next(next_port), maxmemory="3G", dbfilename=dbfilename
-        ),
-        df_factory.create(port=next(next_port), admin_port=next(next_port), maxmemory="1G"),
-    ]
-    df_factory.start_all(instances)
-
-    nodes = [await create_node_info(n) for n in instances]
-
-    nodes[0].slots = [(0, 16383)]
-    nodes[1].slots = []
-
-    logging.debug("Push initial config")
-    await push_config(json.dumps(generate_config(nodes)), [node.admin_client for node in nodes])
-
-    logging.debug("create data")
-    seeder = df_seeder_factory.create(
-        keys=30000, val_size=10000, port=nodes[0].instance.port, cluster_mode=True
-    )
-    await seeder.run(target_deviation=0.05)
-    capture = await seeder.capture()
-
-    logging.debug("SAVE")
-    await nodes[0].client.execute_command("SAVE", "rdb")
-
-    logging.debug("flushall")
-    for node in nodes:
-        await node.client.execute_command("flushall")
-        await node.client.execute_command("CONFIG SET maxmemory 1G")
-
-    nodes[0].slots = [(0, 8191)]
-    nodes[1].slots = [(8192, 16383)]
-
-    await push_config(json.dumps(generate_config(nodes)), [node.admin_client for node in nodes])
-
-    for node in nodes:
-        await node.client.execute_command("DFLY", "LOAD", f"{dbfilename}.rdb")
-
-    assert await seeder.compare(capture, nodes[0].instance.port)
-
-    # prevent saving during shutdown
-    for node in nodes:
-        await node.client.execute_command("flushall")
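Note: the removed pytest exercised loading a snapshot bigger than maxmemory across two cluster nodes, while the new C++ unit test pins down the same load-over-limit behavior in-process. For manually reproducing the single-node scenario against a running server, here is a minimal sketch, assuming a local Dragonfly started with --dbfilename=dump on port 6379 and the redis-py client; the key count and maxmemory value are illustrative, not taken from the patch:

    # Minimal sketch: reload a snapshot after shrinking maxmemory below its footprint.
    # Assumes a local Dragonfly on port 6379 started with --dbfilename=dump.
    import redis

    r = redis.Redis(port=6379, decode_responses=True)

    r.execute_command("DEBUG", "POPULATE", "50000")  # seed data, as the unit test does
    r.execute_command("SAVE", "RDB")                 # snapshot to dump.rdb
    r.execute_command("FLUSHALL")                    # drop the data set before reloading
    # Shrink the limit below the snapshot's memory footprint (value illustrative).
    r.execute_command("CONFIG", "SET", "maxmemory", "10mb")
    # The load should still succeed without OOM.
    assert r.execute_command("DFLY", "LOAD", "dump.rdb") == "OK"

That DFLY LOAD succeeds even though maxmemory sits below the snapshot's size is exactly the behavior the new unit test asserts via max_memory_limit = 10000.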