fix: hiredis requires df to report version >7.4 (#4474)

* bump up redis version in info command
* add compatibility test
* bump up py dependencies
* fix warnings and deprecated functions

---------

Signed-off-by: kostas <kostas@dragonflydb.io>
This commit is contained in:
Kostas Kyrimis 2025-01-20 10:38:37 +02:00 committed by GitHub
parent 6f3c6e3d57
commit b017cdd1a0
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
11 changed files with 45 additions and 26 deletions

View file

@ -305,7 +305,7 @@ TEST_F(DflyEngineTestWithRegistry, Hello) {
EXPECT_THAT(
resp.GetVec(),
ElementsAre("server", "redis", "version", "7.2.0", "dragonfly_version",
ElementsAre("server", "redis", "version", "7.4.0", "dragonfly_version",
ArgType(RespExpr::STRING), "proto", IntArg(2), "id", ArgType(RespExpr::INT64),
"mode", testing::AnyOf("standalone", "cluster"), "role", "master"));
@ -313,7 +313,7 @@ TEST_F(DflyEngineTestWithRegistry, Hello) {
ASSERT_THAT(resp, ArrLen(14));
EXPECT_THAT(
resp.GetVec(),
ElementsAre("server", "redis", "version", "7.2.0", "dragonfly_version",
ElementsAre("server", "redis", "version", "7.4.0", "dragonfly_version",
ArgType(RespExpr::STRING), "proto", IntArg(3), "id", ArgType(RespExpr::INT64),
"mode", testing::AnyOf("standalone", "cluster"), "role", "master"));

View file

@ -230,7 +230,7 @@ using strings::HumanReadableNumBytes;
namespace {
const auto kRedisVersion = "7.2.0";
const auto kRedisVersion = "7.4.0";
using EngineFunc = void (ServerFamily::*)(CmdArgList args, const CommandContext&);

View file

@ -157,7 +157,7 @@ async def test_acl_cat_commands_multi_exec_squash(df_factory):
await client.execute_command(f"SET x{x} {x}")
await client.execute_command("EXEC")
await client.close()
await client.aclose()
client = aioredis.Redis(port=df.port, decode_responses=True)
# NOPERM while executing multi
@ -170,7 +170,7 @@ async def test_acl_cat_commands_multi_exec_squash(df_factory):
with pytest.raises(redis.exceptions.NoPermissionError):
await client.execute_command(f"SET x{x} {x}")
await client.close()
await client.aclose()
# NOPERM between multi and exec
admin_client = aioredis.Redis(port=df.port, decode_responses=True)
@ -198,8 +198,8 @@ async def test_acl_cat_commands_multi_exec_squash(df_factory):
logging.debug(f"Result is: {res}")
assert res[0].args[0] == "kk ACL rules changed between the MULTI and EXEC", res
await admin_client.close()
await client.close()
await admin_client.aclose()
await client.aclose()
# Testing acl commands
client = aioredis.Redis(port=df.port, decode_responses=True)
@ -598,7 +598,7 @@ async def test_default_user_bug(df_server):
client = df_server.client()
await client.execute_command("ACL SETUSER default -@all")
await client.close()
await client.aclose()
client = df_server.client()
@ -616,7 +616,7 @@ async def test_auth_resp3_bug(df_factory):
await client.execute_command("ACL SETUSER kostas +@all ON >tmp")
res = await client.execute_command("HELLO 3 AUTH kostas tmp")
assert res["server"] == "redis"
assert res["version"] == "7.2.0"
assert res["version"] == "7.4.0"
assert res["proto"] == 3
assert res["mode"] == "standalone"
assert res["role"] == "master"

View file

@ -161,4 +161,4 @@ async def test_cluster_mgr(df_factory):
for i in range(NODES):
assert run_cluster_mgr(["--action=detach", f"--target_port={replicas[i].port}"])
await check_cluster_data(client)
await client.close()
await client.aclose()

View file

@ -361,6 +361,7 @@ async def test_emulated_cluster_with_replicas(df_factory):
"connected": True,
"epoch": "0",
"flags": "myself,master",
"hostname": "",
"last_ping_sent": "0",
"last_pong_rcvd": "0",
"master_id": "-",
@ -375,6 +376,7 @@ async def test_emulated_cluster_with_replicas(df_factory):
"connected": True,
"epoch": "0",
"flags": "myself,master",
"hostname": "",
"last_ping_sent": "0",
"last_pong_rcvd": "0",
"master_id": "-",
@ -386,6 +388,7 @@ async def test_emulated_cluster_with_replicas(df_factory):
"connected": True,
"epoch": "0",
"flags": "slave",
"hostname": "",
"last_ping_sent": "0",
"last_pong_rcvd": "0",
"master_id": master_id,
@ -397,6 +400,7 @@ async def test_emulated_cluster_with_replicas(df_factory):
"connected": True,
"epoch": "0",
"flags": "slave",
"hostname": "",
"last_ping_sent": "0",
"last_pong_rcvd": "0",
"master_id": master_id,
@ -459,6 +463,7 @@ async def test_cluster_managed_service_info(df_factory):
"connected": True,
"epoch": "0",
"flags": "myself,master",
"hostname": "",
"last_ping_sent": "0",
"last_pong_rcvd": "0",
"master_id": "-",
@ -472,6 +477,7 @@ async def test_cluster_managed_service_info(df_factory):
"connected": True,
"epoch": "0",
"flags": "slave",
"hostname": "",
"last_ping_sent": "0",
"last_pong_rcvd": "0",
"master_id": master_id,
@ -1667,7 +1673,7 @@ async def test_cluster_fuzzymigration(
# Compare capture
assert await seeder.compare(capture, nodes[0].instance.port)
await asyncio.gather(*[c.close() for c in counter_connections])
await asyncio.gather(*[c.aclose() for c in counter_connections])
@dfly_args({"proactor_threads": 4, "cluster_mode": "yes"})

View file

@ -9,7 +9,12 @@ from threading import Thread
import random
import ssl
from redis import asyncio as aioredis
import redis as base_redis
import hiredis
from redis.cache import CacheConfig
from redis.exceptions import ConnectionError as redis_conn_error, ResponseError
import async_timeout
from dataclasses import dataclass
from aiohttp import ClientSession
@ -436,7 +441,7 @@ async def test_subscribers_with_active_publisher(df_server: DflyInstance, max_co
client = aioredis.Redis(connection_pool=async_pool)
for i in range(0, 2000):
await client.publish("channel", f"message-{i}")
await client.close()
await client.aclose()
async def channel_reader(channel: aioredis.client.PubSub):
for i in range(0, 150):
@ -774,7 +779,7 @@ async def test_reject_non_tls_connections_on_tls(with_tls_server_args, df_factor
client = server.client(password="XXX")
with pytest.raises((ResponseError)):
await client.dbsize()
await client.close()
await client.aclose()
client = server.admin_client(password="XXX")
assert await client.dbsize() == 0
@ -804,7 +809,7 @@ async def test_tls_reject(
client = server.client(**with_tls_client_args, ssl_cert_reqs=None)
await client.ping()
await client.close()
await client.aclose()
client = server.client(**with_tls_client_args)
with pytest.raises(redis_conn_error):
@ -1038,3 +1043,10 @@ async def test_lib_name_ver(async_client: aioredis.Redis):
assert len(list) == 1
assert list[0]["lib-name"] == "dragonfly"
assert list[0]["lib-ver"] == "1.2.3.4"
async def test_hiredis(df_factory):
server = df_factory.create(proactor_threads=1)
server.start()
client = base_redis.Redis(port=server.port, protocol=3, cache_config=CacheConfig())
client.ping()

View file

@ -260,7 +260,7 @@ async def test_disconnect_replica(
await c_replica.execute_command("REPLICAOF localhost " + str(master.port))
if crash_type == 0:
await asyncio.sleep(random.random() / 100 + 0.01)
await c_replica.close()
await c_replica.aclose()
replica.stop(kill=True)
else:
await wait_available_async(c_replica)
@ -281,7 +281,7 @@ async def test_disconnect_replica(
async def stable_sync(replica, c_replica, crash_type):
await asyncio.sleep(random.random() / 100)
await c_replica.close()
await c_replica.aclose()
replica.stop(kill=True)
await asyncio.gather(*(stable_sync(*args) for args in replicas_of_type(lambda t: t == 1)))
@ -309,7 +309,7 @@ async def test_disconnect_replica(
logging.debug("Check phase 3 replica survived")
for replica, c_replica, _ in replicas_of_type(lambda t: t == 2):
assert await c_replica.ping()
await c_replica.close()
await c_replica.aclose()
logging.debug("Stop streaming")
seeder.stop()
@ -2454,7 +2454,7 @@ async def test_empty_hash_map_replicate_old_master(df_factory):
await check_if_empty()
assert await old_c_master.execute_command(f"EXISTS foo") == 1
await old_c_master.close()
await old_c_master.aclose()
async def assert_body(client, result=1, state="online", node_role="slave"):
async with async_timeout.timeout(10):
@ -2478,7 +2478,7 @@ async def test_empty_hash_map_replicate_old_master(df_factory):
await assert_body(client_b, result=0)
index = index + 1
await client_b.close()
await client_b.aclose()
# This Test was introduced in response to a bug when replicating empty hash maps with

View file

@ -1,14 +1,14 @@
async-timeout==4.0.2
async-timeout==4.0.3
attrs==22.1.0
Deprecated==1.2.13
iniconfig==1.1.1
packaging==21.3
packaging==23.1
pluggy==1.0.0
py==1.11.0
pyparsing==3.0.9
pytest==7.1.2
redis==5.0.0
tomli==2.0.2
redis==5.2.1
tomli==2.0.1
wrapt==1.14.1
pytest-asyncio==0.20.1
pytest-repeat==0.9.3
@ -26,3 +26,4 @@ pytest-icdiff==0.8
pytest-timeout==2.2.0
asyncio==3.4.3
fakeredis[json]==2.26.2
hiredis==2.4.0

View file

@ -597,7 +597,7 @@ async def test_big_value_serialization_memory_limit(df_factory, cont_type):
assert info["used_memory_peak_rss"] < (one_gb * 1.3)
await client.execute_command("FLUSHALL")
await client.close()
await client.aclose()
@dfly_args(

View file

@ -528,7 +528,7 @@ class DflySeeder:
async def _close_client(self, client):
if not self.cluster_mode:
await client.connection_pool.disconnect()
await client.close()
await client.aclose()
async def _capture_db(self, port, target_db, keys):
client = self._make_client(port=port, db=target_db)

View file

@ -41,7 +41,7 @@ async def post_to_redis(sem, db_name, index):
log.info(f"after pipe.execute {key_index}")
finally:
# log.info(f"before close {index}")
await redis_client.close()
await redis_client.aclose()
# log.info(f"after close {index} {len(results)}")