mirror of
https://github.com/dragonflydb/dragonfly.git
synced 2025-05-11 18:35:46 +02:00
tests: choose open ports randomly (#1569)
* Implement changes to the testing infrastructure to use random ports
* Use psutil to find out the random ports

parent e81b5671b0
commit b55316c0e6

15 changed files with 240 additions and 162 deletions

.github/workflows/regression-tests.yml (vendored, 1 deletion)
@@ -28,7 +28,6 @@ jobs:
       - name: Configure & Build
         run: |
           apt update && apt install -y pip jq
           cmake -B ${GITHUB_WORKSPACE}/build -DCMAKE_BUILD_TYPE=${{matrix.build-type}} -GNinja \
                 -DCMAKE_CXX_COMPILER_LAUNCHER=ccache

@@ -27,7 +27,7 @@
 ABSL_FLAG(std::string, cluster_announce_ip, "", "ip that cluster commands announce to the client");

-ABSL_DECLARE_FLAG(uint32_t, port);
+ABSL_DECLARE_FLAG(int32_t, port);

 namespace dfly {
 namespace {

@@ -60,7 +60,7 @@ extern char** environ;

 using namespace std;

-ABSL_DECLARE_FLAG(uint32_t, port);
+ABSL_DECLARE_FLAG(int32_t, port);
 ABSL_DECLARE_FLAG(uint32_t, memcached_port);
 ABSL_DECLARE_FLAG(uint16_t, admin_port);
 ABSL_DECLARE_FLAG(std::string, admin_bind);

@@ -354,7 +354,21 @@ bool RunEngine(ProactorPool* pool, AcceptServer* acceptor) {
   opts.disable_time_update = false;
   const auto& bind = GetFlag(FLAGS_bind);
   const char* bind_addr = bind.empty() ? nullptr : bind.c_str();
-  auto port = GetFlag(FLAGS_port);
+  int32_t port = GetFlag(FLAGS_port);
+  // The reason for this code is a bit silly. We want to provide a way to
+  // bind any 'random' available port. The way to do that is to call
+  // bind with the argument port 0. However we can't expose this functionality
+  // as is to our users: Since giving --port=0 to redis DISABLES the network
+  // interface that would break users' existing configurations in potentionally
+  // unsafe ways. For that reason the user's --port=-1 means to us 'bind port 0'.
+  if (port == -1) {
+    port = 0;
+  } else if (port < 0 || port > 65535) {
+    LOG(ERROR) << "Bad port number " << port;
+    exit(1);
+  }
+
   auto mc_port = GetFlag(FLAGS_memcached_port);
   string unix_sock = GetFlag(FLAGS_unixsocket);
   bool unlink_uds = false;

@@ -428,6 +442,10 @@ bool RunEngine(ProactorPool* pool, AcceptServer* acceptor) {
       LOG(ERROR) << "Could not open port " << port << ", error: " << ec.message();
       exit(1);
     }
+
+    if (port == 0) {
+      absl::SetFlag(&FLAGS_port, main_listener->socket()->LocalEndpoint().port());
+    }
   }

   if (mc_port > 0 && !tcp_disabled) {

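The mechanism the two hunks above rely on is the standard ephemeral-port trick: bind to port 0, let the kernel pick a free port, then ask the socket which port it actually got (that is what main_listener->socket()->LocalEndpoint().port() returns). A minimal standalone illustration in Python, not Dragonfly code:

    import socket

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(("127.0.0.1", 0))          # port 0: the OS assigns any free port
    s.listen()
    _, chosen_port = s.getsockname()  # read back the port that was actually bound
    print("listening on", chosen_port)
    s.close()
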
@@ -65,7 +65,9 @@ struct MaxMemoryFlag {
 static bool AbslParseFlag(std::string_view in, MaxMemoryFlag* flag, std::string* err);
 static std::string AbslUnparseFlag(const MaxMemoryFlag& flag);

-ABSL_FLAG(uint32_t, port, 6379, "Redis port");
+ABSL_FLAG(int32_t, port, 6379,
+          "Redis port. 0 disables the port, -1 will bind on a random available port.");
+
 ABSL_FLAG(uint32_t, memcached_port, 0, "Memcached port");

 ABSL_FLAG(uint32_t, num_shards, 0, "Number of database shards, 0 - to choose automatically");

@@ -37,7 +37,7 @@ ABSL_FLAG(int, master_connect_timeout_ms, 20000,
           "Timeout for establishing connection to a replication master");
 ABSL_FLAG(int, master_reconnect_timeout_ms, 1000,
           "Timeout for re-establishing connection to a replication master");
-ABSL_DECLARE_FLAG(uint32_t, port);
+ABSL_DECLARE_FLAG(int32_t, port);

 namespace dfly {

@@ -828,8 +828,11 @@ void DflyShardReplica::StableSyncDflyAcksFb(Context* cntx) {
 DflyShardReplica::DflyShardReplica(ServerContext server_context, MasterContext master_context,
                                    uint32_t flow_id, Service* service,
                                    std::shared_ptr<MultiShardExecution> multi_shard_exe)
-    : ProtocolClient(server_context), service_(*service), master_context_(master_context),
-      multi_shard_exe_(multi_shard_exe), flow_id_(flow_id) {
+    : ProtocolClient(server_context),
+      service_(*service),
+      master_context_(master_context),
+      multi_shard_exe_(multi_shard_exe),
+      flow_id_(flow_id) {
   use_multi_shard_exe_sync_ = GetFlag(FLAGS_enable_multi_shard_sync);
   executor_ = std::make_unique<JournalExecutor>(service);
 }

@@ -90,7 +90,7 @@ ABSL_FLAG(ReplicaOfFlag, replicaof, ReplicaOfFlag{},
           "to replicate. "
           "Format should be <IPv4>:<PORT> or host:<PORT> or [<IPv6>]:<PORT>");

-ABSL_DECLARE_FLAG(uint32_t, port);
+ABSL_DECLARE_FLAG(int32_t, port);
 ABSL_DECLARE_FLAG(bool, cache_mode);
 ABSL_DECLARE_FLAG(uint32_t, hz);
 ABSL_DECLARE_FLAG(bool, tls);

@@ -2,7 +2,10 @@ import pytest
 import time
 import subprocess
 import aiohttp
+import logging
 import os
+import psutil
+from typing import Optional

 from prometheus_client.parser import text_string_to_metric_families
 from redis.asyncio import Redis as RedisClient

@@ -37,27 +40,60 @@ class DflyInstance:
     def __init__(self, params: DflyParams, args):
         self.args = args
         self.params = params
-        self.proc = None
+        self.proc: Optional[subprocess.Popen] = None
+        self._client: Optional[RedisClient] = None
+
+        self.dynamic_port = False
+        if self.params.existing_port:
+            self._port = self.params.existing_port
+        elif "port" in self.args:
+            self._port = int(self.args["port"])
+        else:
+            # Tell DF to choose a random open port.
+            # We'll find out what port it is using lsof.
+            self.args["port"] = -1
+            self._port = None
+            self.dynamic_port = True

     def __del__(self):
         assert self.proc == None

     def client(self, *args, **kwargs) -> RedisClient:
         return RedisClient(port=self.port, *args, **kwargs)

     def start(self):
-        self._start()
         if self.params.existing_port:
             return

+        self._start()
+        self._wait_for_server()
+
+    def _wait_for_server(self):
         # Give Dragonfly time to start and detect possible failure causes
         # Gdb starts slowly
-        time.sleep(START_DELAY if not self.params.gdb else START_GDB_DELAY)
+        delay = START_DELAY if not self.params.gdb else START_GDB_DELAY

-        self._check_status()
+        # Wait until the process is listening on the port.
+        s = time.time()
+        while time.time() - s < delay:
+            self._check_status()
+            try:
+                self.get_port_from_psutil()
+                logging.debug(
+                    f"Process started after {time.time() - s:.2f} seconds. port={self.port}"
+                )
+                break
+            except RuntimeError:
+                time.sleep(0.05)
+        else:
+            raise DflyStartException("Process didn't start listening on port in time")

     def stop(self, kill=False):
         proc, self.proc = self.proc, None
         if proc is None:
             return

-        print(f"Stopping instance on {self.port}")
+        logging.debug(f"Stopping instance on {self._port}")
         try:
             if kill:
                 proc.kill()

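The retry loop added in _wait_for_server is a poll-until-ready pattern: keep calling the port lookup until it stops raising or the deadline passes, and use Python's while/else to report a timeout when the loop never breaks. A generic, self-contained version of the same idea (a hypothetical helper, not part of the test suite):

    import time

    def wait_until(check, timeout: float, interval: float = 0.05):
        # Retry `check` until it succeeds or `timeout` seconds elapse.
        start = time.time()
        result = None
        while time.time() - start < timeout:
            try:
                result = check()
                break                    # success: skip the else clause
            except RuntimeError:
                time.sleep(interval)
        else:
            raise TimeoutError("condition not met in time")
        return result
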
@@ -72,9 +108,13 @@ class DflyInstance:
     def _start(self):
         if self.params.existing_port:
             return
+
+        if self.dynamic_port:
+            self._port = None
+
         base_args = ["--use_zset_tree"] + [f"--{v}" for v in self.params.args]
         all_args = self.format_args(self.args) + base_args
-        print(f"Starting instance on {self.port} with arguments {all_args} from {self.params.path}")
+        logging.debug(f"Starting instance with arguments {all_args} from {self.params.path}")

         run_cmd = [self.params.path, *all_args]
         if self.params.gdb:

@@ -92,21 +132,40 @@ class DflyInstance:

     @property
     def port(self) -> int:
         if self.params.existing_port:
             return self.params.existing_port
-        return int(self.args.get("port", "6379"))
+        if self._port is None:
+            self._port = self.get_port_from_psutil()
+        return self._port

     @property
-    def admin_port(self) -> int:
+    def admin_port(self) -> Optional[int]:
         if self.params.existing_admin_port:
             return self.params.existing_admin_port
-        return int(self.args.get("admin_port", "16379"))
+        if "admin_port" in self.args:
+            return int(self.args["admin_port"])
+        return None

     @property
-    def mc_port(self) -> int:
+    def mc_port(self) -> Optional[int]:
         if self.params.existing_mc_port:
             return self.params.existing_mc_port
-        return int(self.args.get("memcached_port", "11211"))
+        if "memcached_port" in self.args:
+            return int(self.args["memcached_port"])
+        return None
+
+    def get_port_from_psutil(self) -> int:
+        if self.proc is None:
+            raise RuntimeError("port is not available yet")
+        p = psutil.Process(self.proc.pid)
+        ports = set()
+        for connection in p.connections():
+            if connection.status == "LISTEN":
+                ports.add(connection.laddr.port)
+
+        ports.difference_update({self.admin_port, self.mc_port})
+        assert len(ports) < 2, "Open ports detection found too many ports"
+        if ports:
+            return ports.pop()
+        raise RuntimeError("Couldn't parse port")

     @staticmethod
     def format_args(args):

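get_port_from_psutil is the psutil-based detection the commit message refers to: it asks the kernel, via psutil, which ports the child process is listening on and discards the known admin/memcached ports, so whatever remains must be the main port. A standalone illustration using Python's built-in http.server as a stand-in for Dragonfly (the fixed sleep is a crude wait; the test code above polls instead):

    import subprocess
    import time

    import psutil

    proc = subprocess.Popen(["python3", "-m", "http.server", "0"])  # port 0 = OS-assigned
    time.sleep(1.0)

    listening = {
        c.laddr.port
        for c in psutil.Process(proc.pid).connections()
        if c.status == "LISTEN"
    }
    print("listening ports:", listening)
    proc.terminate()
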
@@ -154,11 +213,8 @@ class DflyInstanceFactory:
         for instance in instances:
             instance._start()

-        delay = START_DELAY if not self.params.gdb else START_GDB_DELAY
-        time.sleep(delay * (1 + len(instances) / 2))
-
         for instance in instances:
-            instance._check_status()
+            instance._wait_for_server()

     def stop_all(self):
         """Stop all lanched instances."""

@@ -37,8 +37,8 @@ class TestNotEmulated:

 @dfly_args({"cluster_mode": "emulated"})
 class TestEmulated:
-    def test_cluster_slots_command(self, cluster_client: redis.RedisCluster):
-        expected = {(0, 16383): {"primary": ("127.0.0.1", 6379), "replicas": []}}
+    def test_cluster_slots_command(self, df_server, cluster_client: redis.RedisCluster):
+        expected = {(0, 16383): {"primary": ("127.0.0.1", df_server.port), "replicas": []}}
         res = cluster_client.execute_command("CLUSTER SLOTS")
         assert expected == res

@@ -58,8 +58,8 @@ class TestEmulated:

 @dfly_args({"cluster_mode": "emulated", "cluster_announce_ip": "127.0.0.2"})
 class TestEmulatedWithAnnounceIp:
-    def test_cluster_slots_command(self, cluster_client: redis.RedisCluster):
-        expected = {(0, 16383): {"primary": ("127.0.0.2", 6379), "replicas": []}}
+    def test_cluster_slots_command(self, df_server, cluster_client: redis.RedisCluster):
+        expected = {(0, 16383): {"primary": ("127.0.0.2", df_server.port), "replicas": []}}
         res = cluster_client.execute_command("CLUSTER SLOTS")
         assert expected == res

@@ -145,10 +145,10 @@ async def test_cluster_info(async_client):

 @dfly_args({"cluster_mode": "emulated", "cluster_announce_ip": "127.0.0.2"})
 @pytest.mark.asyncio
-async def test_cluster_nodes(async_client):
+async def test_cluster_nodes(df_server, async_client):
     res = await async_client.execute_command("CLUSTER NODES")
     assert len(res) == 1
-    info = res["127.0.0.2:6379"]
+    info = res[f"127.0.0.2:{df_server.port}"]
     assert res is not None
     assert info["connected"] == True
     assert info["epoch"] == "0"

@@ -25,3 +25,5 @@ async def test_maxclients(df_factory):
     assert [b"maxclients", b"3"] == await client1.execute_command("CONFIG GET maxclients")
     async with server.client() as client2:
         await client2.get("test")
+
+    server.stop()

@@ -5,7 +5,7 @@ import asyncio
 from redis import asyncio as aioredis

 from . import dfly_multi_test_args, dfly_args, DflyStartException
-from .utility import batch_fill_data, gen_test_data
+from .utility import batch_fill_data, gen_test_data, EnvironCntx


 @dfly_multi_test_args({"keys_output_limit": 512}, {"keys_output_limit": 1024})

@@ -22,9 +22,8 @@ class TestKeys:
 @pytest.fixture(scope="function")
 def export_dfly_password() -> str:
     pwd = "flypwd"
-    os.environ["DFLY_PASSWORD"] = pwd
-    yield pwd
-    del os.environ["DFLY_PASSWORD"]
+    with EnvironCntx(DFLY_PASSWORD=pwd):
+        yield pwd


 async def test_password(df_local_factory, export_dfly_password):

@@ -33,10 +32,10 @@ async def test_password(df_local_factory, export_dfly_password):

     # Expect password form environment variable
     with pytest.raises(redis.exceptions.AuthenticationError):
-        client = aioredis.Redis()
-        await client.ping()
-    client = aioredis.Redis(password=export_dfly_password)
-    await client.ping()
+        async with aioredis.Redis(port=dfly.port) as client:
+            await client.ping()
+    async with aioredis.Redis(password=export_dfly_password, port=dfly.port) as client:
+        await client.ping()
     dfly.stop()

     # --requirepass should take precedence over environment variable

@@ -46,10 +45,10 @@ async def test_password(df_local_factory, export_dfly_password):

     # Expect password form flag
     with pytest.raises(redis.exceptions.AuthenticationError):
-        client = aioredis.Redis(password=export_dfly_password)
-        await client.ping()
-    client = aioredis.Redis(password=requirepass)
-    await client.ping()
+        async with aioredis.Redis(port=dfly.port, password=export_dfly_password) as client:
+            await client.ping()
+    async with aioredis.Redis(password=requirepass, port=dfly.port) as client:
+        await client.ping()
     dfly.stop()

@@ -87,35 +86,31 @@ async def test_txq_ooo(async_client: aioredis.Redis, df_server):

 @dfly_args({"port": 6377})
 async def test_arg_from_environ_overwritten_by_cli(df_local_factory):
-    os.environ["DFLY_port"] = "6378"
-    dfly = df_local_factory.create()
-    dfly.start()
-    client = aioredis.Redis(port="6377")
-    await client.ping()
-    dfly.stop()
-    del os.environ["DFLY_port"]
+    with EnvironCntx(DFLY_port="6378"):
+        dfly = df_local_factory.create()
+        dfly.start()
+        client = aioredis.Redis(port="6377")
+        await client.ping()
+        dfly.stop()


 async def test_arg_from_environ(df_local_factory):
-    os.environ["DFLY_requirepass"] = "pass"
-    dfly = df_local_factory.create()
-    dfly.start()
+    with EnvironCntx(DFLY_requirepass="pass"):
+        dfly = df_local_factory.create()
+        dfly.start()

-    # Expect password from environment variable
-    with pytest.raises(redis.exceptions.AuthenticationError):
-        client = aioredis.Redis()
-        await client.ping()
+        # Expect password from environment variable
+        with pytest.raises(redis.exceptions.AuthenticationError):
+            client = aioredis.Redis(port=dfly.port)
+            await client.ping()

-    client = aioredis.Redis(password="pass")
-    await client.ping()
-    dfly.stop()
+        client = aioredis.Redis(password="pass", port=dfly.port)
+        await client.ping()

-    del os.environ["DFLY_requirepass"]
+        dfly.stop()


 async def test_unknown_dfly_env(df_local_factory, export_dfly_password):
-    os.environ["DFLY_abcdef"] = "xyz"
-    with pytest.raises(DflyStartException):
-        dfly = df_local_factory.create()
-        dfly.start()
-    del os.environ["DFLY_abcdef"]
+    with EnvironCntx(DFLY_abcdef="xyz"):
+        with pytest.raises(DflyStartException):
+            dfly = df_local_factory.create()
+            dfly.start()

@@ -10,7 +10,6 @@ from . import DflyInstanceFactory, dfly_args
 import pymemcache
 import logging

-BASE_PORT = 1111
 ADMIN_PORT = 1211

 DISCONNECT_CRASH_FULL_SYNC = 0

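Dropping the module-level BASE_PORT is the point of the change: fixed ports collide as soon as two test runs share a machine. A standalone sketch of the failure mode being avoided (assuming port 1111 is otherwise free):

    import socket

    a = socket.socket()
    a.bind(("127.0.0.1", 1111))      # first test run grabs the fixed port
    a.listen()

    b = socket.socket()
    try:
        b.bind(("127.0.0.1", 1111))  # a concurrent run collides
    except OSError as err:
        print("collision:", err)     # e.g. "Address already in use"
    finally:
        a.close()
        b.close()
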
@@ -43,19 +42,15 @@ replication_cases = [
 async def test_replication_all(
     df_local_factory, df_seeder_factory, t_master, t_replicas, seeder_config, from_admin_port
 ):
-    master = df_local_factory.create(
-        port=BASE_PORT, admin_port=ADMIN_PORT, proactor_threads=t_master
-    )
+    master = df_local_factory.create(admin_port=ADMIN_PORT, proactor_threads=t_master)
     replicas = [
-        df_local_factory.create(
-            port=BASE_PORT + i + 1, admin_port=ADMIN_PORT + i + 1, proactor_threads=t
-        )
+        df_local_factory.create(admin_port=ADMIN_PORT + i + 1, proactor_threads=t)
         for i, t in enumerate(t_replicas)
     ]

     # Start master
     master.start()
-    c_master = aioredis.Redis(port=master.port)
+    c_master = master.client()

     # Fill master with test data
     seeder = df_seeder_factory.create(port=master.port, **seeder_config)

@@ -64,7 +59,7 @@ async def test_replication_all(
     # Start replicas
     df_local_factory.start_all(replicas)

-    c_replicas = [aioredis.Redis(port=replica.port) for replica in replicas]
+    c_replicas = [replica.client() for replica in replicas]

     # Start data stream
     stream_task = asyncio.create_task(seeder.run(target_ops=3000))

@@ -94,9 +89,13 @@ async def test_replication_all(
     # Check data after stable state stream
+    await check_all_replicas_finished(c_replicas, c_master)
     await check_data(seeder, replicas, c_replicas)
+    await disconnect_clients(c_master, *c_replicas)


 async def check_replica_finished_exec(c_replica, c_master):
     role = await c_replica.execute_command("role")
     if role[0] != b"replica" or role[3] != b"stable_sync":
         return False
     syncid, r_offset = await c_replica.execute_command("DEBUG REPLICA OFFSET")
     m_offset = await c_master.execute_command("DFLY REPLICAOFFSET")

@@ -167,9 +166,9 @@ async def test_disconnect_replica(
     t_disonnect,
     n_keys,
 ):
-    master = df_local_factory.create(port=BASE_PORT, proactor_threads=t_master)
+    master = df_local_factory.create(proactor_threads=t_master)
     replicas = [
-        (df_local_factory.create(port=BASE_PORT + i + 1, proactor_threads=t), crash_fs)
+        (df_local_factory.create(proactor_threads=t), crash_fs)
         for i, (t, crash_fs) in enumerate(
             chain(
                 zip(t_crash_fs, repeat(DISCONNECT_CRASH_FULL_SYNC)),

@@ -300,10 +299,7 @@ async def test_disconnect_master(
     df_local_factory, df_seeder_factory, t_master, t_replicas, n_random_crashes, n_keys
 ):
     master = df_local_factory.create(port=1111, proactor_threads=t_master)
-    replicas = [
-        df_local_factory.create(port=BASE_PORT + i + 1, proactor_threads=t)
-        for i, t in enumerate(t_replicas)
-    ]
+    replicas = [df_local_factory.create(proactor_threads=t) for i, t in enumerate(t_replicas)]

     df_local_factory.start_all(replicas)
     c_replicas = [aioredis.Redis(port=replica.port) for replica in replicas]

@@ -317,10 +313,10 @@ async def test_disconnect_master(
     async def start_master():
         await asyncio.sleep(0.2)
         master.start()
-        c_master = aioredis.Redis(port=master.port)
-        assert await c_master.ping()
-        seeder.reset()
-        await seeder.run(target_deviation=0.1)
+        async with aioredis.Redis(port=master.port) as c_master:
+            assert await c_master.ping()
+            seeder.reset()
+            await seeder.run(target_deviation=0.1)

     await start_master()

@@ -372,15 +368,12 @@ rotating_master_cases = [(4, [4, 4, 4, 4], dict(keys=2_000, dbcount=4))]
 async def test_rotating_masters(
     df_local_factory, df_seeder_factory, t_replica, t_masters, seeder_config
 ):
-    replica = df_local_factory.create(port=BASE_PORT, proactor_threads=t_replica)
-    masters = [
-        df_local_factory.create(port=BASE_PORT + i + 1, proactor_threads=t)
-        for i, t in enumerate(t_masters)
-    ]
-    seeders = [df_seeder_factory.create(port=m.port, **seeder_config) for m in masters]
-
+    replica = df_local_factory.create(proactor_threads=t_replica)
+    masters = [df_local_factory.create(proactor_threads=t) for i, t in enumerate(t_masters)]
     df_local_factory.start_all([replica] + masters)

+    seeders = [df_seeder_factory.create(port=m.port, **seeder_config) for m in masters]
+
     c_replica = aioredis.Redis(port=replica.port)

     await asyncio.gather(*(seeder.run(target_deviation=0.1) for seeder in seeders))

@@ -420,11 +413,11 @@ async def test_cancel_replication_immediately(df_local_factory, df_seeder_factory):
     """
     COMMANDS_TO_ISSUE = 40

-    replica = df_local_factory.create(port=BASE_PORT)
-    masters = [df_local_factory.create(port=BASE_PORT + i + 1) for i in range(4)]
-    seeders = [df_seeder_factory.create(port=m.port) for m in masters]
-
+    replica = df_local_factory.create()
+    masters = [df_local_factory.create() for i in range(4)]
     df_local_factory.start_all([replica] + masters)

+    seeders = [df_seeder_factory.create(port=m.port) for m in masters]
     c_replica = aioredis.Redis(port=replica.port)
     await asyncio.gather(*(seeder.run(target_deviation=0.1) for seeder in seeders))

@@ -465,8 +458,8 @@ Check replica keys at the end.

 @pytest.mark.asyncio
 async def test_flushall(df_local_factory):
-    master = df_local_factory.create(port=BASE_PORT, proactor_threads=4)
-    replica = df_local_factory.create(port=BASE_PORT + 1, proactor_threads=2)
+    master = df_local_factory.create(proactor_threads=4)
+    replica = df_local_factory.create(proactor_threads=2)

     master.start()
     replica.start()

@@ -519,8 +512,8 @@ async def test_rewrites(df_local_factory):
     CLOSE_TIMESTAMP = int(time.time()) + 100
     CLOSE_TIMESTAMP_MS = CLOSE_TIMESTAMP * 1000

-    master = df_local_factory.create(port=BASE_PORT)
-    replica = df_local_factory.create(port=BASE_PORT + 1)
+    master = df_local_factory.create()
+    replica = df_local_factory.create()

     master.start()
     replica.start()

@@ -693,8 +686,8 @@ Test automatic replication of expiry.
 @dfly_args({"proactor_threads": 4})
 @pytest.mark.asyncio
 async def test_expiry(df_local_factory, n_keys=1000):
-    master = df_local_factory.create(port=BASE_PORT)
-    replica = df_local_factory.create(port=BASE_PORT + 1, logtostdout=True)
+    master = df_local_factory.create()
+    replica = df_local_factory.create(logtostdout=True)

     df_local_factory.start_all([master, replica])

@@ -725,14 +718,14 @@ async def test_expiry(df_local_factory, n_keys=1000):
     # send more traffic for differnt dbs while keys are expired
     for i in range(8):
         is_multi = i % 2
-        c_master_db = aioredis.Redis(port=master.port, db=i)
-        pipe = c_master_db.pipeline(transaction=is_multi)
-        # Set simple keys n_keys..n_keys*2 on master
-        start_key = n_keys * (i + 1)
-        end_key = start_key + n_keys
-        batch_fill_data(client=pipe, gen=gen_test_data(end_key, start_key), batch_size=20)
+        async with aioredis.Redis(port=master.port, db=i) as c_master_db:
+            pipe = c_master_db.pipeline(transaction=is_multi)
+            # Set simple keys n_keys..n_keys*2 on master
+            start_key = n_keys * (i + 1)
+            end_key = start_key + n_keys
+            batch_fill_data(client=pipe, gen=gen_test_data(end_key, start_key), batch_size=20)

-        await pipe.execute()
+            await pipe.execute()

     # Wait for master to expire keys
     await asyncio.sleep(3.0)

@@ -802,11 +795,8 @@ return 'OK'
 @pytest.mark.asyncio
 @pytest.mark.parametrize("t_master, t_replicas, num_ops, num_keys, num_par, flags", script_cases)
 async def test_scripts(df_local_factory, t_master, t_replicas, num_ops, num_keys, num_par, flags):
-    master = df_local_factory.create(port=BASE_PORT, proactor_threads=t_master)
-    replicas = [
-        df_local_factory.create(port=BASE_PORT + i + 1, proactor_threads=t)
-        for i, t in enumerate(t_replicas)
-    ]
+    master = df_local_factory.create(proactor_threads=t_master)
+    replicas = [df_local_factory.create(proactor_threads=t) for i, t in enumerate(t_replicas)]

     df_local_factory.start_all([master] + replicas)

@@ -840,9 +830,9 @@ async def test_scripts(df_local_factory, t_master, t_replicas, num_ops, num_keys
 async def test_auth_master(df_local_factory, n_keys=20):
     masterpass = "requirepass"
     replicapass = "replicapass"
-    master = df_local_factory.create(port=BASE_PORT, requirepass=masterpass)
+    master = df_local_factory.create(requirepass=masterpass)
     replica = df_local_factory.create(
-        port=BASE_PORT + 1, logtostdout=True, masterauth=masterpass, requirepass=replicapass
+        logtostdout=True, masterauth=masterpass, requirepass=replicapass
     )

     df_local_factory.start_all([master, replica])

@@ -872,8 +862,8 @@ SCRIPT_TEMPLATE = "return {}"

 @dfly_args({"proactor_threads": 2})
 async def test_script_transfer(df_local_factory):
-    master = df_local_factory.create(port=BASE_PORT)
-    replica = df_local_factory.create(port=BASE_PORT + 1)
+    master = df_local_factory.create()
+    replica = df_local_factory.create()

     df_local_factory.start_all([master, replica])

@@ -906,8 +896,8 @@ async def test_script_transfer(df_local_factory):
 @dfly_args({"proactor_threads": 4})
 @pytest.mark.asyncio
 async def test_role_command(df_local_factory, n_keys=20):
-    master = df_local_factory.create(port=BASE_PORT)
-    replica = df_local_factory.create(port=BASE_PORT + 1, logtostdout=True)
+    master = df_local_factory.create()
+    replica = df_local_factory.create(logtostdout=True)

     df_local_factory.start_all([master, replica])

@@ -979,10 +969,8 @@ async def assert_lag_condition(inst, client, condition):
 @dfly_args({"proactor_threads": 2})
 @pytest.mark.asyncio
 async def test_replication_info(df_local_factory, df_seeder_factory, n_keys=2000):
-    master = df_local_factory.create(port=BASE_PORT)
-    replica = df_local_factory.create(
-        port=BASE_PORT + 1, logtostdout=True, replication_acks_interval=100
-    )
+    master = df_local_factory.create()
+    replica = df_local_factory.create(logtostdout=True, replication_acks_interval=100)
     df_local_factory.start_all([master, replica])
     c_master = aioredis.Redis(port=master.port)
     c_replica = aioredis.Redis(port=replica.port)

@@ -1014,8 +1002,8 @@ More details in https://github.com/dragonflydb/dragonfly/issues/1231
 @pytest.mark.asyncio
 @pytest.mark.slow
 async def test_flushall_in_full_sync(df_local_factory, df_seeder_factory):
-    master = df_local_factory.create(port=BASE_PORT, proactor_threads=4, logtostdout=True)
-    replica = df_local_factory.create(port=BASE_PORT + 1, proactor_threads=2, logtostdout=True)
+    master = df_local_factory.create(proactor_threads=4, logtostdout=True)
+    replica = df_local_factory.create(proactor_threads=2, logtostdout=True)

     # Start master
     master.start()

@@ -1083,8 +1071,8 @@ redis.call('SET', 'A', 'ErrroR')

 @pytest.mark.asyncio
 async def test_readonly_script(df_local_factory):
-    master = df_local_factory.create(port=BASE_PORT, proactor_threads=2, logtostdout=True)
-    replica = df_local_factory.create(port=BASE_PORT + 1, proactor_threads=2, logtostdout=True)
+    master = df_local_factory.create(proactor_threads=2, logtostdout=True)
+    replica = df_local_factory.create(proactor_threads=2, logtostdout=True)

     df_local_factory.start_all([master, replica])

@@ -1118,13 +1106,12 @@ take_over_cases = [
 async def test_take_over_counters(df_local_factory, master_threads, replica_threads):
     master = df_local_factory.create(
         proactor_threads=master_threads,
-        port=BASE_PORT,
         # vmodule="journal_slice=2,dflycmd=2,main_service=1",
         logtostderr=True,
     )
-    replica1 = df_local_factory.create(port=BASE_PORT + 1, proactor_threads=replica_threads)
-    replica2 = df_local_factory.create(port=BASE_PORT + 2, proactor_threads=replica_threads)
-    replica3 = df_local_factory.create(port=BASE_PORT + 3, proactor_threads=replica_threads)
+    replica1 = df_local_factory.create(proactor_threads=replica_threads)
+    replica2 = df_local_factory.create(proactor_threads=replica_threads)
+    replica3 = df_local_factory.create(proactor_threads=replica_threads)
     df_local_factory.start_all([master, replica1, replica2, replica3])
     c_master = master.client()
     c1 = replica1.client()

@@ -1183,16 +1170,14 @@ async def test_take_over_seeder(
     tmp_file_name = "".join(random.choices(string.ascii_letters, k=10))
     master = df_local_factory.create(
         proactor_threads=master_threads,
-        port=BASE_PORT,
         dbfilename=f"dump_{tmp_file_name}",
         logtostderr=True,
     )
-    replica = df_local_factory.create(port=BASE_PORT + 1, proactor_threads=replica_threads)
+    replica = df_local_factory.create(proactor_threads=replica_threads)
     df_local_factory.start_all([master, replica])

     seeder = df_seeder_factory.create(port=master.port, keys=1000, dbcount=5, stop_on_failure=False)

     c_master = master.client()
     c_replica = replica.client()

     await c_replica.execute_command(f"REPLICAOF localhost {master.port}")

@@ -1215,9 +1200,10 @@ async def test_take_over_seeder(
     assert master.proc.poll() == 0, "Master process did not exit correctly."

     master.start()
+    c_master = master.client()
     await wait_available_async(c_master)

-    capture = await seeder.capture()
+    capture = await seeder.capture(port=master.port)
     assert await seeder.compare(capture, port=replica.port)

     await disconnect_clients(c_master, c_replica)

@@ -1225,8 +1211,8 @@ async def test_take_over_seeder(

 @pytest.mark.asyncio
 async def test_take_over_timeout(df_local_factory, df_seeder_factory):
-    master = df_local_factory.create(proactor_threads=2, port=BASE_PORT, logtostderr=True)
-    replica = df_local_factory.create(port=BASE_PORT + 1, proactor_threads=2)
+    master = df_local_factory.create(proactor_threads=2, logtostderr=True)
+    replica = df_local_factory.create(proactor_threads=2)
     df_local_factory.start_all([master, replica])

     seeder = df_seeder_factory.create(port=master.port, keys=1000, dbcount=5, stop_on_failure=False)

@@ -1234,6 +1220,8 @@ async def test_take_over_timeout(df_local_factory, df_seeder_factory):
     c_master = master.client()
     c_replica = replica.client()

+    print("PORTS ARE: ", master.port, replica.port)
+
     await c_replica.execute_command(f"REPLICAOF localhost {master.port}")
     await wait_available_async(c_replica)

@@ -1279,7 +1267,6 @@ async def test_no_tls_on_admin_port(
         no_tls_on_admin_port="true",
         admin_port=ADMIN_PORT,
         **with_tls_server_args,
-        port=BASE_PORT,
         requirepass="XXX",
         proactor_threads=t_master,
     )

@@ -1294,7 +1281,6 @@ async def test_no_tls_on_admin_port(
         no_tls_on_admin_port="true",
         admin_port=ADMIN_PORT + 1,
         **with_tls_server_args,
-        port=BASE_PORT + 1,
         proactor_threads=t_replica,
         requirepass="XXX",
         masterauth="XXX",

@@ -1333,7 +1319,7 @@ async def test_tls_replication(
     master = df_local_factory.create(
         tls_replication="true",
         **with_ca_tls_server_args,
-        port=BASE_PORT,
+        port=1111,
         admin_port=ADMIN_PORT,
         proactor_threads=t_master,
     )

@@ -1347,7 +1333,6 @@ async def test_tls_replication(
     replica = df_local_factory.create(
         tls_replication="true",
         **with_ca_tls_server_args,
-        port=BASE_PORT + 1,
         proactor_threads=t_replica,
     )
     replica.start()

@@ -1393,7 +1378,6 @@ async def wait_for_replica_status(replica: aioredis.Redis, status: str, wait_for
 async def test_replicaof_flag(df_local_factory):
     # tests --replicaof works under normal conditions
     master = df_local_factory.create(
-        port=BASE_PORT,
         proactor_threads=2,
     )

@@ -1405,9 +1389,8 @@ async def test_replicaof_flag(df_local_factory):
     assert 1 == db_size

     replica = df_local_factory.create(
-        port=BASE_PORT + 1,
         proactor_threads=2,
-        replicaof=f"localhost:{BASE_PORT}",  # start to replicate master
+        replicaof=f"localhost:{master.port}",  # start to replicate master
     )

     # set up replica. check that it is replicating

@@ -1416,6 +1399,7 @@ async def test_replicaof_flag(df_local_factory):

     await wait_available_async(c_replica)  # give it time to startup
     await wait_for_replica_status(c_replica, status="up")  # wait until we have a connection
+    await check_all_replicas_finished([c_replica], c_master)

     dbsize = await c_replica.dbsize()
     assert 1 == dbsize

@@ -1427,8 +1411,8 @@ async def test_replicaof_flag(df_local_factory):
 @pytest.mark.asyncio
 async def test_replicaof_flag_replication_waits(df_local_factory):
     # tests --replicaof works when we launch replication before the master
+    BASE_PORT = 1111
     replica = df_local_factory.create(
-        port=BASE_PORT + 1,
         proactor_threads=2,
         replicaof=f"localhost:{BASE_PORT}",  # start to replicate master
     )

@@ -1459,6 +1443,7 @@ async def test_replicaof_flag_replication_waits(df_local_factory):

     # check that replication works now
     await wait_for_replica_status(c_replica, status="up")
+    await check_all_replicas_finished([c_replica], c_master)

     dbsize = await c_replica.dbsize()
     assert 1 == dbsize

@@ -1471,7 +1456,6 @@ async def test_replicaof_flag_replication_waits(df_local_factory):
 async def test_replicaof_flag_disconnect(df_local_factory):
     # test stopping replication when started using --replicaof
     master = df_local_factory.create(
-        port=BASE_PORT,
         proactor_threads=2,
     )

@@ -1485,9 +1469,8 @@ async def test_replicaof_flag_disconnect(df_local_factory):
     assert 1 == db_size

     replica = df_local_factory.create(
-        port=BASE_PORT + 1,
         proactor_threads=2,
-        replicaof=f"localhost:{BASE_PORT}",  # start to replicate master
+        replicaof=f"localhost:{master.port}",  # start to replicate master
     )

     # set up replica. check that it is replicating

@@ -1496,6 +1479,7 @@ async def test_replicaof_flag_disconnect(df_local_factory):
     c_replica = aioredis.Redis(port=replica.port)
     await wait_available_async(c_replica)
     await wait_for_replica_status(c_replica, status="up")
+    await check_all_replicas_finished([c_replica], c_master)

     dbsize = await c_replica.dbsize()
     assert 1 == dbsize

@@ -1512,13 +1496,11 @@ async def test_replicaof_flag_disconnect(df_local_factory):
 @pytest.mark.asyncio
 async def test_df_crash_on_memcached_error(df_local_factory):
     master = df_local_factory.create(
-        port=BASE_PORT,
         memcached_port=11211,
         proactor_threads=2,
     )

     replica = df_local_factory.create(
-        port=master.port + 1,
         memcached_port=master.mc_port + 1,
         proactor_threads=2,
     )

@@ -17,3 +17,4 @@ prometheus_client==0.17.0
 aiohttp==3.8.4
 numpy==1.24.3
 pytest-json-report==1.5.0
+psutil==5.9.5

@@ -56,7 +56,7 @@ class TestRdbSnapshot(SnapshotTestBase):
         assert await async_client.flushall()
         await async_client.execute_command("DEBUG LOAD " + super().get_main_file("test-rdb-*.rdb"))

-        assert await seeder.compare(start_capture)
+        assert await seeder.compare(start_capture, port=df_server.port)


 @dfly_args({**BASIC_ARGS, "dbfilename": "test-rdbexact.rdb", "nodf_snapshot_format": None})

@@ -81,7 +81,7 @@ class TestRdbSnapshotExactFilename(SnapshotTestBase):
         main_file = super().get_main_file("test-rdbexact.rdb")
         await async_client.execute_command("DEBUG LOAD " + main_file)

-        assert await seeder.compare(start_capture)
+        assert await seeder.compare(start_capture, port=df_server.port)


 @dfly_args({**BASIC_ARGS, "dbfilename": "test-dfs"})

@@ -107,7 +107,7 @@ class TestDflySnapshot(SnapshotTestBase):
             "DEBUG LOAD " + super().get_main_file("test-dfs-summary.dfs")
         )

-        assert await seeder.compare(start_capture)
+        assert await seeder.compare(start_capture, port=df_server.port)


 # We spawn instances manually, so reduce memory usage of default to minimum

@@ -236,7 +236,7 @@ class TestDflySnapshotOnShutdown(SnapshotTestBase):
         await wait_available_async(a_client)
         await a_client.connection_pool.disconnect()

-        assert await seeder.compare(start_capture)
+        assert await seeder.compare(start_capture, port=df_server.port)


 @dfly_args({**BASIC_ARGS, "dbfilename": "test-info-persistence"})

@@ -6,20 +6,20 @@ from . import DflyStartException

 async def test_tls_no_auth(df_factory, with_tls_server_args):
     # Needs some authentication
-    server = df_factory.create(port=1111, **with_tls_server_args)
+    server = df_factory.create(**with_tls_server_args)
     with pytest.raises(DflyStartException):
         server.start()


 async def test_tls_no_key(df_factory):
     # Needs a private key and certificate.
-    server = df_factory.create(port=1112, tls=None, requirepass="XXX")
+    server = df_factory.create(tls=None, requirepass="XXX")
     with pytest.raises(DflyStartException):
         server.start()


 async def test_tls_password(df_factory, with_tls_server_args, gen_ca_cert):
-    server = df_factory.create(port=1113, requirepass="XXX", **with_tls_server_args)
+    server = df_factory.create(requirepass="XXX", **with_tls_server_args)
     server.start()
     async with server.client(
         ssl=True, password="XXX", ssl_ca_certs=gen_ca_cert["ca_cert"]

@@ -31,7 +31,7 @@ async def test_tls_password(df_factory, with_tls_server_args, gen_ca_cert):
 async def test_tls_client_certs(
     df_factory, with_ca_tls_server_args, with_tls_client_args, gen_ca_cert
 ):
-    server = df_factory.create(port=1114, **with_ca_tls_server_args)
+    server = df_factory.create(**with_ca_tls_server_args)
     server.start()
     async with server.client(**with_tls_client_args, ssl_ca_certs=gen_ca_cert["ca_cert"]) as client:
         await client.ping()

@@ -39,13 +39,13 @@ async def test_tls_client_certs(


 async def test_client_tls_no_auth(df_factory):
-    server = df_factory.create(port=1115, tls_replication=None)
+    server = df_factory.create(tls_replication=None)
     with pytest.raises(DflyStartException):
         server.start()


 async def test_client_tls_password(df_factory):
-    server = df_factory.create(port=1116, tls_replication=None, masterauth="XXX")
+    server = df_factory.create(tls_replication=None, masterauth="XXX")
     server.start()
     server.stop()

@@ -53,6 +53,6 @@ async def test_client_tls_password(df_factory):
 async def test_client_tls_cert(df_factory, with_tls_server_args):
     key_args = with_tls_server_args.copy()
     key_args.pop("tls")
-    server = df_factory.create(port=1117, tls_replication=None, **key_args)
+    server = df_factory.create(tls_replication=None, **key_args)
     server.start()
     server.stop()

@@ -10,6 +10,7 @@ import time
 import difflib
 import json
 import subprocess
+import os
 from enum import Enum

@@ -581,3 +582,22 @@ def gen_certificate(
     # Use CA's private key to sign dragonfly's CSR and get back the signed certificate
     step2 = rf"openssl x509 -req -in {certificate_request_path} -days 1 -CA {ca_certificate_path} -CAkey {ca_key_path} -CAcreateserial -out {certificate_path}"
     subprocess.run(step2, shell=True)
+
+
+class EnvironCntx:
+    def __init__(self, **kwargs):
+        self.updates = kwargs
+        self.undo = {}
+
+    def __enter__(self):
+        for k, v in self.updates.items():
+            if k in os.environ:
+                self.undo[k] = os.environ[k]
+            os.environ[k] = v
+
+    def __exit__(self, exc_type, exc_value, exc_traceback):
+        for k, v in self.updates.items():
+            if k in self.undo:
+                os.environ[k] = self.undo[k]
+            else:
+                del os.environ[k]

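EnvironCntx gives tests scoped environment variables: values are set on entry and restored (or removed) on exit, which also holds when the body raises, something the inline del os.environ[...] calls at the end of a test body could not guarantee. Usage sketch:

    import os

    with EnvironCntx(DFLY_PASSWORD="flypwd"):
        assert os.environ["DFLY_PASSWORD"] == "flypwd"
    # Outside the block the variable is gone again
    # (assuming it was not already set before entering).
    assert "DFLY_PASSWORD" not in os.environ
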