Mirror of https://github.com/dragonflydb/dragonfly.git, synced 2025-05-11 02:15:45 +02:00
fix: regtest failures (#5013)

1. Fix a crash bug in RESETSTAT when the number of shards is less than the number of threads.
2. Tune regression-test parameters so the pipelining tests pass.

Signed-off-by: Roman Gershman <roman@dragonflydb.io>
parent aa7e8abcb6
commit 23e6db900d

3 changed files with 8 additions and 6 deletions
.github/workflows/regression-tests.yml (vendored, 3 deletions)

@@ -55,9 +55,6 @@ jobs:
           aws-access-key-id: ${{ secrets.AWS_S3_ACCESS_KEY }}
           aws-secret-access-key: ${{ secrets.AWS_S3_ACCESS_SECRET }}
           s3-bucket: ${{ secrets.S3_REGTEST_BUCKET }}
-          # Chain ternary oprator of the form (which can be nested)
-          # (expression == condition && <true expression> || <false expression>)
-          epoll: ${{ matrix.proactor == 'Epoll' && 'epoll' || 'iouring' }}
 
       - name: Upload logs on failure
         if: failure()
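Note: the three deleted lines dropped the `epoll` input that was passed to the regression-test action. As the removed comment itself explained, the value was computed with GitHub Actions' chained-ternary idiom: in Actions expressions `a && b` evaluates to `b` when `a` is truthy, and `x || y` evaluates to `y` when `x` is falsy, so `matrix.proactor == 'Epoll' && 'epoll' || 'iouring'` behaves like a ternary selecting between the two strings.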
@@ -2224,7 +2224,10 @@ void ServerFamily::ResetStat(Namespace* ns) {
   shard_set->pool()->AwaitBrief(
       [registry = service_.mutable_registry(), ns](unsigned index, auto*) {
         registry->ResetCallStats(index);
-        ns->GetCurrentDbSlice().ResetEvents();
+        EngineShard* shard = EngineShard::tlocal();
+        if (shard) {
+          ns->GetDbSlice(shard->shard_id()).ResetEvents();
+        }
         facade::ResetStats();
         ServerState::tlocal()->exec_freq_count.clear();
       });
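The crash mechanism, per the commit message: AwaitBrief broadcasts the callback to every thread in the proactor pool, but when the server runs with fewer shards than threads, some pool threads own no EngineShard. On those threads EngineShard::tlocal() returns null, and the old GetCurrentDbSlice() path presumably dereferenced that missing shard. Below is a minimal standalone C++ sketch of the guard pattern; the Shard type, tl_shard variable, and thread counts are illustrative stand-ins, not Dragonfly's actual API.

#include <cstdio>
#include <thread>
#include <vector>

// Stand-in for EngineShard: only some pool threads own one.
struct Shard {
  unsigned id;
  void ResetEvents() { std::printf("reset events on shard %u\n", id); }
};

// Analogous to EngineShard::tlocal(): null on threads without a shard.
thread_local Shard* tl_shard = nullptr;

int main() {
  const unsigned kThreads = 4;
  const unsigned kShards = 2;  // fewer shards than threads
  std::vector<Shard> shards = {{0}, {1}};
  std::vector<std::thread> pool;
  for (unsigned i = 0; i < kThreads; ++i) {
    pool.emplace_back([&shards, i] {
      if (i < kShards)
        tl_shard = &shards[i];  // only shard-owning threads set the TLS slot
      // A broadcast callback (like the lambda given to AwaitBrief above)
      // runs on every thread, so it must tolerate a null thread-local shard:
      if (Shard* shard = tl_shard)
        shard->ResetEvents();  // runs only where a shard exists
      // Calling tl_shard->ResetEvents() unconditionally would crash on
      // the two threads that own no shard, which is the bug fixed above.
    });
  }
  for (auto& t : pool)
    t.join();
  return 0;
}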
@@ -525,7 +525,7 @@ async def test_keyspace_events_config_set(async_client: aioredis.Redis):
     await collect_expiring_events(pclient, keys)
 
 
-@pytest.mark.exclude_epoll
+@dfly_args({"max_busy_read_usec": 10000})
 async def test_reply_count(async_client: aioredis.Redis):
     """Make sure reply aggregations reduce reply counts for common cases"""
 
@@ -537,6 +537,7 @@ async def test_reply_count(async_client: aioredis.Redis):
         await aw
         return await get_reply_count() - before - 1
 
+    await async_client.config_resetstat()
     base = await get_reply_count()
     info_diff = await get_reply_count() - base
     assert info_diff == 1
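The new config_resetstat call clears the server's command statistics before the baseline reply count is sampled. Since CONFIG RESETSTAT maps to the ServerFamily::ResetStat path fixed above, the test presumably now also exercises that fix. The decorator swap replaces the epoll exclusion marker with a max_busy_read_usec override, in line with the commit's stated goal of tuning parameters so the pipelining tests pass.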
@@ -1121,12 +1122,13 @@ async def test_send_timeout(df_server, async_client: aioredis.Redis):
 
 
 # Test that the cache pipeline does not grow or shrink under constant pipeline load.
-@dfly_args({"proactor_threads": 1, "pipeline_squash": 9, "max_busy_read_usec": 1000})
+@dfly_args({"proactor_threads": 1, "pipeline_squash": 9, "max_busy_read_usec": 10000})
 async def test_pipeline_cache_only_async_squashed_dispatches(df_factory):
     server = df_factory.create()
     server.start()
 
     client = server.client()
+    await client.ping()  # Make sure the connection and the protocol were established
 
     async def push_pipeline(size):
         p = client.pipeline(transaction=True)
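Two tuning changes here: max_busy_read_usec is raised from 1000 to 10000, matching the commit's stated goal for the pipelining tests, and an explicit ping is added so that, as the added comment says, the connection and protocol are established before any pipeline is pushed, presumably keeping the measured dispatch counts free of connection-setup noise.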