fix: regtest failures (#5013)

1. Fix a crash bug in RESETSTAT when the number of shards is less than the number of threads.
2. Tune regression-test parameters so that the pipelining tests pass.

Signed-off-by: Roman Gershman <roman@dragonflydb.io>
Roman Gershman 2025-04-28 12:49:44 +03:00 committed by GitHub
parent aa7e8abcb6
commit 23e6db900d
3 changed files with 8 additions and 6 deletions


@@ -55,9 +55,6 @@ jobs:
           aws-access-key-id: ${{ secrets.AWS_S3_ACCESS_KEY }}
           aws-secret-access-key: ${{ secrets.AWS_S3_ACCESS_SECRET }}
           s3-bucket: ${{ secrets.S3_REGTEST_BUCKET }}
-          # Chained ternary operator of the form (which can be nested)
-          # (expression == condition && <true expression> || <false expression>)
-          epoll: ${{ matrix.proactor == 'Epoll' && 'epoll' || 'iouring' }}
       - name: Upload logs on failure
         if: failure()
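
The removed comment documents the &&/|| chaining that GitHub Actions expressions use in place of a real ternary operator. Python's old and/or idiom behaves the same way and shares the same pitfall; a minimal sketch (pick_api is a hypothetical helper, not part of the workflow):

    def pick_api(proactor: str) -> str:
        # Mirrors: matrix.proactor == 'Epoll' && 'epoll' || 'iouring'
        return proactor == "Epoll" and "epoll" or "iouring"

    assert pick_api("Epoll") == "epoll"
    assert pick_api("Uring") == "iouring"

    # Caveat in both languages: if the "true" value is falsy (e.g. ""), the
    # fallback after or/|| fires even when the condition holds, so a real
    # conditional expression (x if cond else y) is safer where one exists.
    assert (True and "" or "fallback") == "fallback"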


@@ -2224,7 +2224,10 @@ void ServerFamily::ResetStat(Namespace* ns) {
   shard_set->pool()->AwaitBrief(
       [registry = service_.mutable_registry(), ns](unsigned index, auto*) {
         registry->ResetCallStats(index);
-        ns->GetCurrentDbSlice().ResetEvents();
+        EngineShard* shard = EngineShard::tlocal();
+        if (shard) {
+          ns->GetDbSlice(shard->shard_id()).ResetEvents();
+        }
         facade::ResetStats();
         ServerState::tlocal()->exec_freq_count.clear();
       });
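
The guard addresses the crash from the commit message: AwaitBrief runs the callback on every proactor thread, but when there are more threads than shards, some threads host no EngineShard, so EngineShard::tlocal() returns nullptr and the old unconditional GetCurrentDbSlice() call dereferenced it. A minimal Python model of the thread-local situation (all names illustrative, not Dragonfly APIs):

    import threading

    _tlocal = threading.local()  # stands in for EngineShard::tlocal()

    def broadcast_callback():
        # Runs on every pool thread, like the AwaitBrief lambda above.
        shard_id = getattr(_tlocal, "shard_id", None)  # None ~ nullptr
        if shard_id is None:
            return  # thread hosts no shard: skip per-shard work (the fix)
        print(f"ResetEvents on shard {shard_id}")

    def shard_thread(shard_id):
        _tlocal.shard_id = shard_id  # only shard threads set this
        broadcast_callback()

    # 2 shards but 4 pool threads: two threads never receive a shard.
    threads = [threading.Thread(target=shard_thread, args=(i,)) for i in range(2)]
    threads += [threading.Thread(target=broadcast_callback) for _ in range(2)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()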


@@ -525,7 +525,7 @@ async def test_keyspace_events_config_set(async_client: aioredis.Redis):
     await collect_expiring_events(pclient, keys)


 @pytest.mark.exclude_epoll
 @dfly_args({"max_busy_read_usec": 10000})
 async def test_reply_count(async_client: aioredis.Redis):
     """Make sure reply aggregations reduce reply counts for common cases"""
@@ -537,6 +537,7 @@ async def test_reply_count(async_client: aioredis.Redis):
             await aw
         return await get_reply_count() - before - 1

+    await async_client.config_resetstat()
     base = await get_reply_count()
     info_diff = await get_reply_count() - base
     assert info_diff == 1
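
The config_resetstat call gives the test a clean counter before the baseline is taken. The measurement pattern is: reset stats, read a reply counter, run the command under test, read again, and subtract both the baseline and the one reply produced by the read itself. A hedged sketch of that pattern (the reply_count INFO field and the helper are assumptions, not necessarily the test suite's actual helper):

    import redis.asyncio as aioredis

    async def replies_used(client: aioredis.Redis, command: str, *args) -> int:
        async def get_reply_count() -> int:
            stats = await client.info("statistics")
            return int(stats.get("reply_count", 0))  # hypothetical field name

        await client.config_resetstat()  # start from a clean counter
        before = await get_reply_count()
        await client.execute_command(command, *args)
        # Subtract the baseline plus the one reply produced by the INFO call
        # inside get_reply_count, mirroring the "- before - 1" arithmetic above.
        return await get_reply_count() - before - 1
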
@@ -1121,12 +1122,13 @@ async def test_send_timeout(df_server, async_client: aioredis.Redis):
 # Test that the pipeline cache does not grow or shrink under constant pipeline load.
-@dfly_args({"proactor_threads": 1, "pipeline_squash": 9, "max_busy_read_usec": 1000})
+@dfly_args({"proactor_threads": 1, "pipeline_squash": 9, "max_busy_read_usec": 10000})
 async def test_pipeline_cache_only_async_squashed_dispatches(df_factory):
     server = df_factory.create()
     server.start()
     client = server.client()
+    await client.ping()  # Make sure the connection and the protocol were established

     async def push_pipeline(size):
         p = client.pipeline(transaction=True)
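
The hunk is truncated just as push_pipeline begins. Raising max_busy_read_usec from 1000 to 10000 gives the connection a larger busy-read budget to consume a whole pipeline in one pass, which is the parameter tuning the commit message refers to. For context, this is roughly how such a helper continues: it queues a fixed number of commands on a transactional pipeline and awaits a single batched round trip, keeping the squashed-dispatch path under constant load. A sketch of the likely shape (only the first two lines appear in the diff; the rest is an assumption, with client taken from the enclosing test):

    async def push_pipeline(size):
        p = client.pipeline(transaction=True)
        for _ in range(size):  # assumption: queue `size` cheap commands
            p.ping()
        return await p.execute()  # one batched round trip per call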