mirror of
https://github.com/dragonflydb/dragonfly.git
synced 2025-05-10 18:05:44 +02:00
chore: bump up max_busy_read_usec in tests (#5039)
The cycle clock, like most clocks, is affected by thread context switching, and this may be what causes the test failures. Try increasing the parameter even further. Also, add benchmarks for the clock, and fix some compiler warnings around parser.cc. Signed-off-by: Roman Gershman <roman@dragonflydb.io>
This commit is contained in:
parent
1082724500
commit
f4f4668c7a
3 changed files with 19 additions and 2 deletions
|
@ -223,6 +223,21 @@ static void BM_ParseDoubleAbsl(benchmark::State& state) {
|
|||
}
|
||||
BENCHMARK(BM_ParseDoubleAbsl);
|
||||
|
||||
// Measures the per-call cost of clock_gettime(2) for the clock id given as a
// template parameter. Registered below for a range of clock ids so their
// relative costs can be compared (COARSE clocks are typically cheapest; the
// CPUTIME clocks usually require a kernel round-trip).
template <clockid_t cid> void BM_ClockType(benchmark::State& state) {
  timespec ts;
  // Range-for over State is the current google-benchmark idiom;
  // KeepRunning() is deprecated and adds a per-iteration state check.
  for (auto _ : state) {
    // DoNotOptimize keeps the compiler from eliding the call or its result.
    DoNotOptimize(clock_gettime(cid, &ts));
  }
}
|
||||
// Register BM_ClockType for each clock id of interest so their latencies can
// be compared side by side in the benchmark report.
// Wall clocks (precise vs. coarse):
BENCHMARK_TEMPLATE(BM_ClockType, CLOCK_REALTIME);
BENCHMARK_TEMPLATE(BM_ClockType, CLOCK_REALTIME_COARSE);
BENCHMARK_TEMPLATE(BM_ClockType, CLOCK_MONOTONIC);
BENCHMARK_TEMPLATE(BM_ClockType, CLOCK_MONOTONIC_COARSE);
// Monotonic clock that also counts time spent in suspend:
BENCHMARK_TEMPLATE(BM_ClockType, CLOCK_BOOTTIME);
// Per-process / per-thread CPU-time clocks (typically the slowest to read):
BENCHMARK_TEMPLATE(BM_ClockType, CLOCK_PROCESS_CPUTIME_ID);
BENCHMARK_TEMPLATE(BM_ClockType, CLOCK_THREAD_CPUTIME_ID);
BENCHMARK_TEMPLATE(BM_ClockType, CLOCK_BOOTTIME_ALARM);
|
||||
|
||||
static void BM_MatchGlob(benchmark::State& state) {
|
||||
string random_val = GetRandomHex(state.range(0));
|
||||
GlobMatcher matcher("*foobar*", true);
|
||||
|
|
|
@ -3,6 +3,8 @@ gen_bison(parser)
|
|||
|
||||
# gen_dir receives the directory where bison/flex outputs are generated.
cur_gen_dir(gen_dir)

# The bison-generated parser triggers -Wmaybe-uninitialized on some GCC
# versions; suppress the warning for the generated file only rather than
# for the whole target.
set_source_files_properties(${gen_dir}/parser.cc PROPERTIES
  COMPILE_FLAGS "-Wno-maybe-uninitialized")
add_library(query_parser base.cc ast_expr.cc query_driver.cc search.cc indices.cc
            sort_indices.cc vector_utils.cc compressed_sorted_set.cc block_list.cc
            synonyms.cc ${gen_dir}/parser.cc ${gen_dir}/lexer.cc)
|
||||
|
|
|
@ -525,7 +525,7 @@ async def test_keyspace_events_config_set(async_client: aioredis.Redis):
|
|||
await collect_expiring_events(pclient, keys)
|
||||
|
||||
|
||||
@dfly_args({"max_busy_read_usec": 50000})
|
||||
async def test_reply_count(async_client: aioredis.Redis):
|
||||
"""Make sure reply aggregations reduce reply counts for common cases"""
|
||||
|
||||
|
@ -1122,7 +1122,7 @@ async def test_send_timeout(df_server, async_client: aioredis.Redis):
|
|||
|
||||
|
||||
# Test that the cache pipeline does not grow or shrink under constant pipeline load.
|
||||
@dfly_args({"proactor_threads": 1, "pipeline_squash": 9, "max_busy_read_usec": 50000})
|
||||
async def test_pipeline_cache_only_async_squashed_dispatches(df_factory):
|
||||
server = df_factory.create()
|
||||
server.start()
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue