chore: pull helio (#3350)
* chore: pull helio

Signed-off-by: Roman Gershman <roman@dragonflydb.io>

commit feb9bc266a
parent c46d95db2f

4 changed files with 7 additions and 4 deletions
helio (submodule) | 2 (+1, -1)

@@ -1 +1 @@
-Subproject commit deb4c1ac263c00d6602a3e1158ccae04a9b28417
+Subproject commit 288cb312971f07cd35fa3a75fcfe047788d7d0fb
@@ -680,6 +680,8 @@ void EngineShard::RunPeriodic(std::chrono::milliseconds period_ms) {
 
     // Every 8 runs, update the global stats.
    if (global_count % 8 == 0) {
+      DVLOG(2) << "Global periodic";
+
       uint64_t sum = 0;
       const auto& stats = EngineShardSet::GetCachedStats();
       for (const auto& s : stats)
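The added DVLOG(2) line sits in the branch of RunPeriodic that fires only on every 8th iteration, next to the loop that sums the cached per-shard stats. Below is a minimal, self-contained sketch of that gating pattern; it is an illustration rather than the Dragonfly implementation, and the names RunPeriodicSketch and kGlobalEvery are invented for it.

// Sketch: cheap work runs on every tick, while the heavier aggregation over
// cached per-shard counters runs only on every 8th tick (hypothetical names).
#include <cstdint>
#include <iostream>
#include <vector>

constexpr uint64_t kGlobalEvery = 8;

void RunPeriodicSketch(const std::vector<uint64_t>& per_shard_stats, int ticks) {
  uint64_t global_count = 0;
  for (int i = 0; i < ticks; ++i) {
    ++global_count;
    // Per-tick bookkeeping would go here.
    if (global_count % kGlobalEvery == 0) {
      // Expensive path, analogous to the summation loop in the hunk above.
      uint64_t sum = 0;
      for (uint64_t s : per_shard_stats)
        sum += s;
      std::cout << "global periodic, sum=" << sum << '\n';
    }
  }
}

int main() {
  RunPeriodicSketch({1, 2, 3, 4}, 16);  // hits the global path on ticks 8 and 16
  return 0;
}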
@@ -206,9 +206,10 @@ TEST_F(TieredStorageTest, BackgroundOffloading) {
 
   // Wait for offload to do it all again
   ExpectConditionWithinTimeout([&] { return GetMetrics().db_stats[0].tiered_entries == kNum; });
-
+  auto resp = Run({"INFO", "ALL"});
+  LOG(INFO) << "INFO " << resp.GetString();
   auto metrics = GetMetrics();
-  EXPECT_EQ(metrics.tiered_stats.total_stashes, 2 * kNum);
+  EXPECT_EQ(metrics.tiered_stats.total_stashes, 2 * kNum) << resp.GetString();
   EXPECT_EQ(metrics.tiered_stats.total_fetches, kNum);
   EXPECT_EQ(metrics.tiered_stats.allocated_bytes, kNum * 4096);
 }
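The test change above captures the full INFO ALL output and streams it into the EXPECT_EQ on total_stashes, so a failing run prints the server's info dump rather than just the mismatching counters. A small standalone sketch of that GoogleTest idiom follows; the test name, helper, and values are illustrative and not taken from the Dragonfly test suite.

#include <gtest/gtest.h>
#include <string>

// Stand-in for Run({"INFO", "ALL"}).GetString(): returns whatever diagnostic
// text we want attached to a failing expectation.
static std::string CollectDiagnostics() {
  return "tiered_entries:0 total_stashes:0 total_fetches:0";
}

TEST(StreamingDiagnosticsSketch, ExtraContextOnFailure) {
  const int expected_stashes = 2;
  const int actual_stashes = 2;  // change this value to see the streamed message
  // The streamed string is appended to the failure message only when the
  // expectation fails, which is what the `<< resp.GetString()` addition above
  // relies on.
  EXPECT_EQ(actual_stashes, expected_stashes) << CollectDiagnostics();
}

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}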
@@ -587,7 +587,7 @@ async def test_parser_memory_stats(df_server, async_client: aioredis.Redis):
     await writer.drain()
     # writer is pending because the request is not finished.
     stats = await async_client.execute_command("memory stats")
-    assert stats["connections.direct_bytes"] > 150000
+    assert stats["connections.direct_bytes"] > 130000
 
 
 async def test_reject_non_tls_connections_on_tls(with_tls_server_args, df_factory):