chore: tune TieredStorageTest.MemoryPressure (#3805)

* chore: tune TieredStorageTest.MemoryPressure
* chore: print more stats on failure

parent fbf12e9abb
commit 3945b7e4fa

4 changed files with 18 additions and 6 deletions
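In short: the commit renames TieredStorage::SetMemoryLowLimit to SetMemoryLowWatermark throughout, wraps the long AwaitBrief call in the MemoryPressure test, raises the test's per-iteration sleep from 300us to 500us, and adds an ostream operator for OpManager::Stats so that failing assertions can print the full tiering statistics.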
@@ -312,7 +312,7 @@ TieredStorage::TieredStorage(size_t max_size, DbSlice* db_slice)
       bins_{make_unique<tiering::SmallBins>()} {
   write_depth_limit_ = absl::GetFlag(FLAGS_tiered_storage_write_depth);
   size_t mem_per_shard = max_memory_limit / shard_set->size();
-  SetMemoryLowLimit(absl::GetFlag(FLAGS_tiered_low_memory_factor) * mem_per_shard);
+  SetMemoryLowWatermark(absl::GetFlag(FLAGS_tiered_low_memory_factor) * mem_per_shard);
 }
 
 TieredStorage::~TieredStorage() {
@@ -329,7 +329,7 @@ void TieredStorage::Close() {
   op_manager_->Close();
 }
 
-void TieredStorage::SetMemoryLowLimit(size_t mem_limit) {
+void TieredStorage::SetMemoryLowWatermark(size_t mem_limit) {
   op_manager_->memory_low_limit_ = mem_limit;
   VLOG(1) << "Memory low limit is " << mem_limit;
 }
@@ -338,6 +338,7 @@ util::fb2::Future<string> TieredStorage::Read(DbIndex dbid, string_view key,
                                               const PrimeValue& value) {
   util::fb2::Future<std::string> fut;
   Read(dbid, key, value, [fut](const std::string& value) mutable { fut.Resolve(value); });
+
   return fut;
 }
 
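The three hunks above are from the TieredStorage implementation (the file path is not shown on this page; judging by the hunk context it is the tiered_storage .cc file). Beyond the mechanical rename, the constructor shows how the watermark is derived: the global memory budget is divided evenly across shards and scaled by FLAGS_tiered_low_memory_factor. A worked example of that computation, with illustrative numbers (the shard count and factor are assumptions, not dragonfly defaults):

#include <cstddef>
#include <cstdio>

int main() {
  // Values below are assumptions for illustration only.
  std::size_t max_memory_limit = 20 << 20;  // 20 MiB, matching the test below
  std::size_t num_shards = 4;               // assumed shard count
  double tiered_low_memory_factor = 0.1;    // assumed flag value

  std::size_t mem_per_shard = max_memory_limit / num_shards;  // 5 MiB per shard
  std::size_t watermark =
      static_cast<std::size_t>(tiered_low_memory_factor * mem_per_shard);
  std::printf("per-shard low watermark: %zu bytes\n", watermark);  // 524288
}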
@@ -44,7 +44,7 @@ class TieredStorage {
   std::error_code Open(std::string_view path);
   void Close();
 
-  void SetMemoryLowLimit(size_t mem_limit);
+  void SetMemoryLowWatermark(size_t mem_limit);
 
   // Read offloaded value. It must be of external type
   util::fb2::Future<std::string> Read(DbIndex dbid, std::string_view key, const PrimeValue& value);
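The hunk above is the matching declaration in the TieredStorage header, changed in lockstep with the definition; the rename carries no behavioral change.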
@@ -301,7 +301,8 @@ TEST_F(TieredStorageTest, FlushPending) {
 
 TEST_F(TieredStorageTest, MemoryPressure) {
   max_memory_limit = 20_MB;
-  pp_->at(0)->AwaitBrief([] { EngineShard::tlocal()->tiered_storage()->SetMemoryLowLimit(2_MB); });
+  pp_->at(0)->AwaitBrief(
+      [] { EngineShard::tlocal()->tiered_storage()->SetMemoryLowWatermark(2_MB); });
 
   constexpr size_t kNum = 10000;
   for (size_t i = 0; i < kNum; i++) {
@@ -310,7 +311,7 @@ TEST_F(TieredStorageTest, MemoryPressure) {
       resp = Run({"INFO", "ALL"});
       ASSERT_FALSE(true) << i << "\nInfo ALL:\n" << resp.GetString();
     }
-    ThisFiber::SleepFor(300us);
+    ThisFiber::SleepFor(500us);
   }
 
   EXPECT_LT(used_mem_peak.load(), 20_MB);
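Together, the two test hunks above tune the MemoryPressure scenario: memory is capped at 20_MB with a 2_MB low watermark on shard 0, kNum = 10000 values are written, and the fiber now sleeps 500us (up from 300us) between iterations so background offloading can keep used_mem_peak below the cap; when the check trips, the test dumps INFO ALL into the assertion message. A minimal sketch of that bounded-retry-then-dump pattern, with made-up names (WaitFor, cond, dump) since the test's surrounding lines are not part of this diff, and a plain std::thread sleep standing in for the fiber sleep:

#include <chrono>
#include <functional>
#include <string>
#include <thread>

// Retry `cond` a bounded number of times; if it never holds, capture
// diagnostics (e.g. the output of INFO ALL) for the failure message.
bool WaitFor(const std::function<bool()>& cond,
             const std::function<std::string()>& dump,
             std::string* diagnostics, int retries = 10000) {
  using namespace std::chrono_literals;
  for (int i = 0; i < retries; i++) {
    if (cond())
      return true;
    std::this_thread::sleep_for(500us);  // the tuned back-off from the diff
  }
  *diagnostics = dump();
  return false;
}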
@@ -20,6 +20,16 @@ namespace dfly::tiering {
 using namespace std;
 using namespace std::string_literals;
 
+ostream& operator<<(ostream& os, const OpManager::Stats& stats) {
+  return os << "pending_read_cnt: " << stats.pending_read_cnt
+            << ", pending_stash_cnt: " << stats.pending_stash_cnt
+            << ", alloc_bytes: " << stats.disk_stats.allocated_bytes
+            << ", capacity_bytes: " << stats.disk_stats.capacity_bytes
+            << ", heap_buf_allocs: " << stats.disk_stats.heap_buf_alloc_count
+            << ", registered_buf_allocs: " << stats.disk_stats.registered_buf_alloc_count
+            << ", max_file_size: " << stats.disk_stats.max_file_size;
+}
+
 struct OpManagerTest : PoolTestBase, OpManager {
   OpManagerTest() : OpManager(256_MB) {
   }
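This new operator<< is what makes the `<< GetStats()` in the next hunk meaningful: gtest streams the whole Stats struct into the failure message. A self-contained illustration of the pattern follows that hunk.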
@@ -76,7 +86,7 @@ TEST_F(OpManagerTest, SimpleStashesWithReads) {
   while (stashed_.size() < 100)
     util::ThisFiber::SleepFor(1ms);
 
-  EXPECT_EQ(GetStats().disk_stats.allocated_bytes, 100 * kPageSize);
+  EXPECT_EQ(GetStats().disk_stats.allocated_bytes, 100 * kPageSize) << GetStats();
 
   for (unsigned i = 0; i < 100; i++) {
     EXPECT_GE(stashed_[i].offset, i > 0);
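For reference, here is a self-contained illustration of the technique used above: defining operator<< for a stats struct so gtest can append it to a failure message. The struct and its fields are invented for the example; only the pattern mirrors the diff.

#include <gtest/gtest.h>

#include <cstddef>
#include <ostream>

// Illustrative stand-in for OpManager::Stats; the fields are made up.
struct Stats {
  std::size_t pending_read_cnt = 0;
  std::size_t allocated_bytes = 0;
};

std::ostream& operator<<(std::ostream& os, const Stats& s) {
  return os << "pending_read_cnt: " << s.pending_read_cnt
            << ", allocated_bytes: " << s.allocated_bytes;
}

TEST(StatsDump, PrintsOnFailure) {
  Stats s{1, 4096};
  // If the expectation fails, gtest appends the streamed Stats to the
  // failure message, just like `... << GetStats()` in the hunk above.
  EXPECT_EQ(s.allocated_bytes, 4096u) << s;
}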